Kubernetes Cluster Deployment: A Hands-On, Step-by-Step Guide

Resource allocation

Workstation: 192.168.31.236, k8s-master

New server: 192.168.31.43, k8s-node1

Laptop: 192.168.31.41, k8s-node2

Prerequisites

Disable the firewall (Ubuntu)

sudo systemctl stop ufw     # stop ufw
sudo systemctl disable ufw  # disable it at boot
sudo systemctl mask ufw     # optional: prevent other services from starting it again

systemctl status ufw        # check the firewall status


There is no need to disable SELinux when setting up K8s on Ubuntu, because Ubuntu uses AppArmor rather than SELinux.

swapoff -a ; sed -i '/swap/d' /etc/fstab # turn swap off now and delete the swap entry from /etc/fstab so it stays off after the next reboot
cat /etc/fstab                           # check the swap partition/file entries
free -h                                  # confirm swap is off


Check the IP address of each of the three machines

ifconfig
# the host IPs are 192.168.31.236, 192.168.31.43 and 192.168.31.41 respectively

Set the hostnames: change the three machines' hostnames to k8s-master, k8s-node1 and k8s-node2 respectively

vim /etc/hostname
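
Alternatively, on these Ubuntu releases the hostname can be set in one step with hostnamectl (a sketch; run the matching command on each node):

sudo hostnamectl set-hostname k8s-master   # on the master; use k8s-node1 / k8s-node2 on the workers
hostnamectl                                # verify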

Configure the hostname mappings on each of the three hosts

# k8s-master
vim /etc/hosts

# enter the following

127.0.0.1       localhost
127.0.1.1       k8s-master

# K8s cluster node mappings
192.168.31.236  k8s-master
192.168.31.43   k8s-node1
192.168.31.41   k8s-node2

# The following lines are desirable for IPv6 capable hosts
::1     ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
# k8s-node1
vim /etc/hosts

# enter the following

127.0.0.1       localhost
127.0.1.1       k8s-node1

# K8s cluster node mappings
192.168.31.236  k8s-master
192.168.31.43   k8s-node1
192.168.31.41   k8s-node2

# The following lines are desirable for IPv6 capable hosts
::1     ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
# k8s-node2
vim /etc/hosts

# enter the following

127.0.0.1       localhost
127.0.1.1       k8s-node2

# K8s cluster node mappings
192.168.31.236  k8s-master
192.168.31.43   k8s-node1
192.168.31.41   k8s-node2

# The following lines are desirable for IPv6 capable hosts
::1     ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters

On each machine, ping the other two; if the pings succeed, the mapping works.

root@Dell:~# ping k8s-node1
PING k8s-node1 (192.168.31.43) 56(84) bytes of data.
64 bytes from k8s-node1 (192.168.31.43): icmp_seq=1 ttl=64 time=1.70 ms
64 bytes from k8s-node1 (192.168.31.43): icmp_seq=2 ttl=64 time=4.85 ms
64 bytes from k8s-node1 (192.168.31.43): icmp_seq=3 ttl=64 time=1.26 ms
64 bytes from k8s-node1 (192.168.31.43): icmp_seq=4 ttl=64 time=1.23 ms
64 bytes from k8s-node1 (192.168.31.43): icmp_seq=5 ttl=64 time=85.9 ms
64 bytes from k8s-node1 (192.168.31.43): icmp_seq=6 ttl=64 time=3.63 ms
64 bytes from k8s-node1 (192.168.31.43): icmp_seq=7 ttl=64 time=1.47 ms
64 bytes from k8s-node1 (192.168.31.43): icmp_seq=8 ttl=64 time=1.22 ms
64 bytes from k8s-node1 (192.168.31.43): icmp_seq=9 ttl=64 time=1.24 ms
64 bytes from k8s-node1 (192.168.31.43): icmp_seq=10 ttl=64 time=1.22 ms
^C
--- k8s-node1 ping statistics ---
10 packets transmitted, 10 received, 0% packet loss, time 9013ms
rtt min/avg/max/mdev = 1.219/10.368/85.855/25.190 ms
root@Dell:~# ping k8s-node2
PING k8s-node2 (192.168.31.41) 56(84) bytes of data.
64 bytes from k8s-node2 (192.168.31.41): icmp_seq=1 ttl=64 time=62.3 ms
64 bytes from k8s-node2 (192.168.31.41): icmp_seq=2 ttl=64 time=89.5 ms
^C
--- k8s-node2 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1002ms
rtt min/avg/max/mdev = 62.337/75.898/89.460/13.561 ms
root@ps:~# ping k8s-master
PING k8s-master (192.168.31.236) 56(84) bytes of data.
64 bytes from k8s-master (192.168.31.236): icmp_seq=1 ttl=64 time=1.50 ms
64 bytes from k8s-master (192.168.31.236): icmp_seq=2 ttl=64 time=1.07 ms
64 bytes from k8s-master (192.168.31.236): icmp_seq=3 ttl=64 time=1.56 ms
64 bytes from k8s-master (192.168.31.236): icmp_seq=4 ttl=64 time=1.30 ms
^C
--- k8s-master ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3004ms
rtt min/avg/max/mdev = 1.071/1.356/1.560/0.191 ms
root@ps:~# ping k8s-node2
PING k8s-node2 (192.168.31.41) 56(84) bytes of data.
64 bytes from k8s-node2 (192.168.31.41): icmp_seq=1 ttl=64 time=64.2 ms
64 bytes from k8s-node2 (192.168.31.41): icmp_seq=2 ttl=64 time=290 ms
64 bytes from k8s-node2 (192.168.31.41): icmp_seq=3 ttl=64 time=99.7 ms
64 bytes from k8s-node2 (192.168.31.41): icmp_seq=4 ttl=64 time=20.4 ms
^C
--- k8s-node2 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3004ms
rtt min/avg/max/mdev = 20.436/118.481/289.549/102.681 ms
root@shuoxing-Precision-7530:/# ping k8s-master
PING k8s-master (192.168.31.236) 56(84) bytes of data.
64 bytes from k8s-master (192.168.31.236): icmp_seq=1 ttl=64 time=1.04 ms
64 bytes from k8s-master (192.168.31.236): icmp_seq=2 ttl=64 time=3.24 ms
64 bytes from k8s-master (192.168.31.236): icmp_seq=3 ttl=64 time=3.63 ms
64 bytes from k8s-master (192.168.31.236): icmp_seq=4 ttl=64 time=3.52 ms
^C
--- k8s-master ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3002ms
rtt min/avg/max/mdev = 1.049/2.861/3.633/1.056 ms
root@shuoxing-Precision-7530:/# ping k8s-node1
PING k8s-node1 (192.168.31.43) 56(84) bytes of data.
64 bytes from k8s-node1 (192.168.31.43): icmp_seq=1 ttl=64 time=2.66 ms
64 bytes from k8s-node1 (192.168.31.43): icmp_seq=2 ttl=64 time=97.5 ms
64 bytes from k8s-node1 (192.168.31.43): icmp_seq=3 ttl=64 time=122 ms
64 bytes from k8s-node1 (192.168.31.43): icmp_seq=4 ttl=64 time=2.84 ms
^C
--- k8s-node1 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3003ms
rtt min/avg/max/mdev = 2.664/56.318/122.207/54.268 ms

Set up outbound internet access through a proxy (optional)

# confirm the Clash proxy is running
ss -tlnp | grep 7890

# the Linux command line does not go through Clash automatically; export the proxy variables
export http_proxy=http://127.0.0.1:7890
export https_proxy=http://127.0.0.1:7890

curl -I https://www.google.com
ping 8.8.8.8

# make the variables permanent (optional, but not generally recommended)
vim ~/.bashrc

export http_proxy=http://127.0.0.1:7890
export https_proxy=http://127.0.0.1:7890
export all_proxy=http://127.0.0.1:7890

source ~/.bashrc
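
Note: if these variables stay exported while kubeadm and the container runtime are used later, cluster-internal traffic should bypass the proxy. A sketch using the addresses and CIDRs from this guide (the exact list is an assumption — adjust it to your environment):

export no_proxy=localhost,127.0.0.1,192.168.31.0/24,10.96.0.0/12,10.224.0.0/16
export NO_PROXY=$no_proxy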

Install Docker and configure it

Install Docker and configure the package mirror. First check the system release and codename; the procedure is almost identical on every node: add the Docker GPG key, add the apt source that matches the codename, update apt and install the Docker components, then verify the installation.

# on k8s-master
# check the OS release
lsb_release -a

No LSB modules are available.
Distributor ID: Ubuntu
Description:    Ubuntu 20.04.6 LTS
Release:        20.04
Codename:       focal

# add the Docker GPG key (focal):
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/ubuntu/gpg \
 | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg

# add the apt source:
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \
https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/ubuntu focal stable" \
 | sudo tee /etc/apt/sources.list.d/docker.list >/dev/null

# update and install:
sudo apt-get update
sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin

# verify the installation:
docker --version
dpkg -l | grep docker
# enable Docker at boot and start it now
systemctl enable docker --now
sudo systemctl status docker
# on k8s-node1
# check the OS release
lsb_release -a


No LSB modules are available.
Distributor ID: Ubuntu
Description:    Ubuntu 22.04.5 LTS
Release:        22.04
Codename:       jammy

# install Docker CE from the Tsinghua mirror (for Ubuntu 22.04 jammy)
# add the GPG key:
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/ubuntu/gpg \
  | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg

# add the apt source:
echo \
  "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \
  https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/ubuntu \
  jammy stable" \
  | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

# update and install:
sudo apt-get update
sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin

# verify the installation:
docker --version
dpkg -l | grep docker
# enable Docker at boot and start it now
systemctl enable docker --now
sudo systemctl status docker
# on k8s-node2
# check the OS release
cat /etc/os-release

NAME="Ubuntu"
VERSION="18.04.6 LTS (Bionic Beaver)"
ID=ubuntu
ID_LIKE=debian
PRETTY_NAME="Ubuntu 18.04.6 LTS"
VERSION_ID="18.04"
HOME_URL="https://www.ubuntu.com/"
SUPPORT_URL="https://help.ubuntu.com/"
BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
VERSION_CODENAME=bionic
UBUNTU_CODENAME=bionic

# install Docker CE from the Tsinghua mirror (for Ubuntu 18.04 bionic)
# add the GPG key:
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/ubuntu/gpg \
  | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg

# add the apt source:
echo \
  "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \
  https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/ubuntu \
  bionic stable" \
  | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

# update and install:
sudo apt-get update
sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin

# verify the installation:
docker --version
dpkg -l | grep docker
# enable Docker at boot and start it now
systemctl enable docker --now
sudo systemctl status docker
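
As an optional sanity check that the daemon can actually run containers (this pulls a tiny test image, so it needs internet access or the registry mirror configured below):

sudo docker run --rm hello-world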

Docker versions

  • k8s-master: 28.1.1
  • k8s-node1: 28.4.0
  • k8s-node2: 24.0.2

Note: from Kubernetes v1.22 onward the kubelet defaults to the systemd cgroup driver, so the container runtime's cgroup driver must also be systemd, while Docker's default cgroup driver is cgroupfs. With Kubernetes 1.21 and earlier there was no need to change the cgroup driver.

Check the current cgroup driver with docker info | grep -i cgroup; if it is cgroupfs, it needs to be changed to systemd.

docker info | grep -i cgroup

root@Dell:~# docker info | grep -i cgroup
 Cgroup Driver: cgroupfs
 Cgroup Version: 1

Overwrite Docker's configuration file /etc/docker/daemon.json so that Docker starts with a domestic registry mirror and systemd as its cgroup driver.

sudo tee /etc/docker/daemon.json > /dev/null <<'EOF'
{
  "registry-mirrors": ["https://frz7i079.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": { "max-size": "100m" },
  "storage-driver": "overlay2"
}
EOF

Check that the Docker configuration file was written successfully

cat /etc/docker/daemon.json

root@Dell:~# cat /etc/docker/daemon.json
{
  "registry-mirrors": ["https://frz7i079.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": { "max-size": "100m" },
  "storage-driver": "overlay2"
}

Restart Docker

systemctl restart docker
systemctl status docker

# check the cgroup driver again
docker info | grep -i cgroup

root@Dell:~# docker info | grep -i cgroup
 Cgroup Driver: systemd
 Cgroup Version: 1

Set the kernel networking parameters that the Kubernetes network plugin needs in order to work properly.

cat <<EOF> /etc/sysctl.d/k8s.conf 
net.bridge.bridge-nf-call-ip6tables = 1 
net.bridge.bridge-nf-call-iptables = 1 
net.ipv4.ip_forward = 1 
EOF

Apply the configuration immediately

sysctl -p /etc/sysctl.d/k8s.conf
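
The two bridge-related keys above only exist once the br_netfilter kernel module is loaded; if sysctl -p reports unknown keys, load the modules and make them persistent across reboots (a minimal sketch):

sudo modprobe overlay
sudo modprobe br_netfilter

cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF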

Install kubelet, kubeadm and kubectl

Install kubelet, kubeadm and kubectl on all three nodes.

  • kubelet: the core component that runs on every node. It talks to the API server and, based on the scheduler's decisions, creates and manages Pods on its node through the container runtime (such as Docker or containerd). It periodically reports node and Pod status and keeps Pods running in their desired state.
  • kubeadm: the official tool for quickly bootstrapping and managing a Kubernetes cluster. Typical usage is kubeadm init to initialize the control-plane node and kubeadm join to add worker nodes. It takes care of generating certificates and configuration files and starting the required components so the cluster comes up quickly.
  • kubectl: the Kubernetes command-line client that talks to the API server. With kubectl you deploy applications (kubectl apply -f), inspect cluster state (kubectl get) and troubleshoot (kubectl describe, kubectl logs); it is the main entry point for day-to-day cluster administration.
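
The 1.28.2-00 packages used below come from the Kubernetes apt repository, which is not added in any of the steps above. If apt-cache madison shows nothing, add the repository first; a minimal sketch using the Aliyun mirror of the legacy community repo (the mirror URL and key path are assumptions based on common practice — substitute whichever mirror you prefer):

curl -fsSL https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main" \
  | sudo tee /etc/apt/sources.list.d/kubernetes.list >/dev/null
sudo apt-get update

After the install step below, the versions can also be pinned with sudo apt-mark hold kubelet kubeadm kubectl so that a routine apt upgrade does not bump them.
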
# list the kube tool versions available in the apt cache
apt-cache madison kubeadm

# install kubelet, kubectl and kubeadm
sudo apt-get install -y kubelet=1.28.2-00 kubeadm=1.28.2-00 kubectl=1.28.2-00

# verify the installation
kubelet --version
kubeadm version
kubectl version

root@Dell:~# kubelet --version
Kubernetes v1.28.2
root@Dell:~# kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"28", GitVersion:"v1.28.2", GitCommit:"89a4ea3e1e4ddd7f7572286090359983e0387b2f", GitTreeState:"clean", BuildDate:"2023-09-13T09:34:32Z", GoVersion:"go1.20.8", Compiler:"gc", Platform:"linux/amd64"}
root@Dell:~# kubectl version
Client Version: v1.28.2
Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3
The connection to the server localhost:8080 was refused - did you specify the right host or port?

Enable the CRI plugin

Since Kubernetes 1.24, dockershim has been removed entirely and the kubelet no longer supports Docker directly; it must go through CRI, so containerd's CRI plugin has to be enabled.

CRI stands for Container Runtime Interface.
It is a standardized interface defined by the Kubernetes project so that the kubelet can drive any underlying container runtime through a uniform gRPC API.

To enable the CRI plugin, edit /etc/containerd/config.toml and replace its contents with the following.

(This must be changed on every node.)

version = 2

root = "/var/lib/containerd"
state = "/run/containerd"
oom_score = 0

[grpc]
  address = "/run/containerd/containerd.sock"
  uid = 0
  gid = 0

[debug]
  level = "info"

[plugins]

  # !!! key part: enable the CRI plugin
  [plugins."io.containerd.grpc.v1.cri"]
    # in mainland China, the Aliyun pause image is recommended
    sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"

    [plugins."io.containerd.grpc.v1.cri".containerd]
      snapshotter = "overlayfs"
      default_runtime_name = "runc"

    # runc runtime, using systemd cgroups to match the kubelet default
    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
        runtime_type = "io.containerd.runc.v2"
        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
          SystemdCgroup = true

  # optional: works without any of this; add registry.mirrors sections only if you really need a registry mirror
  # [plugins."io.containerd.grpc.v1.cri".registry]
  #   [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
  #     [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
  #       endpoint = ["https://registry-1.docker.io"]

# restart and verify
sudo systemctl daemon-reload
sudo systemctl restart containerd

# the cri plugin should show up as ok
sudo ctr plugins ls | grep cri

In a Kubernetes environment, crictl is used in place of the docker command. Common equivalents:

Docker command                 crictl equivalent              Description
docker ps                      crictl ps                      list running containers
docker ps -a                   crictl ps -a                   list all containers
docker images                  crictl images                  list images
docker logs <container>        crictl logs <container>        view logs
docker inspect <container>     crictl inspect <container>     inspect details
docker exec -it <id> bash      crictl exec -it <id> bash      enter a container
docker stop <id>               crictl stop <id>               stop a container
docker rm <id>                 crictl rm <id>                 remove a container
docker rmi <image>             crictl rmi <image>             remove an image
docker pull <image>            crictl pull <image>            pull an image
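
crictl reads its endpoint settings from /etc/crictl.yaml; if the commands above complain that no runtime endpoint is configured, point it at the containerd socket from the config above (a minimal sketch):

cat <<EOF | sudo tee /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
EOF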

kubeadm init

Run kubeadm init on the master node; the worker nodes will run kubeadm join afterwards.

# run the following on the master node; read the parameter notes below first — some values may need to be changed for your environment
kubeadm init \
      --apiserver-advertise-address=192.168.31.236 \
      --image-repository registry.aliyuncs.com/google_containers \
      --kubernetes-version v1.28.2 \
      --service-cidr=10.96.0.0/12 \
      --pod-network-cidr=10.224.0.0/16
  1. --apiserver-advertise-address=192.168.31.236
  • The IP address the API server advertises and listens on; use your master node's IP here.
  2. --image-repository=registry.aliyuncs.com/google_containers
  • kubeadm needs to pull a set of core images whose default registry is registry.k8s.io; pointing it at the Aliyun mirror speeds up the downloads.
  3. --kubernetes-version=v1.28.2
  • The Kubernetes version to install.
  • The kubelet / kubeadm / kubectl versions on all nodes in the cluster must be identical, or at least compatible (the control plane should normally not be older than the nodes).
  4. --service-cidr=10.96.0.0/12
  • The address range used for Service virtual IPs (ClusterIPs).
  • For example, a newly created Service is assigned an address like 10.96.x.x, reachable only from inside the cluster.
  • The default is 10.96.0.0/12; unless it conflicts with your existing network, leave it alone.
  5. --pod-network-cidr=10.224.0.0/16
  • The virtual network range Pods are allocated from; every Pod IP comes out of this range, e.g. 10.224.x.x.
  • This range must not overlap with the hosts' existing network.
  • Check with ip route whether any 10.224.* routes already exist; if they do, remove them before reusing the range (a quick self-check is sketched right after this list).
  • Calico defaults to 192.168.0.0/16, which conflicted with local routes here, so 10.224.0.0/16 is used instead; later, when installing Calico, CALICO_IPV4POOL_CIDR in calico.yaml is changed to match the value passed to kubeadm init.
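
A quick per-node self-check for the Pod and Service ranges chosen above (a minimal sketch; adjust the pattern if you pick different CIDRs):

# no output from grep means no conflicting routes on this node
ip route | grep -E '10\.(96|224)\.' || echo "no conflicting routes"
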
root@Dell:~# kubeadm init \
>       --apiserver-advertise-address=192.168.31.236 \
>       --image-repository registry.aliyuncs.com/google_containers \
>       --kubernetes-version v1.28.2 \
>       --service-cidr=10.96.0.0/12 \
>       --pod-network-cidr=10.224.0.0/16
[init] Using Kubernetes version: v1.28.2
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [dell kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.31.236]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [dell localhost] and IPs [192.168.31.236 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [dell localhost] and IPs [192.168.31.236 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 13.051654 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node dell as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node dell as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: wn7o70.dn0o2l3kgxn49p40
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.31.236:6443 --token wn7o70.dn0o2l3kgxn49p40 \
        --discovery-token-ca-cert-hash sha256:fadef8fe950d5f0833a2e2575eed988a387d62aa1adebb1bebd527431b2b143d 

Save the join command printed at the end of the output.

kubeadm join 192.168.31.236:6443 --token wn7o70.dn0o2l3kgxn49p40 \
        --discovery-token-ca-cert-hash sha256:fadef8fe950d5f0833a2e2575eed988a387d62aa1adebb1bebd527431b2b143d
# on the master node, configure the kubectl environment
export KUBECONFIG=/etc/kubernetes/admin.conf

# (optional) add it permanently to root's ~/.bashrc so it does not have to be exported every time
if ! grep -qxF 'export KUBECONFIG=/etc/kubernetes/admin.conf' ~/.bashrc; then echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >> ~/.bashrc; fi && . ~/.bashrc

kubectl is essentially just a client tool: it never operates on containers directly, it talks to the Kubernetes API server.
It needs to know three things:

  • the API server address (e.g. 192.168.31.236:6443)
  • the authentication method (certificates / tokens)
  • the namespace, user information, and so on

All of this is stored in a kubeconfig file, whose default path is $HOME/.kube/config.

Because kubectl looks for its cluster connection details in the current user's $HOME/.kube/config by default, while the configuration generated by kubeadm init lives at /etc/kubernetes/admin.conf (outside the default path), it has to be pointed at that file explicitly with export KUBECONFIG=/etc/kubernetes/admin.conf before it can access and manage the newly created cluster.
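
With KUBECONFIG pointing at admin.conf, a quick sanity check that kubectl can reach the new control plane:

kubectl cluster-info
kubectl get nodes   # the master shows NotReady until the CNI plugin is installed later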

kubeadm join

Run the kubeadm join command on both worker nodes

kubeadm join 192.168.31.236:6443 --token wn7o70.dn0o2l3kgxn49p40 \
        --discovery-token-ca-cert-hash sha256:fadef8fe950d5f0833a2e2575eed988a387d62aa1adebb1bebd527431b2b143d
# list the nodes from the master node
root@Dell:~# kubectl get nodes
NAME                      STATUS     ROLES           AGE   VERSION
dell                      NotReady   control-plane   17h   v1.28.2
ps                        NotReady   <none>          16h   v1.28.2
shuoxing-precision-7530   NotReady   <none>          16h   v1.28.2

Deploy the Calico CNI network plugin

Although the Kubernetes cluster now has one master node and two worker nodes, all three nodes are still NotReady because no CNI network plugin is installed. A CNI plugin is required for Pod networking between nodes. The most common choices are Calico and Flannel; the difference is that Flannel does not support complex network policies while Calico does. Since Kubernetes NetworkPolicy will be configured later on, this guide uses Calico.

Now download the calico.yaml manifest from the official site:

Official site: https://projectcalico.docs.tigera.io/about/about-calico


Download calico.yaml to the master node

root@Dell:~# curl https://raw.githubusercontent.com/projectcalico/calico/v3.30.3/manifests/calico-typha.yaml -o calico.yaml
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100  532k  100  532k    0     0   221k      0  0:00:02  0:00:02 --:--:--  221k
root@Dell:~# ls
calico.yaml  snap

Edit the downloaded calico.yaml: the CALICO_IPV4POOL_CIDR range must match the Pod network CIDR used at kubeadm init, and the YAML indentation must line up, otherwise applying the manifest will fail.

vim calico.yaml

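After editing, a quick way to confirm the value took effect (a sketch; this only checks the text — the indentation in your file must still match the neighbouring env entries):

grep -A1 CALICO_IPV4POOL_CIDR calico.yaml
# expected after uncommenting and editing:
#             - name: CALICO_IPV4POOL_CIDR
#               value: "10.224.0.0/16"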

Check which images calico.yaml requires

root@Dell:~# grep image: calico.yaml
          image: docker.io/calico/cni:v3.30.3
          image: docker.io/calico/cni:v3.30.3
          image: docker.io/calico/node:v3.30.3
          image: docker.io/calico/node:v3.30.3
          image: docker.io/calico/kube-controllers:v3.30.3
      - image: docker.io/calico/typha:v3.30.3

All four of these images must be pulled on every node (k8s-master shown as the example); if the pulls fail, you may need to configure a registry mirror.

crictl pull docker.io/calico/cni:v3.30.3
crictl pull docker.io/calico/node:v3.30.3
crictl pull docker.io/calico/kube-controllers:v3.30.3
crictl pull docker.io/calico/typha:v3.30.3
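
A quick check that the images landed on a node:

crictl images | grep calico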

Then, on the master node, run kubectl apply -f calico.yaml

root@Dell:~# kubectl apply -f calico.yaml
poddisruptionbudget.policy/calico-kube-controllers created
poddisruptionbudget.policy/calico-typha created
serviceaccount/calico-kube-controllers created
serviceaccount/calico-node created
serviceaccount/calico-cni-plugin created
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpfilters.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipreservations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/stagedglobalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/stagedkubernetesnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/stagednetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/tiers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/adminnetworkpolicies.policy.networking.k8s.io created
customresourcedefinition.apiextensions.k8s.io/baselineadminnetworkpolicies.policy.networking.k8s.io created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrole.rbac.authorization.k8s.io/calico-cni-plugin created
clusterrole.rbac.authorization.k8s.io/calico-tier-getter created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-cni-plugin created
clusterrolebinding.rbac.authorization.k8s.io/calico-tier-getter created
service/calico-typha created
daemonset.apps/calico-node created
deployment.apps/calico-kube-controllers created
deployment.apps/calico-typha created

Check the Calico Pods

root@Dell:~# kubectl get pod -A | grep calico
kube-system   calico-kube-controllers-5cc564f9c5-xpr2s   1/1     Running   0          3m38s
kube-system   calico-node-52hr7                          1/1     Running   0          3m38s
kube-system   calico-node-k2cf9                          1/1     Running   0          3m38s
kube-system   calico-node-tsk4x                          1/1     Running   0          3m38s
kube-system   calico-typha-565c7f76dc-jsb8v              1/1     Running   0          3m38s

All three nodes now show the Ready status

root@Dell:~# kubectl get node
NAME                      STATUS   ROLES           AGE   VERSION
dell                      Ready    control-plane   42h   v1.28.2
ps                        Ready    <none>          41h   v1.28.2
shuoxing-precision-7530   Ready    <none>          40h   v1.28.2

Test whether the cluster was set up successfully

Deploy a simple Nginx service to test the Kubernetes cluster: if the Nginx welcome page is reachable at (node IP : exposed port), the deployment works.

# create an nginx deployment
kubectl create deployment nginx --image=nginx:latest
# verify the pod is running; its status should be Running
kubectl get pods -l app=nginx
# expose it as a NodePort service so it can be reached via node IP + port:
kubectl expose deployment nginx --port=80 --target-port=80 --type=NodePort
# check the service details:
kubectl get svc nginx
# example output:
root@Dell:~# kubectl get svc nginx
NAME    TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
nginx   NodePort   10.111.225.44   <none>        80:30842/TCP   11s

Access test: from any cluster node (master or worker), open http://<NodeIP>:30842 using a node IP plus the NodePort.

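The same check can be done from the command line with curl, using the NodePort from the example output above (30842 here; yours will differ):

curl -I http://192.168.31.236:30842
# an "HTTP/1.1 200 OK" response header from nginx means the Service is reachable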

After verifying, the test resources can be deleted.
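
For example, removing the Service and Deployment created above:

kubectl delete service nginx
kubectl delete deployment nginx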
