A Little Basic K8s Every Day – Building a Multi-Master K8s Cluster with kubeadm – version 1.20.6

Prerequisites

centos-stream-8
[root@localhost ~]# cat /etc/os-release 
NAME="CentOS Stream"

| Hostname | IP address | Role |
| --- | --- | --- |
| master-worker-node-1 | 192.168.122.6/24 | control-plane,master,worker |
| master-worker-node-2 | 192.168.122.250/24 | control-plane,master,worker |
| master-worker-node-3 | 192.168.122.99/24 | control-plane,master,worker |
| only-worker-node-4 | 192.168.122.181/24 | worker |

kubelet, kubeadm, and kubectl version: 1.20.6
Container runtime: docker
Load balancing for the apiserver: nginx + keepalived
VIP: 192.168.122.254/24

Base configuration

Because the first three nodes all act as masters (and workers), the same base configuration is applied to every node.
It covers: hostname, IP address, DNS, disabling swap, disabling SELinux, disabling firewalld, enabling IP forwarding, configuring the yum repositories, configuring NTP, installing base packages, and setting up SSH trust between the hosts. The script below takes the node's IP, hostname, and VRRP role as positional arguments.
#!/bin/bash

echo "the ip is ${1}"
echo "the hostname is ${2}"
echo "the vrrp_role is ${3}"

function line() {
    echo -e "\033[33m###################${1}########################\033[0m"
    sleep 1
}
function check(){
  if [ $? == 0 ]
  then
    echo -e "\033[32m${1} ====> success! \033[0m"
    sleep 1
  else
    echo -e "\033[31m${1} ====> fail! \033[0m"
    exit 1
  fi
}
# Define variables
ip1="192.168.122.6"
hostname1="master-worker-node-1"
vrrp_role1="master"

ip2="192.168.122.250"
hostname2="master-worker-node-2"
vrrp_role2="slave"

ip3="192.168.122.99"
hostname3="master-worker-node-3"
vrrp_role3="slave"

root_pass="1"

# Set the hostname
line "set hostname"
hostnamectl set-hostname ${2}
check "set hostname"

# Configure the IP address
#line "configure IP address"
#nmcli con add con-name ens3-static ifname ens3 type ethernet ipv4.addresses ${1}/24 ipv4.gateway 10.64.84.1 ipv4.method manual &>/dev/null
#nmcli con up ens3 &>/dev/null
#systemctl enable NetworkManager --now &>/dev/null
#check "configure IP address"

# Configure DNS
#line "configure DNS"
#echo "nameserver 114.114.114.114" >> /etc/resolv.conf
#check "configure DNS"

# Disable selinux and firewalld
line "disable selinux and firewalld"
sed -i.bak "/^SELINUX=/s@SELINUX=.*@SELINUX=disabled@g" /etc/selinux/config
systemctl disable firewalld --now
check "disable selinux and firewalld"

# Configure /etc/hosts entries
line "configure /etc/hosts entries"
cat >> /etc/hosts <<eof
$ip1 $hostname1
$ip2 $hostname2
$ip3 $hostname3
eof
check "configure /etc/hosts entries"

# Adjust kernel parameters
line "adjust kernel parameters"
modprobe br_netfilter
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf
check "adjust kernel parameters"

# Configure yum repositories
line "configure yum repositories"
rm -rf /etc/yum.repos.d/*
cat > /etc/yum.repos.d/centos8.repo <<eof
[appstream]
name=appstream
enabled=1
gpgcheck=0
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/8-stream/AppStream/x86_64/os/

[baseos]
name=baseos
enabled=1
gpgcheck=0
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/8-stream/BaseOS/x86_64/os/

[extras-common]
name=extras-common
enabled=1
gpgcheck=0
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/8-stream/extras/x86_64/extras-common/

[extras-os]
name=extras-os
enabled=1
gpgcheck=0
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/8-stream/extras/x86_64/os/

[openstack-yoga]
name=openstack-yoga
enabled=1
gpgcheck=0
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/8-stream/cloud/x86_64/openstack-yoga/

[plus]
name=centos-plus
enabled=1
gpgcheck=0
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/8-stream/centosplus/x86_64/os/

[docker-ce]
name=docker-ce
enabled=1
gpgcheck=0
baseurl=https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/8/x86_64/stable/

[epel]
name=epel
enabled=1
gpgcheck=0
baseurl=https://mirrors.tuna.tsinghua.edu.cn/epel/8/Everything/x86_64/

[K8S]
name=K8S
enabled=1
baseurl=https://mirrors.tuna.tsinghua.edu.cn/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=0
eof
yum repolist
check "configure yum repositories"

# Configure NTP
line "configure NTP"
yum install -y chrony
sed -i.bak "/^server/s@.*@server ntp.aliyun.com iburst@g" /etc/chrony.conf
systemctl enable chronyd.service --now
chronyc sourcestats -v
check "configure NTP"

# Install base packages
line "install base packages"
yum install -y tcpdump sshpass yum-utils device-mapper-persistent-data lvm2 net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip libaio-devel wget vim ncurses-devel autoconf epel-release socat conntrack ipvsadm
check "install base packages"

# Set up passwordless SSH trust between the hosts
line "set up SSH trust between hosts"
rm -rf /root/.ssh/id_*
ssh-keygen -t dsa -N '' -f /root/.ssh/id_dsa -q
ssh-keygen -t rsa -N '' -f /root/.ssh/id_rsa -q
sshpass -p $root_pass ssh-copy-id root@$ip1 -o StrictHostKeyChecking=no
sshpass -p $root_pass ssh-copy-id root@$ip2 -o StrictHostKeyChecking=no
sshpass -p $root_pass ssh-copy-id root@$ip3 -o StrictHostKeyChecking=no
check "set up SSH trust between hosts"

# Disable swap (required by kubelet)
line "disable swap"
swapoff -a &>/dev/null
sed -i.bak "/centos-swap/s@.*@# &@g" /etc/fstab
check "disable swap"
Reboot the machines so that the SELinux and swap changes take effect.
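
After the reboot, a quick check (a sketch, assuming the defaults written above) confirms both changes took effect:
getenforce           # expect: Disabled
swapon --show        # expect: no output, i.e. no active swap
free -m | grep -i swap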

Install the container runtime (docker)

Install docker-ce with yum, configure registry mirrors, and switch docker's cgroup driver to systemd:
# Install docker-ce
line "install docker-ce"
yum install -y docker-ce-3:20.10.18-3.el8
check "install docker-ce"

# Configure registry mirrors and the systemd cgroup driver
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<eof
{
 "registry-mirrors": ["https://4wgtxa6q.mirror.aliyuncs.com","https://registry.docker-cn.com","https://docker.mirrors.ustc.edu.cn"],
 "exec-opts": ["native.cgroupdriver=systemd"]
}
eof
systemctl daemon-reload && systemctl enable docker && systemctl start docker
check "configure registry mirrors and systemd cgroup driver"

Install kubelet, kubeadm, and kubectl

Install kubelet-1.20.6, kubeadm-1.20.6, and kubectl-1.20.6 with yum:
# Install kubelet kubectl kubeadm
line "install kubelet kubectl kubeadm"
yum install -y kubelet-1.20.6-0 kubeadm-1.20.6-0 kubectl-1.20.6-0
systemctl enable kubelet
check "install kubelet kubectl kubeadm"

Install nginx and keepalived

Install nginx and keepalived with yum, then adjust the nginx and keepalived configuration files:
# Install nginx and keepalived for master-node high availability
line "install nginx and keepalived for HA"
yum install -y nginx keepalived
check "install nginx and keepalived for HA"

# A multi-master setup essentially means building a highly available apiserver endpoint
# Rewrite the nginx configuration file
line "rewrite nginx configuration"
cat > /etc/nginx/nginx.conf <<eof
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

# Layer-4 load balancing for the apiserver on the three master nodes
stream {

    log_format  main  '\$remote_addr \$upstream_addr - [\$time_local] \$status \$upstream_bytes_sent';

    access_log  /var/log/nginx/k8s-access.log  main;

    upstream k8s-apiserver {
       server $ip1:6443 weight=5 max_fails=3 fail_timeout=30s;   # Master1 apiserver IP:PORT
       server $ip2:6443 weight=5 max_fails=3 fail_timeout=30s;   # Master2 apiserver IP:PORT
       server $ip3:6443 weight=5 max_fails=3 fail_timeout=30s;   # Master3 apiserver IP:PORT
    }

    server {
       listen 16443; # nginx shares the master nodes with the apiserver, so it must not listen on 6443 or the ports would clash
       proxy_pass k8s-apiserver;
    }
}

http {
    log_format  main  '\$remote_addr - \$remote_user [\$time_local] "\$request" '
                      '\$status \$body_bytes_sent "\$http_referer" '
                      '"\$http_user_agent" "\$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   65;
    types_hash_max_size 2048;

    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;

    server {
        listen       80 default_server;
        server_name  _;

        location / {
        }
    }
}
eof
check "修改nginx配置文件"

# Rewrite the keepalived configuration file
line "rewrite keepalived configuration"
cat > /etc/keepalived/keepalived.conf <<eof
global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id NGINX_MASTER
}

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 {
    state ${3}
    interface ens3  # change to the actual NIC name
    virtual_router_id 10 # VRRP router ID; must be unique per instance
    priority 100    # priority; set a lower value (e.g. 90) on the backup servers
    advert_int 2    # VRRP advertisement interval in seconds (default 1)
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    # virtual IP
    virtual_ipaddress {
        192.168.122.254/24 dev ens3
    }
    track_script {
        check_nginx
    }
}
eof
cat > /etc/keepalived/check_nginx.sh <<eof
#!/bin/bash
counter=\$(ps -ef | grep nginx | grep sbin | grep -Ecv "grep|\$\$")
if [ \$counter -eq 0 ]; then
    systemctl restart nginx
    sleep 3
    counter=\$(ps -ef | grep sbin/nginx | grep -Ecv "grep|\$\$")
    if [ \$counter -eq 0 ]; then
        systemctl stop keepalived
    fi
fi
eof
chmod u+x /etc/keepalived/check_nginx.sh
check "修改keepalived配置文件"

# Start nginx and keepalived
line "start nginx and keepalived"
systemctl daemon-reload && systemctl enable nginx --now && systemctl enable keepalived --now
check "start nginx and keepalived"

Verification

If the VIP shows up on the master node as below, keepalived is working:
[root@master-worker-node-1 ~]# ip add show ens3
2: ens3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 52:54:00:8d:61:59 brd ff:ff:ff:ff:ff:ff
    inet 192.168.122.6/24 brd 192.168.122.255 scope global dynamic noprefixroute ens3
       valid_lft 2589sec preferred_lft 2589sec
    inet 192.168.122.254/24 scope global secondary ens3
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fe8d:6159/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
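
It is also worth confirming that nginx is actually listening on the load-balancer port (a quick sketch, assuming ss from iproute is available):
ss -lntp | grep 16443    # expect an nginx process listening on port 16443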

Failover test

Simulate a keepalived failure on the current master node:
[root@master-worker-node-1 ~]# systemctl stop keepalived
[root@master-worker-node-1 ~]# 
[root@master-worker-node-1 ~]# exit
logout
Connection to 192.168.122.6 closed.

# The VIP fails over to node2
[22:31:41 remote-server root ~] # node2
Last login: Wed Nov 23 22:05:00 2022 from 192.168.122.1
[root@master-worker-node-2 ~]# ip add show ens3
2: ens3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 52:54:00:f0:ee:5c brd ff:ff:ff:ff:ff:ff
    inet 192.168.122.250/24 brd 192.168.122.255 scope global dynamic noprefixroute ens3
       valid_lft 2381sec preferred_lft 2381sec
    inet 192.168.122.254/24 scope global secondary ens3
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fef0:ee5c/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever

After the master node recovers, the VIP moves back to it.
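
To bring node1 back and confirm the failback (a sketch, assuming the same interface name):
[root@master-worker-node-1 ~]# systemctl start keepalived
[root@master-worker-node-1 ~]# ip add show ens3    # the 192.168.122.254 VIP should reappear here shortly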

Prepare the kubeadm init configuration file

Generate the default kubeadm configuration and tailor it as shown below.
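The defaults can be printed first and used as a starting point (a sketch; the output filename is arbitrary, and the file written below is the already-tailored result):
kubeadm config print init-defaults > /root/kubeadm-init-defaults.yaml
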
cat > /root/kubeadm-config.yaml << eof
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.20.6
controlPlaneEndpoint: 192.168.122.254:16443
imageRepository: registry.aliyuncs.com/google_containers
apiServer:
 certSANs:
 - 192.168.122.6
 - 192.168.122.250
 - 192.168.122.99
 - 192.168.122.254
networking:
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind:  KubeProxyConfiguration
mode: ipvs
eof

Pre-pull the required images with kubeadm

List which images the current kubeadm-config needs:
[root@master-worker-node-1 ~]# kubeadm config images list --config kubeadm-config.yaml 
registry.aliyuncs.com/google_containers/kube-apiserver:v1.20.6
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.20.6
registry.aliyuncs.com/google_containers/kube-scheduler:v1.20.6
registry.aliyuncs.com/google_containers/kube-proxy:v1.20.6
registry.aliyuncs.com/google_containers/pause:3.2
registry.aliyuncs.com/google_containers/etcd:3.4.13-0
registry.aliyuncs.com/google_containers/coredns:1.7.0
Pull the images in advance:
[root@master-worker-node-1 ~]# kubeadm config images pull --config kubeadm-config.yaml 
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.20.6
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.20.6
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.20.6
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.20.6
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.2
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.4.13-0
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:1.7.0

Add master nodes with kubeadm

Initialize the first master node

[root@master-worker-node-1 ~]# kubeadm init --config kubeadm-config.yaml --ignore-preflight-errors=SystemVerification
[init] Using Kubernetes version: v1.20.6
[preflight] Running pre-flight checks
        [WARNING FileExisting-tc]: tc not found in system path
        …………………………………………
        …………………………………………
        [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 192.168.122.254:16443 --token hkgnxo.gd1r3ewzvgd6perv \
    --discovery-token-ca-cert-hash sha256:33bedb26e346a8acf9f544746955eebeb8f4b008966f7ea897a26f92c3146dc6 \
    --control-plane 

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.122.254:16443 --token hkgnxo.gd1r3ewzvgd6perv \
    --discovery-token-ca-cert-hash sha256:33bedb26e346a8acf9f544746955eebeb8f4b008966f7ea897a26f92c3146dc6 
        
Generate the kubectl config file. This effectively authorizes kubectl, so the cluster can be managed with kubectl from this node:
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
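
A quick check that kubectl now reaches the apiserver through the VIP endpoint (a sketch):
kubectl cluster-info    # should report the control plane at https://192.168.122.254:16443
kubectl get nodes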

Add more K8s master nodes

A set of certificates was generated when master-worker-node-1 was initialized; the other master nodes can reuse them.
By default, the Kubernetes certificates live under /etc/kubernetes/pki.

[root@master-worker-node-1 pki]# pwd
/etc/kubernetes/pki
[root@master-worker-node-1 pki]# ls
apiserver.crt              apiserver.key                 ca.crt  front-proxy-ca.crt      front-proxy-client.key
apiserver-etcd-client.crt  apiserver-kubelet-client.crt  ca.key  front-proxy-ca.key      sa.key
apiserver-etcd-client.key  apiserver-kubelet-client.key  etcd    front-proxy-client.crt  sa.pub
Copy the certificates from master-worker-node-1 to the other two master nodes:
[root@master-worker-node-1 pki]# ssh root@192.168.122.250 mkdir -p /etc/kubernetes/pki/etcd
[root@master-worker-node-1 pki]# ssh root@192.168.122.250 mkdir -p ~/.kube
[root@master-worker-node-1 pki]# scp /etc/kubernetes/pki/ca.* root@192.168.122.250:/etc/kubernetes/pki
ca.crt                                                                                                100% 1066   277.3KB/s   00:00    
ca.key                                                                                                100% 1679   495.3KB/s   00:00    
[root@master-worker-node-1 pki]# scp /etc/kubernetes/pki/sa.* root@192.168.122.250:/etc/kubernetes/pki
sa.key                                                                                                100% 1679   428.8KB/s   00:00    
sa.pub                                                                                                100%  451    56.2KB/s   00:00    
[root@master-worker-node-1 pki]# scp /etc/kubernetes/pki/front-proxy-ca.* root@192.168.122.250:/etc/kubernetes/pki
front-proxy-ca.crt                                                                                    100% 1078   408.1KB/s   00:00    
front-proxy-ca.key                                                                                    100% 1675   689.4KB/s   00:00 
[root@master-worker-node-1 pki]# scp /etc/kubernetes/pki/etcd/ca.* root@192.168.122.250:/etc/kubernetes/pki/etcd
ca.crt                                                                                                100% 1058   195.1KB/s   00:00    
ca.key                                                                                                100% 1679   290.2KB/s   00:00    

[root@master-worker-node-1 pki]# ssh root@192.168.122.99 mkdir -p /etc/kubernetes/pki/etcd
[root@master-worker-node-1 pki]# ssh root@192.168.122.99 mkdir -p ~/.kube
[root@master-worker-node-1 pki]# scp /etc/kubernetes/pki/front-proxy-ca.* root@192.168.122.99:/etc/kubernetes/pki
front-proxy-ca.crt                                                                                    100% 1078   296.6KB/s   00:00    
front-proxy-ca.key                                                                                    100% 1675   309.2KB/s   00:00    
[root@master-worker-node-1 pki]# scp /etc/kubernetes/pki/sa.* root@192.168.122.99:/etc/kubernetes/pki
sa.key                                                                                                100% 1679   226.8KB/s   00:00    
sa.pub                                                                                                100%  451   161.5KB/s   00:00    
[root@master-worker-node-1 pki]# scp /etc/kubernetes/pki/ca.* root@192.168.122.99:/etc/kubernetes/pki
ca.crt                                                                                                100% 1066   329.8KB/s   00:00    
ca.key                                                                                                100% 1679   561.6KB/s   00:00    
[root@master-worker-node-1 pki]# scp /etc/kubernetes/pki/etcd/ca.* root@192.168.122.99:/etc/kubernetes/pki/etcd
ca.crt                                                                                                100% 1058   302.8KB/s   00:00    
ca.key                                                                                                100% 1679   583.0KB/s   00:00    
Print the join command:
[root@master-worker-node-1 ~]# kubeadm token create --print-join-command 
kubeadm join 192.168.122.254:16443 --token 68x43m.0d1hnv1e5tsgl3is     --discovery-token-ca-cert-hash sha256:33bedb26e346a8acf9f544746955eebeb8f4b008966f7ea897a26f92c3146dc6 
Join master-worker-node-2:

[root@master-worker-node-2 ~]# kubeadm join 192.168.122.254:16443 --token 68x43m.0d1hnv1e5tsgl3is     --discovery-token-ca-cert-hash sha256:33bedb26e346a8acf9f544746955eebeb8f4b008966f7ea897a26f92c3146dc6 --control-plane 
Join master-worker-node-3:

[root@master-worker-node-3 ~]# kubeadm join 192.168.122.254:16443 --token 68x43m.0d1hnv1e5tsgl3is     --discovery-token-ca-cert-hash sha256:33bedb26e346a8acf9f544746955eebeb8f4b008966f7ea897a26f92c3146dc6 --control-plane 
The nodes can now be listed on master-worker-node-1:

[root@master-worker-node-1 pki]# kubectl get nodes 
NAME                   STATUS     ROLES                  AGE   VERSION
master-worker-node-1   NotReady   control-plane,master   19m   v1.20.6
master-worker-node-2   NotReady   control-plane,master   12m   v1.20.6
master-worker-node-3   NotReady   control-plane,master   58s   v1.20.6
So that master-worker-node-2 and master-worker-node-3 can also use kubectl, copy the kubeconfig to them as well:

[root@master-worker-node-1 ~]# scp ~/.kube/config root@192.168.122.250:~/.kube/
config                                                                                               100% 5568   630.3KB/s   00:00    
[root@master-worker-node-1 ~]# scp ~/.kube/config root@192.168.122.99:~/.kube/
config  

Add a K8s worker node

[root@master-worker-node-1 ~]# kubeadm token create --print-join-command
kubeadm join 192.168.122.254:16443 --token 7orkzo.5715sgjmn3yxwl03     --discovery-token-ca-cert-hash sha256:644c04f9ba9608d3e216d639ae4255b68cbe4886022e626809e81755b44e3401 
[root@only-worker-node-4 ~]# kubeadm join 192.168.122.254:16443 --token 7orkzo.5715sgjmn3yxwl03     --discovery-token-ca-cert-hash sha256:644c04f9ba9608d3e216d639ae4255b68cbe4886022e626809e81755b44e3401 

Install the Calico network plugin

At this point the pods are not all healthy yet, because no network plugin has been installed:
[root@master-worker-node-1 pki]# kubectl get pods -n kube-system 
NAME                                           READY   STATUS    RESTARTS   AGE
coredns-7f89b7bc75-bfljn                       0/1     Pending   0          21m
coredns-7f89b7bc75-fpjpz                       0/1     Pending   0          21m
etcd-master-worker-node-1                      1/1     Running   0          21m
etcd-master-worker-node-2                      1/1     Running   0          13m
etcd-master-worker-node-3                      1/1     Running   0          3m17s
kube-apiserver-master-worker-node-1            1/1     Running   0          21m
kube-apiserver-master-worker-node-2            1/1     Running   1          12m
kube-apiserver-master-worker-node-3            1/1     Running   0          3m17s
kube-controller-manager-master-worker-node-1   1/1     Running   2          21m
kube-controller-manager-master-worker-node-2   1/1     Running   0          11m
kube-controller-manager-master-worker-node-3   1/1     Running   0          3m17s
kube-proxy-k7r9t                               1/1     Running   0          3m21s
kube-proxy-ngd5b                               1/1     Running   0          21m
kube-proxy-wj7h8                               1/1     Running   0          14m
kube-scheduler-master-worker-node-1            1/1     Running   2          21m
kube-scheduler-master-worker-node-2            1/1     Running   0          11m
kube-scheduler-master-worker-node-3            1/1     Running   0          3m17s
calico.yaml can be downloaded from the internet; once Calico is running, coredns starts and all nodes become Ready.
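A minimal sketch of fetching and applying the manifest (the URL is an assumption; pick a Calico release compatible with Kubernetes 1.20):
curl -LO https://docs.projectcalico.org/manifests/calico.yaml
kubectl apply -f calico.yaml
kubectl get pods -n kube-system -w    # wait until the calico-node pods are Running
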
[root@master-worker-node-1 ~]# kubectl get pods -A
NAMESPACE     NAME                                           READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-6949477b58-zn866       1/1     Running   0          3m52s
kube-system   calico-node-47ksq                              1/1     Running   0          3m53s
kube-system   calico-node-57xkc                              1/1     Running   0          3m53s
kube-system   calico-node-g4n7f                              1/1     Running   0          3m53s
kube-system   calico-node-gmt2s                              1/1     Running   0          3m53s
kube-system   coredns-7f89b7bc75-bfljn                       1/1     Running   0          10h
kube-system   coredns-7f89b7bc75-l2ndv                       1/1     Running   0          31s
kube-system   etcd-master-worker-node-1                      1/1     Running   0          10h
kube-system   etcd-master-worker-node-2                      1/1     Running   0          10h
kube-system   etcd-master-worker-node-3                      1/1     Running   0          10h
kube-system   kube-apiserver-master-worker-node-1            1/1     Running   0          10h
kube-system   kube-apiserver-master-worker-node-2            1/1     Running   1          10h
kube-system   kube-apiserver-master-worker-node-3            1/1     Running   0          10h
kube-system   kube-controller-manager-master-worker-node-1   1/1     Running   4          10h
kube-system   kube-controller-manager-master-worker-node-2   1/1     Running   2          10h
kube-system   kube-controller-manager-master-worker-node-3   1/1     Running   2          10h
kube-system   kube-proxy-k7r9t                               1/1     Running   0          10h
kube-system   kube-proxy-m8w7r                               1/1     Running   0          7m
kube-system   kube-proxy-ngd5b                               1/1     Running   0          10h
kube-system   kube-proxy-wj7h8                               1/1     Running   0          10h
kube-system   kube-scheduler-master-worker-node-1            1/1     Running   4          10h
kube-system   kube-scheduler-master-worker-node-2            1/1     Running   2          10h
kube-system   kube-scheduler-master-worker-node-3            1/1     Running   1          10h
[root@master-worker-node-1 ~]# kubectl get nodes 
NAME                   STATUS   ROLES                  AGE    VERSION
master-worker-node-1   Ready    control-plane,master   10h    v1.20.6
master-worker-node-2   Ready    control-plane,master   10h    v1.20.6
master-worker-node-3   Ready    control-plane,master   10h    v1.20.6
only-worker-node-4     Ready    <none>                 9m2s   v1.20.6


Add a role label to the worker node so the ROLES column looks nicer:
[root@master-worker-node-1 ~]# kubectl label node only-worker-node-4 node-role.kubernetes.io/worker=only-worker
node/only-worker-node-4 labeled
[root@master-worker-node-1 ~]# kubectl get nodes 
NAME                   STATUS   ROLES                  AGE     VERSION
master-worker-node-1   Ready    control-plane,master   10h     v1.20.6
master-worker-node-2   Ready    control-plane,master   10h     v1.20.6
master-worker-node-3   Ready    control-plane,master   10h     v1.20.6
only-worker-node-4     Ready    worker                 9m16s   v1.20.6

Test the environment

Create a pod

[root@master-worker-node-1 pod-yaml]# cat test-busybox.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: test-busybox
  labels:
    func: test
spec:
  containers:
  - name: test-busybox
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    command: ['/bin/sh','-c','sleep 46434'] 
    
[root@master-worker-node-1 pod-yaml]# kubectl apply -f test-busybox.yaml 
pod/test-busybox created

[root@master-worker-node-1 pod-yaml]# kubectl get pods -o wide
NAME           READY   STATUS    RESTARTS   AGE   IP            NODE                 NOMINATED NODE   READINESS GATES
test-busybox   1/1     Running   0          80s   10.244.54.2   only-worker-node-4   <none>           <none>
[root@master-worker-node-1 pod-yaml]# kubectl exec -it test-busybox sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # ping -c 2 www.baidu.com
PING www.baidu.com (110.242.68.4): 56 data bytes
64 bytes from 110.242.68.4: seq=0 ttl=49 time=42.766 ms
64 bytes from 110.242.68.4: seq=1 ttl=49 time=42.831 ms

--- www.baidu.com ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 42.766/42.798/42.831 ms
Test CoreDNS:
/ # nslookup kubernetes.default.svc.cluster.local
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes.default.svc.cluster.local
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local


The resolved addresses match the kube-dns service and the kubernetes service:

[root@master-worker-node-1 ~]# kubectl get svc -n kube-system -o wide
NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE   SELECTOR
kube-dns   ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP,9153/TCP   12h   k8s-app=kube-dns


[root@master-worker-node-1 ~]# kubectl get svc -n default -o wide 
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE   SELECTOR
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   12h   <none>

Extend the apiserver certificate validity

The CA certificate is valid for 10 years by default, but the leaf certificates signed during a kubeadm deployment are only valid for one year; once they expire, the cluster API can no longer be reached with them.
[root@master-worker-node-1 ~]# openssl x509 -in /etc/kubernetes/pki/front-proxy-client.crt -noout -text | grep Not
            Not Before: Nov 23 15:19:28 2022 GMT
            Not After : Nov 23 15:19:29 2023 GMT
[root@master-worker-node-1 ~]# openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text | grep Not
            Not Before: Nov 23 15:19:27 2022 GMT
            Not After : Nov 23 15:19:27 2023 GMT
            
[root@master-worker-node-1 ~]# openssl x509 -in /etc/kubernetes/pki/ca.crt -noout -text | grep Not
            Not Before: Nov 23 15:19:27 2022 GMT
            Not After : Nov 20 15:19:27 2032 GMT
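
One option (a renewal rather than a true extension, sketched here for kubeadm 1.20) is to re-sign the kubeadm-managed certificates for another year on every master node, then restart the control-plane static pods and refresh the admin kubeconfig:
kubeadm certs check-expiration
kubeadm certs renew all
# restart kube-apiserver, kube-controller-manager, kube-scheduler and etcd so they pick up the new certs,
# e.g. by briefly moving the static pod manifests out of /etc/kubernetes/manifests and back,
# then copy the refreshed /etc/kubernetes/admin.conf to ~/.kube/config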