centos7安装k8s-1.25

11/26/2022 k8s

安装版本为1.25,该版本已不再支持docker,需安装containerd。准备两台或以上机器。

# 所有机器设为静态IP

vim /etc/sysconfig/network-scripts/ifcfg-eth0

TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="static"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="eth0"
UUID="e59abb76-b2a3-4051-977b-5d86652cff85"
DEVICE="eth0"
ONBOOT="yes"
IPADDR="192.168.137.58"
PREFIX="24"
GATEWAY="192.168.137.1"
DNS1="192.168.137.1"
IPV6_PRIVACY="no"

systemctl restart network

# 停用selinux

# Temporarily disable SELinux (effective immediately, lost on reboot)
setenforce 0

# Permanently disable SELinux (takes effect after reboot)
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

# Show current SELinux status (expect "Permissive" after setenforce 0)
getenforce

# 配置机器主机名,每台机器设置各自不同的主机名

#在k8s-master设置
hostnamectl set-hostname k8s-master

#在k8s-node1设置
hostnamectl set-hostname k8s-node1

# 配置host,所有机器都相同

vim /etc/hosts

192.168.137.58 k8s-master
192.168.137.172 k8s-node1

# 配置主机间免密访问

[root@k8s-master ~]# ssh-keygen #一直回车

[root@k8s-node1 ~]# ssh-keygen #一直回车

[root@k8s-master ~]# ssh-copy-id k8s-node1
Are you sure you want to continue connecting (yes/no)? yes

[root@k8s-node1 ~]# ssh-copy-id k8s-master
Are you sure you want to continue connecting (yes/no)? yes

[root@k8s-master ~]# ssh k8s-node1
Last login: Sat Nov 26 22:47:32 2022 from desktop-bmvosv7.mshome.net

[root@k8s-node1 ~]# ssh k8s-master
Last login: Sat Nov 26 22:42:21 2022 from desktop-bmvosv7.mshome.net

# 关闭交换分区,所有机器都相同

# Temporarily disable swap (kubelet refuses to start with swap enabled)
swapoff -a

# Permanently disable swap: comment out the swap entry in /etc/fstab
vim /etc/fstab
#/dev/mapper/centos-swap swap swap defaults 0 0 

# 修改机器内核参数,所有机器都相同

# Install ipset and ipvsadm (package name was misspelled "ipvsdam", which
# would make the yum install fail)
yum install -y ipset ipvsadm

# Configure which IPVS kernel modules to load at boot
cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
# Verify the modules loaded (was "ns_conntrack" — typo for nf_conntrack,
# so the check would never match the conntrack module)
lsmod | grep -e ip_vs -e nf_conntrack
modprobe br_netfilter

# Kernel parameters required by Kubernetes networking
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

sysctl -p /etc/sysctl.d/k8s.conf

# 关闭防火墙,所有机器都相同

systemctl stop firewalld && systemctl disable firewalld

# 配置docker-ce安装源,所有机器都相同

yum -y install yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
yum makecache fast

# 配置k8s安装源,所有机器都相同

# Aliyun mirror repo for kubelet/kubeadm/kubectl
# NOTE(review): gpgcheck=0 disables RPM signature verification — consider
# enabling gpgcheck with the repository's GPG keys for production use.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF

yum makecache fast

# 配置时间同步,所有机器都相同

yum -y install chrony
systemctl enable chronyd
systemctl start chronyd

#修改配置文件
vim /etc/chrony.conf
server s1a.time.edu.cn iburst
server ntp.aliyun.com iburst

timedatectl set-timezone Asia/Shanghai

systemctl enable chronyd
systemctl restart chronyd

# 查看时间同步状态
timedatectl status
# 开启网络时间同步
timedatectl set-ntp true

# 安装基础包,所有机器都相同

# Base utility/build packages ("ipvsadm" was listed twice; duplicate removed).
# NOTE(review): 'ntp' is installed here while chrony is configured above for
# time sync — confirm ntpd is not enabled alongside chronyd.
yum install -y net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip ntp libaio-devel ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack telnet

# 安装containerd,所有机器都相同

yum install -y containerd.io-1.6.6

#修改配置文件
containerd config default > /etc/containerd/config.toml

vim /etc/containerd/config.toml
SystemdCgroup = true
sandbox_image="registry.aliyuncs.com/google_containers/pause:3.7"
systemctl enable containerd --now

cat <<EOF > /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF

systemctl restart containerd

# 安装docker-ce,用于构建dockerfile,不参与k8s,所有机器都相同

yum -y install docker-ce
systemctl enable docker --now

# 配置docker加速,所有机器都相同

mkdir -p /etc/docker
tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://2s0nvfyb.mirror.aliyuncs.com"]
}
EOF
systemctl daemon-reload
systemctl restart docker

# 配置containerd加速,所有机器都相同

vim /etc/containerd/config.toml
config_path = "/etc/containerd/certs.d"

mkdir -p /etc/containerd/certs.d/docker.io/
vim /etc/containerd/certs.d/docker.io/hosts.toml
[host."https://2s0nvfyb.mirror.aliyuncs.com"]
 capabilities = ["pull"]
 
systemctl restart containerd

# 安装k8s,所有机器都相同

yum install -y kubelet-1.25.0 kubeadm-1.25.0 kubectl-1.25.0
systemctl enable kubelet

# 初始化k8s配置文件

#设置容器运行时
[root@k8s-master ~]# crictl config runtime-endpoint /run/containerd/containerd.sock
[root@k8s-node1 ~]# crictl config runtime-endpoint /run/containerd/containerd.sock

[root@k8s-master ~]# kubeadm config print init-defaults > kubeadm.yaml
[root@k8s-master ~]# vim kubeadm.yaml

advertiseAddress: 192.168.137.58  #控制节点

nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: k8s-master
  
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers

kubernetesVersion: 1.25.0
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16  #增加
  serviceSubnet: 10.96.0.0/12
  
#文件最后增加(复制时,要带着---)
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd

# 拉取镜像(可以新建脚本运行),所有机器都相同

# Pre-pull control-plane images into containerd's k8s.io namespace (all machines).
# NOTE(review): both etcd-amd64:3.4.7-0 and etcd:3.5.4-0 are pulled below;
# kubeadm 1.25.0 uses etcd 3.5.4-0, so the 3.4.7-0 pull looks redundant — confirm.
# Likewise both pause:3.7 (containerd sandbox_image) and pause:3.8 (kubeadm default)
# are pulled intentionally.
ctr -n k8s.io i pull -k registry.aliyuncs.com/google_containers/pause:3.7 
ctr -n k8s.io i pull -k registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.9.3 
ctr -n k8s.io i pull -k registry.cn-hangzhou.aliyuncs.com/google_containers/etcd-amd64:3.4.7-0 
ctr -n k8s.io i pull -k registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.4-0 
ctr -n k8s.io i pull -k registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.25.0 
ctr -n k8s.io i pull -k registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.25.0 
ctr -n k8s.io i pull -k registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.25.0 
ctr -n k8s.io i pull -k registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.25.0 
ctr -n k8s.io i pull -k registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.8

# 初始化k8s

[root@k8s-master ~]# kubeadm init --config=kubeadm.yaml --ignore-preflight-errors=SystemVerification

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.137.58:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:b851e622634ae6e89cc5f9ca324e215c4393b24832bf1ea7f8c0501bdda51ef8
	
[root@k8s-master ~]# mkdir -p $HOME/.kube
[root@k8s-master ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master ~]# chown $(id -u):$(id -g) $HOME/.kube/config

[root@k8s-node1 ~]# kubeadm join 192.168.137.58:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:b851e622634ae6e89cc5f9ca324e215c4393b24832bf1ea7f8c0501bdda51ef8

[root@k8s-master ~]# kubectl get nodes
NAME         STATUS     ROLES           AGE     VERSION
k8s-master   NotReady   control-plane   5m41s   v1.25.0
k8s-node1    NotReady   <none>          5s      v1.25.0

#给node1打标签
[root@k8s-master ~]# kubectl label nodes k8s-node1 node-role.kubernetes.io/work=work
node/k8s-node1 labeled
[root@k8s-master ~]# kubectl get nodes
NAME         STATUS     ROLES           AGE     VERSION
k8s-master   NotReady   control-plane   10m     v1.25.0
k8s-node1    NotReady   work            4m43s   v1.25.0

# 拉取Calico网络组件镜像,所有机器都相同(可以新建脚本运行)

# Pre-pull Calico images into containerd's k8s.io namespace (all machines).
# NOTE(review): Calico v3.18.0 predates Kubernetes 1.25; the documented
# compatible series is v3.24+ — confirm this version actually works here and
# matches the calico.yaml applied below.
ctr -n k8s.io i pull -k docker.io/calico/cni:v3.18.0
ctr -n k8s.io i pull -k docker.io/calico/kube-controllers:v3.18.0
ctr -n k8s.io i pull -k docker.io/calico/node:v3.18.0
ctr -n k8s.io i pull -k docker.io/calico/pod2daemon-flexvol:v3.18.0

# 运行Calico组件

[root@k8s-master ~]# kubectl apply -f https://gitee.com/thomas408003146/docs/raw/master/calico.yaml
[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES           AGE   VERSION
k8s-master   Ready    control-plane   25m   v1.25.0
k8s-node1    Ready    work            19m   v1.25.0

# 测试k8s的pod

[root@k8s-master ~]# kubectl run busybox --image docker.io/library/busybox:1.28 --image-pull-policy=IfNotPresent --restart=Never --rm -it busybox -- sh
If you don't see a command prompt, try pressing enter.
/ # ping www.baidu.com
PING www.baidu.com (14.215.177.39): 56 data bytes
64 bytes from 14.215.177.39: seq=0 ttl=55 time=4.391 ms
64 bytes from 14.215.177.39: seq=1 ttl=55 time=4.343 ms
64 bytes from 14.215.177.39: seq=2 ttl=55 time=4.203 ms
64 bytes from 14.215.177.39: seq=3 ttl=55 time=4.483 ms
64 bytes from 14.215.177.39: seq=4 ttl=55 time=4.502 ms

--- www.baidu.com ping statistics ---
5 packets transmitted, 5 packets received, 0% packet loss
round-trip min/avg/max = 4.203/4.384/4.502 ms

/ # nslookup kubernetes.default.svc.cluster.local
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes.default.svc.cluster.local
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
/ # 
Last Updated: 1/20/2023, 10:44:54 AM