System Installation

Install Docker

1. Download the official repo

curl -o /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo

I'm not sure which release openEuler 23.09 is derived from, so just use the centos 8 repo; it has worked so far:

sed -i 's/$releasever/8/g' /etc/yum.repos.d/docker-ce.repo
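A quick grep confirms the $releasever substitution took effect (same repo file as above):

grep baseurl /etc/yum.repos.d/docker-ce.repo | head -n 3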

2. Install

yum install -y docker-ce

3. Configure domestic registry mirrors (for hosts in mainland China)

sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": [
    "https://dockerproxy.com",
    "https://hub-mirror.c.163.com",
    "https://mirror.baidubce.com",
    "https://ccr.ccs.tencentyun.com"
  ]
}
EOF

4. Start Docker

systemctl daemon-reload && systemctl start docker && systemctl enable docker
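To confirm the daemon is up and the mirrors were picked up, a quick check (the grep pattern assumes current docker info output; adjust if your version prints differently):

systemctl is-active docker
docker info | grep -A 4 "Registry Mirrors"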

Install cri-dockerd

1. Download the latest cri-dockerd rpm package
If your network is fast, download it directly with wget; otherwise download it from GitHub first and then upload it to the VM.
Download link: cri-dockerd (domestic mirror)
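For the direct route, a wget sketch; the URL is assumed from the upstream GitHub release layout for the same 0.3.9 rpm installed below:

wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.9/cri-dockerd-0.3.9-3.el8.x86_64.rpm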
2. Install cri-dockerd

chmod +x cri-dockerd-0.3.9-3.el8.x86_64.rpm && rpm -ivh cri-dockerd-0.3.9-3.el8.x86_64.rpm

3. Start the cri-docker service

systemctl start cri-docker && systemctl enable cri-docker
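If the service fails to come up, the recent unit logs usually say why (unit name as installed by the rpm):

journalctl -u cri-docker --no-pager -n 20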

4. Point cri-dockerd at a domestic pause image

nano /usr/lib/systemd/system/cri-docker.service

Find the ExecStart= line (line 10 of the file) and change it to:

ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9

The resulting unit file:

[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket

[Service]
Type=notify
#ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd://
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always

# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3

# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target

Restart the Docker components

systemctl daemon-reload && systemctl restart docker cri-docker.socket cri-docker

Check the status of the Docker components

systemctl status docker cri-docker.socket cri-docker
● docker.service - Docker Application Container Engine
Loaded: loaded (/usr/lib/systemd/system/docker.service; enabled; preset: disabled)
Active: active (running) since Thu 2024-01-18 15:40:54 CST; 24min ago
TriggeredBy: ● docker.socket
Docs: https://docs.docker.com
Main PID: 1262 (dockerd)
Tasks: 22
Memory: 319.0M
CGroup: /system.slice/docker.service
└─1262 /usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock

1月 18 15:42:33 k8s-node-1 dockerd[1262]: time="2024-01-18T15:42:33.982657049+08:00" level=warning msg="IPv4 forwarding is disabled. Networking will not work."
1月 18 15:42:34 k8s-node-1 dockerd[1262]: time="2024-01-18T15:42:34.051017109+08:00" level=warning msg="IPv4 forwarding is disabled. Networking will not work."
1月 18 15:42:34 k8s-node-1 dockerd[1262]: time="2024-01-18T15:42:34.202929075+08:00" level=info msg="ignoring event" container=3f7750a04ad51e9e188dc52c2e496e821a5d212486c59968c9524b797f21023d module=libcontainerd namespace=moby topic=/tasks>
1月 18 15:42:38 k8s-node-1 dockerd[1262]: time="2024-01-18T15:42:38.000834506+08:00" level=warning msg="IPv4 forwarding is disabled. Networking will not work."
1月 18 15:42:38 k8s-node-1 dockerd[1262]: time="2024-01-18T15:42:38.769874363+08:00" level=warning msg="IPv4 forwarding is disabled. Networking will not work."
1月 18 15:43:06 k8s-node-1 dockerd[1262]: time="2024-01-18T15:43:06.086291590+08:00" level=warning msg="IPv4 forwarding is disabled. Networking will not work."
1月 18 15:43:06 k8s-node-1 dockerd[1262]: time="2024-01-18T15:43:06.762726236+08:00" level=warning msg="IPv4 forwarding is disabled. Networking will not work."
1月 18 15:43:06 k8s-node-1 dockerd[1262]: time="2024-01-18T15:43:06.936934129+08:00" level=info msg="ignoring event" container=c4153a7465765f810eb360c193b924fce037dab328067ab3d902c1db2e6bd416 module=libcontainerd namespace=moby topic=/tasks>
1月 18 15:43:07 k8s-node-1 dockerd[1262]: time="2024-01-18T15:43:07.268700964+08:00" level=warning msg="IPv4 forwarding is disabled. Networking will not work."
1月 18 15:43:07 k8s-node-1 dockerd[1262]: time="2024-01-18T15:43:07.392654967+08:00" level=warning msg="IPv4 forwarding is disabled. Networking will not work."

● cri-docker.service - CRI Interface for Docker Application Container Engine
Loaded: loaded (/usr/lib/systemd/system/cri-docker.service; enabled; preset: disabled)
Active: active (running) since Thu 2024-01-18 15:40:54 CST; 24min ago
TriggeredBy: ● cri-docker.socket
Docs: https://docs.mirantis.com
Main PID: 1866 (cri-dockerd)
Tasks: 21
Memory: 78.7M
CGroup: /system.slice/cri-docker.service
└─1866 /usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9

1月 18 15:42:16 k8s-node-1 cri-dockerd[1866]: time="2024-01-18T15:42:16+08:00" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.1.0/24,},}"
1月 18 15:42:17 k8s-node-1 cri-dockerd[1866]: time="2024-01-18T15:42:17+08:00" level=info msg="Pulling the image without credentials. Image: registry.aliyuncs.com/google_containers/pause:3.9"
1月 18 15:42:18 k8s-node-1 cri-dockerd[1866]: time="2024-01-18T15:42:18+08:00" level=info msg="Stop pulling image registry.aliyuncs.com/google_containers/pause:3.9: Status: Downloaded newer image for registry.aliyuncs.com/google_containers/>
1月 18 15:42:18 k8s-node-1 cri-dockerd[1866]: time="2024-01-18T15:42:18+08:00" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0a70c49dc956704388871abce0d4525b03cd8fc1c5a01e9f36a0aee03b5cf287/resolv.conf as [>
1月 18 15:42:32 k8s-node-1 cri-dockerd[1866]: time="2024-01-18T15:42:32+08:00" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/883f418c2813b6049bb63d25e6bc8f1ec1d5d0cf7d5c2a855056139990458ef0/resolv.conf as [>
1月 18 15:42:33 k8s-node-1 cri-dockerd[1866]: time="2024-01-18T15:42:33+08:00" level=info msg="Stop pulling image docker.io/flannel/flannel-cni-plugin:v1.2.0: Status: Downloaded newer image for flannel/flannel-cni-plugin:v1.2.0"
1月 18 15:42:37 k8s-node-1 cri-dockerd[1866]: time="2024-01-18T15:42:37+08:00" level=info msg="Stop pulling image registry.aliyuncs.com/google_containers/kube-proxy:v1.28.6: Status: Downloaded newer image for registry.aliyuncs.com/google_co>
1月 18 15:42:54 k8s-node-1 cri-dockerd[1866]: time="2024-01-18T15:42:54+08:00" level=info msg="Pulling image docker.io/flannel/flannel:v0.24.1: a17abc86bef5: Downloading [=> ] 196.2kB/5.953MB"
1月 18 15:43:04 k8s-node-1 cri-dockerd[1866]: time="2024-01-18T15:43:04+08:00" level=info msg="Pulling image docker.io/flannel/flannel:v0.24.1: 020fedbf387d: Downloading [==> ] 48.7kB/833kB"
1月 18 15:43:06 k8s-node-1 cri-dockerd[1866]: time="2024-01-18T15:43:06+08:00" level=info msg="Stop pulling image docker.io/flannel/flannel:v0.24.1: Status: Downloaded newer image for flannel/flannel:v0.24.1"

Install the Kubernetes Components

Environment Preparation

Set the hostname

Set the hostname to match each node's role:

hostnamectl set-hostname k8s-master && exec bash

Disable swap

# Disable the swap partition, if one exists
swapoff -a
vi /etc/fstab # Permanently disable swap: comment out the line in fstab that contains swap
# /dev/mapper/centos-swap swap swap defaults 0 0
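If you prefer not to edit fstab by hand, a one-liner sketch that comments out any active swap entry (assumes a standard fstab layout; review the file afterwards):

sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab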

Disable firewalld and SELinux

systemctl stop firewalld && systemctl disable firewalld && setenforce 0 && sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
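getenforce should now report Permissive:

getenforce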

Enable IPv4 forwarding and let iptables see bridged traffic

cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe overlay && modprobe br_netfilter

Set the required sysctl parameters; these persist across reboots:

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF

Apply the sysctl parameters without rebooting, then verify the modules and settings:

sudo sysctl --system
lsmod | grep br_netfilter
lsmod | grep overlay
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward

Reboot the server
If SELinux was previously enabled, a reboot is required for the change to fully apply.

Configure the Kubernetes repo

This overwrites any existing configuration in /etc/yum.repos.d/kubernetes.repo.

cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.28/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.28/rpm/repodata/repomd.xml.key
#exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF

Install kubelet, kubeadm, kubectl, and kubernetes-cni

yum install -y kubelet kubeadm kubectl kubernetes-cni && systemctl enable kubelet.service
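The v1.28 repo installs the latest 1.28.x patch release; it's worth confirming the installed versions line up before initializing:

kubeadm version -o short
kubelet --version
kubectl version --client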

Initialize the Cluster

Replace the --apiserver-advertise-address value with this node's IP.
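
Optionally, pre-pull the control-plane images first so init doesn't stall on downloads; this sketch reuses the same image repository and CRI socket as the init command:

kubeadm config images pull --image-repository=registry.aliyuncs.com/google_containers --cri-socket=unix:///var/run/cri-dockerd.sock

Then run the initialization itself: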

kubeadm init --node-name=k8s-master \
--image-repository=registry.aliyuncs.com/google_containers \
--cri-socket=unix:///var/run/cri-dockerd.sock \
--apiserver-advertise-address=192.168.2.200 \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12

If init still reports an iptables error, run:

echo "1">/proc/sys/net/bridge/bridge-nf-call-iptables && echo "1">/proc/sys/net/ipv4/ip_forward

Output like the following means the control plane deployed successfully:

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.2.200:6443 --token spoehm.qdor5eelbq3xmfth \
--discovery-token-ca-cert-hash sha256:2dfb9813b111f61b1f639135f4c309ac74653860b3160850aea37fcef4a64a3d

Following the output above, run:

mkdir -p $HOME/.kube && sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config && sudo chown $(id -u):$(id -g) $HOME/.kube/config

Check the nodes

kubectl get nodes

If initialization fails and you need to initialize again, reset the cluster first:

kubeadm reset --cri-socket=unix:///var/run/cri-dockerd.sock
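kubeadm reset leaves some state behind; its own output suggests cleaning up the CNI config and stale kubeconfig manually before re-running init:

rm -rf /etc/cni/net.d $HOME/.kube/config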

Install the Network Plugin

After the steps above, kubectl get nodes still shows the node in NotReady state; a network plugin must be installed.
Download kube-flannel.yml from here:
kube-flannel
Then apply it directly:

kubectl apply -f kube-flannel.yml

Alternatively, copy the manifest below:

apiVersion: v1
kind: Namespace
metadata:
  labels:
    k8s-app: flannel
    pod-security.kubernetes.io/enforce: privileged
  name: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: flannel
  name: flannel
  namespace: kube-flannel
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: flannel
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - networking.k8s.io
  resources:
  - clustercidrs
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: flannel
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
kind: ConfigMap
metadata:
  labels:
    app: flannel
    k8s-app: flannel
    tier: node
  name: kube-flannel-cfg
  namespace: kube-flannel
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app: flannel
    k8s-app: flannel
    tier: node
  name: kube-flannel-ds
  namespace: kube-flannel
spec:
  selector:
    matchLabels:
      app: flannel
      k8s-app: flannel
  template:
    metadata:
      labels:
        app: flannel
        k8s-app: flannel
        tier: node
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      containers:
      - args:
        - --ip-masq
        - --kube-subnet-mgr
        command:
        - /opt/bin/flanneld
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        image: docker.io/flannel/flannel:v0.24.1
        name: kube-flannel
        resources:
          requests:
            cpu: 100m
            memory: 50Mi
        securityContext:
          capabilities:
            add:
            - NET_ADMIN
            - NET_RAW
          privileged: false
        volumeMounts:
        - mountPath: /run/flannel
          name: run
        - mountPath: /etc/kube-flannel/
          name: flannel-cfg
        - mountPath: /run/xtables.lock
          name: xtables-lock
      hostNetwork: true
      initContainers:
      - args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        command:
        - cp
        image: docker.io/flannel/flannel-cni-plugin:v1.2.0
        name: install-cni-plugin
        volumeMounts:
        - mountPath: /opt/cni/bin
          name: cni-plugin
      - args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        command:
        - cp
        image: docker.io/flannel/flannel:v0.24.1
        name: install-cni
        volumeMounts:
        - mountPath: /etc/cni/net.d
          name: cni
        - mountPath: /etc/kube-flannel/
          name: flannel-cfg
      priorityClassName: system-node-critical
      serviceAccountName: flannel
      tolerations:
      - effect: NoSchedule
        operator: Exists
      volumes:
      - hostPath:
          path: /run/flannel
        name: run
      - hostPath:
          path: /opt/cni/bin
        name: cni-plugin
      - hostPath:
          path: /etc/cni/net.d
        name: cni
      - configMap:
          name: kube-flannel-cfg
        name: flannel-cfg
      - hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
        name: xtables-lock

Once the flannel pods are running, if the node is still NotReady, install kubernetes-cni:

yum install -y kubernetes-cni && ls -lh /opt/cni/bin
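Either way, watch the flannel pods come up and the node flip to Ready (namespace as defined in the manifest above):

kubectl get pods -n kube-flannel
kubectl get nodes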

Configure the Worker Nodes

Set up the base node environment by following steps 1 through 4.3 above.
Then join the cluster with the command below.

Note: you must append --cri-socket /var/run/cri-dockerd.sock to specify the container runtime, otherwise the command fails.

kubeadm join 192.168.2.200:6443 --token spoehm.qdor5eelbq3xmfth \
--discovery-token-ca-cert-hash sha256:2dfb9813b111f61b1f639135f4c309ac74653860b3160850aea37fcef4a64a3d \
--cri-socket /var/run/cri-dockerd.sock
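Bootstrap tokens expire after 24 hours by default; if this one has lapsed, print a fresh join command on the master (append the --cri-socket flag to it again):

kubeadm token create --print-join-command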

If join reports an iptables error, run:

echo "1">/proc/sys/net/bridge/bridge-nf-call-iptables && echo "1">/proc/sys/net/ipv4/ip_forward

Deploy the Dashboard (master only)

Refer to section 5 of the kubernetes notes.

Change the NodePort Range

Edit the kube-apiserver.yaml file:

nano /etc/kubernetes/manifests/kube-apiserver.yaml

Find the --service-cluster-ip-range line and add the following directly below it:

- --service-node-port-range=1-65535

The modified file should look like this:

apiVersion: v1
kind: Pod
metadata:
  annotations:
    kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 192.168.2.200:6443
  creationTimestamp: null
  labels:
    component: kube-apiserver
    tier: control-plane
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-apiserver
    - --advertise-address=192.168.2.200
    - --allow-privileged=true
    - --authorization-mode=Node,RBAC
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --enable-admission-plugins=NodeRestriction
    - --enable-bootstrap-token-auth=true
    - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
    - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
    - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
    - --etcd-servers=https://127.0.0.1:2379
    - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
    - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
    - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
    - --requestheader-allowed-names=front-proxy-client
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --requestheader-extra-headers-prefix=X-Remote-Extra-
    - --requestheader-group-headers=X-Remote-Group
    - --requestheader-username-headers=X-Remote-User
    - --secure-port=6443
    - --service-account-issuer=https://kubernetes.default.svc.cluster.local
    - --service-account-key-file=/etc/kubernetes/pki/sa.pub
    - --service-account-signing-key-file=/etc/kubernetes/pki/sa.key
    - --service-cluster-ip-range=10.96.0.0/12
    - --service-node-port-range=1-65535
    - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
    - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
    image: registry.aliyuncs.com/google_containers/kube-apiserver:v1.28.6
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 192.168.2.200
        path: /livez
        port: 6443
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    name: kube-apiserver
    readinessProbe:
      failureThreshold: 3
      httpGet:
        host: 192.168.2.200
        path: /readyz
        port: 6443
        scheme: HTTPS
      periodSeconds: 1
      timeoutSeconds: 15
    resources:
      requests:
        cpu: 250m
    startupProbe:
      failureThreshold: 24
      httpGet:
        host: 192.168.2.200
        path: /livez
        port: 6443
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ca-certs
      readOnly: true
    - mountPath: /etc/pki
      name: etc-pki
      readOnly: true
    - mountPath: /etc/kubernetes/pki
      name: k8s-certs
      readOnly: true
  hostNetwork: true
  priority: 2000001000
  priorityClassName: system-node-critical
  securityContext:
    seccompProfile:
      type: RuntimeDefault
  volumes:
  - hostPath:
      path: /etc/ssl/certs
      type: DirectoryOrCreate
    name: ca-certs
  - hostPath:
      path: /etc/pki
      type: DirectoryOrCreate
    name: etc-pki
  - hostPath:
      path: /etc/kubernetes/pki
      type: DirectoryOrCreate
    name: k8s-certs
status: {}

Finally, restart the kubelet

systemctl daemon-reload && systemctl restart kubelet
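The kubelet re-creates the static apiserver pod; confirm it is back up with the new flag (label assumed from the kubeadm manifest above):

kubectl -n kube-system get pod -l component=kube-apiserver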