我是靠谱客的博主 淡淡镜子,最近开发中收集的这篇文章主要介绍K8s脱坑前的挣扎——k8s单节点群集部署(3)Master及Node部署一、部署Master组件二、部署Node组件,觉得挺不错的,现在分享给大家,希望可以做个参考。
概述
前言:在之前,已经部署好了Etcd以及Flannel实现了集群中的不同节点主机创建的Docker容器都具有全集群唯一的虚拟IP地址,下面将部署Master以及Node实现单master的二进制群集
文章目录
- 一、部署Master组件
- 二、部署Node组件
- 1.node1部署
- 2.node2配置
一、部署Master组件
- 需要部署的组件
- 部署APIServer组件(token.csv)
- 部署controller-manager(指定apiserver证书)和scheduler组件
- 在master上生成api-server证书
[root@master01 k8s]# rz -E
rz waiting to receive.
[root@master01 k8s]# unzip master.zip //包含三个组件的脚本
Archive: master.zip
inflating: apiserver.sh
inflating: controller-manager.sh
inflating: scheduler.sh
[root@master01 k8s]# ls
apiserver.sh etcd-sert etcd-v3.3.10-linux-amd64.tar.gz master.zip
cfssl.sh etcd.sh flannel-v0.10.0-linux-amd64.tar.gz scheduler.sh
controller-manager.sh etcd-v3.3.10-linux-amd64 kubernetes-server-linux-amd64.tar.gz
[root@master01 k8s]# chmod +x controller-manager.sh
[root@master01 k8s]# mkdir k8s-sert //创建k8s证书目录
[root@master01 k8s]# cd k8s-sert/
[root@master01 k8s-sert]# rz -E
rz waiting to receive.
[root@master01 k8s-sert]# ls
k8s-cert.sh //生成证书的脚本
- 脚本的内容如下
[root@master01 k8s-sert]# vim k8s-cert.sh
cat > ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
cat > ca-csr.json <<EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
#-----------------------
cat > server-csr.json <<EOF
{
"CN": "kubernetes",
"hosts": [
"10.0.0.1",
"192.168.170.129",
"192.168.170.100",
"192.168.170.134",
"192.168.170.131",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
#-----------------------
cat > admin-csr.json <<EOF
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
#-----------------------
cat > kube-proxy-csr.json <<EOF
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
- 需要修改的地方
cat > server-csr.json <<EOF
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"192.168.170.128", //master01
"192.168.170.129", //master02
"192.168.170.100", //vip 公共访问入口
"192.168.170.134", //lb (master)
"192.168.170.131", //lb (backup)
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
- 执行脚本,产生8张证书
[root@master01 k8s-sert]# bash k8s-cert.sh
[root@master01 k8s-sert]# ls *pem //8张证书
admin-key.pem admin.pem ca-key.pem ca.pem kube-proxy-key.pem kube-proxy.pem server-key.pem server.pem
[root@master01 k8s]# mkdir -p /opt/kubernetes/{cfg,bin,ssl} //创建命令、配置和证书文件
[root@master01 k8s-sert]# cp ca*pem server*pem /opt/kubernetes/ssl/ //将证书复制过去
[root@master01 k8s-sert]# ls /opt/kubernetes/ssl/
ca-key.pem ca.pem server-key.pem server.pem
- 解压k8s压缩包
[root@master01 k8s]# tar zxvf kubernetes-server-linux-amd64.tar.gz
//解压k8s压缩包
[root@master01 k8s]# cd kubernetes/server/bin/
[root@master01 bin]# ls
apiextensions-apiserver kube-apiserver.docker_tag kube-proxy
cloud-controller-manager kube-apiserver.tar kube-proxy.docker_tag
cloud-controller-manager.docker_tag kube-controller-manager kube-proxy.tar
cloud-controller-manager.tar kube-controller-manager.docker_tag kube-scheduler
hyperkube kube-controller-manager.tar kube-scheduler.docker_tag
kubeadm kubectl kube-scheduler.tar
kube-apiserver kubelet mounter
//复制关键命令文件
[root@master01 bin]# cp kube-apiserver kubectl kube-controller-manager kube-scheduler /opt/kubernetes/bin/
[root@master01 bin]# ls /opt/kubernetes/bin/
kube-apiserver kube-controller-manager kubectl kube-scheduler
- 创建token令牌并开启apiserver
//创建token
[root@master01 bin]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
73af5869be7dea86e14a328bb99da139
[root@master01 bin]# vim /opt/kubernetes/cfg/token.csv
73af5869be7dea86e14a328bb99da139,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
序列号,用户名,id,角色
//二进制文件,token,证书都准备好,开启apiserver
[root@master01 bin]# cd /root/k8s/
[root@master01 k8s]# bash apiserver.sh 192.168.170.128 https://192.168.170.128:2379,https://192.168.170.145:2379,https://192.168.170.136:2379
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
//apiserver的配置文件
[root@master k8s-cert]# cat /opt/kubernetes/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=true
--v=4
--etcd-servers=https://192.168.170.128:2379,https://192.168.170.145:2379,https://192.168.170.136:2379
--bind-address=192.168.170.128
--secure-port=6443
--advertise-address=192.168.170.128
--allow-privileged=true
--service-cluster-ip-range=10.0.0.0/24
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction
//验证模式
--authorization-mode=RBAC,Node
--kubelet-https=true
//开启bootstrap角色授权
--enable-bootstrap-token-auth
//指定令牌路径
--token-auth-file=/opt/kubernetes/cfg/token.csv
--service-node-port-range=30000-50000
--tls-cert-file=/opt/kubernetes/ssl/server.pem
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem
--client-ca-file=/opt/kubernetes/ssl/ca.pem
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem
--etcd-cafile=/opt/etcd/ssl/ca.pem
--etcd-certfile=/opt/etcd/ssl/server.pem
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"
- 检查是否开启
//检查进程是否启动成功
[root@master01 k8s]# ps aux | grep kube
root 49907 129 16.1 396200 300716 ? Ssl 01:01 0:06 /opt/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://192.168.170.128:2379,https://192.168.170.145:2379,https://192.168.170.136:2379 --bind-address=192.168.170.128 --secure-port=6443 --advertise-address=192.168.170.128 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem
root 49917 0.0 0.0 112676 980 pts/2 R+ 01:01 0:00 grep --color=auto kube
[root@master01 k8s]# netstat -ntap | grep 6443 //监听的https端口
tcp 0 0 192.168.170.128:6443 0.0.0.0:* LISTEN 49907/kube-apiserve
tcp 0 0 192.168.170.128:50398 192.168.170.128:6443 ESTABLISHED 49907/kube-apiserve
tcp 0 0 192.168.170.128:6443 192.168.170.128:50398 ESTABLISHED 49907/kube-apiserve
[root@master01 k8s]# netstat -ntap | grep 8080
tcp 0 0 127.0.0.1:8080 0.0.0.0:* LISTEN 49907/kube-apiserve
- 启动服务
//启动scheduler服务
[root@master01 k8s]# ./scheduler.sh 127.0.0.1
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
[root@master01 k8s]# ps aux | grep ku
postfix 48921 0.0 0.1 91732 1940 ? S 00:09 0:00 pickup -l -t unix -u
root 49907 6.1 16.7 399020 313628 ? Ssl 01:01 0:12 /opt/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://192.168.170.128:2379,https://192.168.170.145:2379,https://192.168.170.136:2379 --bind-address=192.168.170.128 --secure-port=6443 --advertise-address=192.168.170.128 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem
root 49999 1.5 1.0 45616 20104 ? Ssl 01:04 0:00 /opt/kubernetes/bin/kube-scheduler --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect
root 50019 0.0 0.0 112680 984 pts/2 S+ 01:05 0:00 grep --color=auto ku
//启动controller-manager
[root@master01 k8s]# ./controller-manager.sh 127.0.0.1
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
- 查看状态
//查看master节点状态
[root@master01 k8s]# /opt/kubernetes/bin/kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-2 Healthy {"health":"true"}
etcd-1 Healthy {"health":"true"}
etcd-0 Healthy {"health":"true"}
二、部署Node组件
- 需要部署的组件
- 生成kubeconfig(bootstrap.kubeconfig和kube-proxy.kubeconfig)
- 部署kubelet组件
- 部署kube-proxy组件
- kubelet:是master在node节点上的agent,可以管理本机运行容器的生命周期。例如创建容器、Pod挂载数据卷、下载secret、获取容器和节点状态等工作,kubelet 将每个 Pod转换成一组容器
- kube-proxy:在node节点上实现pod网络代理,维护网络规划和四层负载均衡工作
具体步骤
1.node1部署
- 在master上把kubelet、kube-proxy的执行文件拷贝到node节点上去
[root@master01 ~]# cd k8s/kubernetes/server/bin/
[root@master01 bin]# ls
apiextensions-apiserver kube-apiserver.docker_tag kube-proxy
cloud-controller-manager kube-apiserver.tar kube-proxy.docker_tag
cloud-controller-manager.docker_tag kube-controller-manager kube-proxy.tar
cloud-controller-manager.tar kube-controller-manager.docker_tag kube-scheduler
hyperkube kube-controller-manager.tar kube-scheduler.docker_tag
kubeadm kubectl kube-scheduler.tar
kube-apiserver kubelet mounter
[root@master01 bin]# scp kubelet kube-proxy root@192.168.170.145:/opt/kubernetes/bin
root@192.168.170.145's password:
kubelet 100% 168MB 107.3MB/s 00:01
kube-proxy 100% 48MB 104.1MB/s 00:00
[root@master01 bin]# scp kubelet kube-proxy root@192.168.170.136:/opt/kubernetes/bin
root@192.168.170.136's password:
kubelet 100% 168MB 113.8MB/s 00:01
kube-proxy 100% 48MB 118.5MB/s 00:00
- 将本地的kubelet和proxy执行脚本上传过来
[root@node1 ~]# ls //kubelet和proxy执行脚本
node.zip ……省略部分内容
[root@node1 ~]# unzip node.zip
Archive: node.zip
inflating: proxy.sh
inflating: kubelet.sh
- 在master上创建bootstrap.kubeconfig、kube-proxy.kubeconfig文件,并且推送给node
[root@master01 bin]# cd /root/k8s/
[root@master01 k8s]# mkdir kubeconfig //创建工作目录
[root@master01 k8s]# cd kubeconfig/
[root@master01 kubeconfig]# rz -E
rz waiting to receive.
[root@master01 kubeconfig]# ls
kubeconfig.sh
//获取token信息
[root@master01 kubeconfig]# cat /opt/kubernetes/cfg/token.csv
73af5869be7dea86e14a328bb99da139,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
[root@master01 kubeconfig]# vim kubeconfig.sh
# 设置客户端认证参数
kubectl config set-credentials kubelet-bootstrap
--token=73af5869be7dea86e14a328bb99da139
//设置环境变量让系统识别
[root@master01 kubeconfig]# vim /etc/profile
export PATH=$PATH:/opt/kubernetes/bin/
[root@master01 kubeconfig]# source /etc/profile
[root@master01 kubeconfig]# kubectl get cs //可以使用了
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health":"true"}
etcd-1 Healthy {"health":"true"}
etcd-2 Healthy {"health":"true"}
//生成配置文件
[root@master01 kubeconfig]# bash kubeconfig.sh 192.168.170.128 /root/k8s/k8s-sert/ //指定证书位置
Cluster "kubernetes" set.
User "kubelet-bootstrap" set.
Context "default" created.
Switched to context "default".
Cluster "kubernetes" set.
User "kube-proxy" set.
Context "default" created.
Switched to context "default".
//拷贝配置文件到node节点
[root@master01 kubeconfig]# scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.170.145:/opt/kubernetes/cfg/
root@192.168.170.145's password:
bootstrap.kubeconfig 100% 2169 623.8KB/s 00:00
kube-proxy.kubeconfig 100% 6271 5.1MB/s 00:00
[root@master01 kubeconfig]# scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.170.136:/opt/kubernetes/cfg/
root@192.168.170.136's password:
bootstrap.kubeconfig 100% 2169 1.8MB/s 00:00
kube-proxy.kubeconfig 100% 6271 3.5MB/s 00:00
//kubeconfig作用:指明node如何加入群集(指定的组件,通过端口,地址申请到证书)包含token令牌 ,node启动的时候会自动发现kubeconfig所给的信息联系apiserver
//创建bootstrap角色赋予权限用于连接apiserver请求签名
[root@master01 kubeconfig]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
- 在node1启动kubelet组件
[root@node1 ~]# bash kubelet.sh 192.168.170.145
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@node1 ~]# ps aux | grep kube //检查kubelet服务
root 58765 0.1 0.5 530716 20768 ? Ssl 01:59 0:43 /opt/kubernetes/bin/flanneld --ip-masq --etcd-endpoints=https://192.168.170.128:2379,https://192.168.170.145:2379,https://192.168.170.136:2379 -etcd-cafile=/opt/etcd/ssl/ca.pem -etcd-certfile=/opt/etcd/ssl/server.pem -etcd-keyfile=/opt/etcd/ssl/server-key.pem
root 115634 4.5 1.2 472996 47984 ? Ssl 13:07 0:00 /opt/kubernetes/bin/kubelet --logtostderr=true --v=4 --hostname-override=192.168.170.145 --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig --bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig --config=/opt/kubernetes/cfg/kubelet.config --cert-dir=/opt/kubernetes/ssl --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0
root 115666 0.0 0.0 112728 968 pts/3 R+ 13:07 0:00 grep --color=auto kube
- kubelet启动会自动联系apiserver申请证书
- 此时在master查看node节点是否发请求过来
[root@master01 kubeconfig]# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-H0F0xwU_d_l9M12_2r1o08WtKpsUPp12iPyjebO5qjE 3m kubelet-bootstrap Pending
//Pending表示等待群集给该节点颁发证书
- 使用kubectl命令同意颁发
[root@master01 kubeconfig]# kubectl certificate approve node-csr-H0F0xwU_d_l9M12_2r1o08WtKpsUPp12iPyjebO5qjE
certificatesigningrequest.certificates.k8s.io/node-csr-H0F0xwU_d_l9M12_2r1o08WtKpsUPp12iPyjebO5qjE approved
[root@master01 kubeconfig]# kubectl get csr //属于已颁发状态
NAME AGE REQUESTOR CONDITION
node-csr-H0F0xwU_d_l9M12_2r1o08WtKpsUPp12iPyjebO5qjE 5m38s kubelet-bootstrap Approved,Issued
[root@master01 kubeconfig]# kubectl get node //第一个节点成功被添加
NAME STATUS ROLES AGE VERSION
192.168.170.145 Ready <none> 104s v1.12.3
- 在node节点启动proxy服务
[root@node1 ~]# bash proxy.sh 192.168.170.145
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
[root@node1 ~]# systemctl status kube-proxy.service
● kube-proxy.service - Kubernetes Proxy
Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
Active: active (running) since 四 2020-04-30 13:15:30 CST; 9s ago
Main PID: 118884 (kube-proxy)
。。。省略部分内容
2.node2配置
- 把node1现成的/opt/kubernetes目录复制到node2节点进行修改即可
- 注意:node1节点申请的证书需要删除,node2需要自己找apiserver申请证书
[root@node1 ~]# scp -r /opt/kubernetes/ root@192.168.170.136:/opt/
The authenticity of host '192.168.170.136 (192.168.170.136)' can't be established.
ECDSA key fingerprint is SHA256:JeaBUe61GG4gKkTO9vXNHu1Kiqgfm5bTA/zPJ1liJH0.
ECDSA key fingerprint is MD5:89:32:2b:8b:ca:b4:78:6a:38:1e:79:30:1f:1d:05:85.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.170.136' (ECDSA) to the list of known hosts.
root@192.168.170.136's password:
flanneld 100% 241 316.4KB/s 00:00
bootstrap.kubeconfig 100% 2169 3.0MB/s 00:00
kube-proxy.kubeconfig 100% 6271 7.6MB/s 00:00
kubelet 100% 379 509.8KB/s 00:00
kubelet.config 100% 269 258.7KB/s 00:00
kubelet.kubeconfig 100% 2298 1.1MB/s 00:00
kube-proxy 100% 191 171.5KB/s 00:00
mk-docker-opts.sh 100% 2139 2.1MB/s 00:00
scp: /opt//kubernetes/bin/flanneld: Text file busy
kubelet 100% 168MB 122.6MB/s 00:01
kube-proxy 100% 48MB 116.5MB/s 00:00
kubelet.crt 100% 2197 751.5KB/s 00:00
kubelet.key 100% 1675 1.7MB/s 00:00
kubelet-client-2020-04-30-13-12-46.pem 100% 1277 530.6KB/s 00:00
kubelet-client-current.pem 100% 1277 399.7KB/s 00:00
//把kubelet,kube-proxy的service文件拷贝到node2中
[root@node1 ~]# scp /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@192.168.170.136:/usr/lib/systemd/system/
root@192.168.170.136's password:
kubelet.service 100% 264 189.4KB/s 00:00
kube-proxy.service 100% 231 167.5KB/s 00:00
- 进入node2进行修改
//首先删除复制过来的证书,等会node2会自行申请证书
[root@node2 ssl]# cd /opt/kubernetes/ssl/
[root@node2 ssl]# ls
kubelet-client-2020-04-30-13-12-46.pem kubelet-client-current.pem kubelet.crt kubelet.key
[root@node2 ssl]# rm -rf *
[root@node2 ssl]# ls
//修改配置文件kubelet kubelet.config kube-proxy
[root@node2 ssl]# cd ../cfg/
[root@node2 cfg]# vim kubelet
KUBELET_OPTS="--logtostderr=true
--v=4
--hostname-override=192.168.170.136 //修改地址
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig
--config=/opt/kubernetes/cfg/kubelet.config
--cert-dir=/opt/kubernetes/ssl
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
[root@node2 cfg]# vim kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.170.136 //修改地址
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local.
failSwapOn: false
authentication:
anonymous:
enabled: true
[root@node2 cfg]# vim kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true
--v=4
--hostname-override=192.168.170.136 //修改地址
--cluster-cidr=10.0.0.0/24
--proxy-mode=ipvs
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
- 修改完毕后,直接启动服务即可
[root@node2 cfg]# systemctl start kubelet.service
[root@node2 cfg]# systemctl enable kubelet.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@node2 cfg]# systemctl start kube-proxy.service
[root@node2 cfg]# systemctl enable kube-proxy.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
- 将node2加入群集中
[root@master01 kubeconfig]# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-H0F0xwU_d_l9M12_2r1o08WtKpsUPp12iPyjebO5qjE 21m kubelet-bootstrap Approved,Issued
node-csr-pVJBEG0A6r4fTl78W6wxAelraDaf6dHUWMtmDFCDVrg 38s kubelet-bootstrap Pending
[root@master01 kubeconfig]# kubectl certificate approve node-csr-pVJBEG0A6r4fTl78W6wxAelraDaf6dHUWMtmDFCDVrg
certificatesigningrequest.certificates.k8s.io/node-csr-pVJBEG0A6r4fTl78W6wxAelraDaf6dHUWMtmDFCDVrg approved
- 查看群集节点和状态信息
[root@master01 kubeconfig]# kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.170.136 Ready <none> 15s v1.12.3
192.168.170.145 Ready <none> 17m v1.12.3
//两个节点都为Ready状态
- 单master群集部署完毕
最后
以上就是淡淡镜子为你收集整理的K8s脱坑前的挣扎——k8s单节点群集部署(3)Master及Node部署一、部署Master组件二、部署Node组件的全部内容,希望文章能够帮你解决K8s脱坑前的挣扎——k8s单节点群集部署(3)Master及Node部署一、部署Master组件二、部署Node组件所遇到的程序开发问题。
如果觉得靠谱客网站的内容还不错,欢迎将靠谱客网站推荐给程序员好友。
本图文内容来源于网友提供,作为学习参考使用,或来自网络收集整理,版权属于原作者所有。
发表评论 取消回复