Overview
Contents
- Preface
- Cluster deployment
- ① Environment preparation (VMware)
- ② Kernel tuning (optional)
- ③ One-click script
- ④ Verification
- ⑤ master (initialization)
- ⑥ flannel (master)
- ⑦ Joining the node machines to the cluster
- ⑧ Testing pod creation
- harbor
Preface
A few days ago at work I used Kubeadm to set up a K8S cluster with a single master node. I first tested it on VMware and distilled the steps into a one-click script, then ran the same script directly on Huawei Cloud servers with no problems. The whole process is summarized below to share with everyone:
Cluster deployment
① Environment preparation (VMware)
VMware® Workstation 16 Pro: 16.1.2 build-17966106
ISO image: CentOS 7.9
Memory: 4 GB (each VM should get 2 GB or more of RAM, especially the master control node)
Processors: 2×4
Disk: 40 GB (give the harbor host a little more if you can)
Network adapter: NAT
Hostname | IP address |
---|---|
master1 | 192.168.126.11 |
harbor | 192.168.126.12 |
node1 | 192.168.126.13 |
node2 | 192.168.126.14 |
node3 | 192.168.126.15 |
node4 | 192.168.126.16 |
#The MAC address and product_uuid must also be unique on every node, otherwise the installation may fail!
#Use ip link or ifconfig -a to check the MAC addresses of the network interfaces;
#Use cat /sys/class/dmi/id/product_uuid to check the product_uuid;
#Every node needs a unique hostname;
[root@localhost ~]# hostnamectl set-hostname master1
[root@localhost ~]# su
[root@master1 ~]#
[root@localhost ~]# hostnamectl set-hostname harbor
[root@localhost ~]# su
[root@harbor ~]#
[root@localhost ~]# hostnamectl set-hostname node1
[root@localhost ~]# su
[root@node1 ~]#
[root@localhost ~]# hostnamectl set-hostname node2
[root@localhost ~]# su
[root@node2 ~]#
[root@localhost ~]# hostnamectl set-hostname node3
[root@localhost ~]# su
[root@node3 ~]#
[root@localhost ~]# hostnamectl set-hostname node4
[root@localhost ~]# su
[root@node4 ~]#
#Make sure all hosts in the cluster can reach each other over the network!
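A quick way to verify that (just a sketch; the IPs come from the table above) is to ping every cluster member from each host:
#ping each cluster IP once; any failure means the network must be fixed before going on
for ip in 192.168.126.11 192.168.126.12 192.168.126.13 192.168.126.14 192.168.126.15 192.168.126.16; do
  ping -c 1 -W 1 "$ip" > /dev/null && echo "$ip ok" || echo "$ip unreachable"
done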
② Kernel tuning (optional)
echo "kernel.sysrq=1
kernel.panic=10
kernel.panic_on_oops=1
kernel.panic_on_io_nmi=1
kernel.unknown_nmi_panic=1
kernel.panic_on_unrecovered_nmi=1
net.core.netdev_max_backlog=1000
net.core.somaxconn=1024
net.ipv4.tcp_tw_reuse=1
net.ipv4.tcp_tw_recycle=1
net.ipv4.tcp_fin_timeout=30
net.ipv4.tcp_keepalive_time=30
net.ipv4.tcp_max_tw_buckets=5000
net.ipv4.tcp_max_syn_backlog=1024
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
# Receive socket buffer size
net.core.wmem_default=262144
net.core.wmem_max = 4194304
# Send socket buffer size
net.ipv4.tcp_rmem = 4096 262144 4194304
net.ipv4.tcp_wmem = 4096 262144 4194304
# TCP socket buffer size
# Network port range 65000
fs.file-max=655350
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 10
net.ipv4.tcp_timestamps = 0
vm.swappiness = 0
kernel.pid_max=99999" >> /etc/sysctl.conf
#Adjust the limits parameters;
echo "* hard core 0
* soft core 0
* hard nofile 65535
* soft nofile 65535" >> /etc/security/limits.conf
sysctl -p
#soft is the soft limit and hard is the hard limit; this raises the maximum number of file descriptors the current user can open on Linux.
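To confirm the settings took effect (optional), read a few of them back; note that the nofile limit only shows up in a new login session:
#spot-check a few of the kernel parameters loaded by sysctl -p
sysctl net.core.somaxconn net.ipv4.tcp_fin_timeout vm.swappiness
#a fresh login shell should report the raised file-descriptor limit (65535)
ulimit -n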
③ One-click script
- The harbor host does not need to run this script;
- It is best not to run the script on more than two hosts at the same time, or downloads slow to a crawl.
#!/bin/bash
#----Configure the Aliyun online YUM repository----
cd /etc/yum.repos.d/
mkdir yum.repos.bak
mv -f *.repo yum.repos.bak/
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum clean all && yum makecache fast
yum -y update
#----Update the hosts file (edit this part by hand if your addresses differ)----
echo "192.168.126.11 master
192.168.126.12 harbor
192.168.126.13 node01
192.168.126.14 node02
192.168.126.15 node03
192.168.126.16 node04" >> /etc/hosts
#----Disable swap, which is required for kubelet to work properly----
swapoff -a
#----Disable the firewall----
systemctl stop firewalld && systemctl disable firewalld
#----Set SELinux to permissive mode, which is required so containers can access the host filesystem----
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
#----Let iptables see bridged traffic; by default the bridge works at the data-link layer, so IPv4 traffic must be forwarded to the iptables chains----
cat <<EOF | tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
cat <<EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
#----Install Docker, kubeadm, kubelet and kubectl on all nodes----
#kubeadm: bootstraps the cluster;
#kubelet: the component that runs on every machine in the cluster and handles things like starting pods and containers;
#kubectl: used to talk to the cluster;
#The commands below pin specific versions because of compatibility issues; change them with care!
#Install some required system utilities;
yum install -y yum-utils device-mapper-persistent-data lvm2
sleep 2s
#Add the Docker software repository;
yum install -y wget && wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
sleep 2s
#Install Docker; versions newer than 18.09 will cause errors!
yum -y install docker-ce-18.06.1.ce-3.el7
#Reload the daemon configuration and restart Docker so it takes effect;
systemctl start docker && systemctl enable docker
systemctl daemon-reload && systemctl restart docker
#Define the Kubernetes repository used to install kubelet and kubeadm;
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
#Install the pinned versions;
yum install -y kubelet-1.15.0 kubeadm-1.15.0 kubectl-1.15.0 --disableexcludes=kubernetes
#kubeadm requires kubelet to stay enabled; then verify the installation.
systemctl enable kubelet
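To use it, save the script on every host except harbor and run it as root; the file name k8s-init.sh below is only an assumption. Since swapoff -a lasts only until the next reboot, commenting the swap entry out of /etc/fstab is a sensible extra step:
#run the one-click script (hypothetical file name)
bash k8s-init.sh
#keep swap disabled across reboots by commenting out swap lines in /etc/fstab
sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab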
④ Verification
[root@master1 ~]# docker -v
Docker version 18.06.1-ce, build e68fc7a
[root@master1 ~]# rpm -qa | grep kube
kubelet-1.15.0-0.x86_64
kubernetes-cni-0.8.7-0.x86_64
kubectl-1.15.0-0.x86_64
kubeadm-1.15.0-0.x86_64
#Key packages:
docker-18.06.1-ce
kubelet-1.15.0-0.x86_64
kubernetes-cni-0.8.7-0.x86_64
kubectl-1.15.0-0.x86_64
kubeadm-1.15.0-0.x86_64
⑤ master (initialization)
#apiserver advertise address is this host's address; image repository to pull from; K8S version; service CIDR; pod CIDR;
kubeadm init \
--apiserver-advertise-address=192.168.126.11 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.15.0 \
--service-cidr=10.1.0.0/16 \
--pod-network-cidr=10.244.0.0/16
#Read the output carefully and copy the kubeadm join command below; it is needed later to add the node machines;
kubeadm join 192.168.126.11:6443 --token 1n8imu.tieklrmm309w9hae \
--discovery-token-ca-cert-hash sha256:5bcedc57c1f97df39e3ddea1af98790ad8eabcbf989e1f8274ec6d55979dfc1c
#As prompted, run the following commands: create the .kube directory in $HOME, copy the admin config into it, and give it the right owner and group.
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
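A quick sanity check at this point (optional): the API server should already answer, although the master stays NotReady until the flannel network is installed in the next step:
#confirm kubectl can reach the cluster
kubectl cluster-info
#master1 is expected to show NotReady until the CNI plugin is applied
kubectl get nodes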
⑥ flannel (master)
#Note: every node in the cluster needs to pull the flannel image!
docker pull quay.io/coreos/flannel:v0.11.0
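One way to fan that pull out to all the node machines in one go (a sketch only; it assumes passwordless root SSH from the master to each node, which this guide does not set up):
#pull the flannel image on every node from the master
for host in node1 node2 node3 node4; do
  ssh root@"$host" "docker pull quay.io/coreos/flannel:v0.11.0"
done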
#Write out kube-flannel.yml; adjust the parameters inside as needed;
cat > /opt/kube-flannel.yml << EOF
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.11.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: arm64
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.11.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: arm
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.11.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-ppc64le
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: ppc64le
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.11.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-s390x
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: s390x
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.11.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
EOF
#Apply the configuration in kube-flannel.yml to the cluster;
[root@master1 ~]# kubectl apply -f /opt/kube-flannel.yml
#Verify: a Ready status means flannel is working and the pod network is up.
[root@master1 ~]# kubectl get nodes
NAME      STATUS   ROLES    AGE   VERSION
master1   Ready    master   11h   v1.15.0
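To look at the CNI pods themselves rather than just the node status (optional), the flannel DaemonSet pods carry the app=flannel label defined in the manifest above:
#every node should eventually show one Running flannel pod
kubectl get pods -n kube-system -l app=flannel -o wide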
⑦ Joining the node machines to the cluster
#After each node has pulled the flannel image, simply run the following command on each of them in turn (it was recorded earlier);
kubeadm join 192.168.126.11:6443 --token 1n8imu.tieklrmm309w9hae \
--discovery-token-ca-cert-hash sha256:5bcedc57c1f97df39e3ddea1af98790ad8eabcbf989e1f8274ec6d55979dfc1c
#----Wait a little while, then check back on the master node----
[root@master1 ~]# kubectl get nodes
NAME      STATUS   ROLES    AGE     VERSION
master1   Ready    master   31m     v1.15.0
node1     Ready    <none>   35s     v1.15.0
node2     Ready    <none>   41s     v1.15.0
node3     Ready    <none>   46s     v1.15.0
node4     Ready    <none>   2m16s   v1.15.0
#Check the status of the control-plane components
[root@master1 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}
#Don't worry if you lose the join command printed during initialization; the standard format is:
kubeadm join --token <token> <control-plane-host>:<control-plane-port> --discovery-token-ca-cert-hash sha256:<hash>
#If you need a token, kubeadm token list will show the existing ones;
#By default, tokens expire after 24 hours;
#To join a node after the current token has expired, create a new token by running the following command on the control-plane node:
kubeadm token create
#If you do not have the value of `--discovery-token-ca-cert-hash`, you can obtain it by running the following command chain on the control-plane node:
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null |
openssl dgst -sha256 -hex | sed 's/^.* //'
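A convenient shortcut that produces both pieces at once is to let kubeadm print a ready-to-paste join command:
#creates a fresh token and prints the full kubeadm join line, CA cert hash included
kubeadm token create --print-join-command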
⑧ Testing pod creation
#List all pods in the "kube-system" namespace (wide output)
[root@master1 ~]# kubectl get pods -n kube-system -o wide
NAME                              READY   STATUS    RESTARTS   AGE   IP               NODE      NOMINATED NODE   READINESS GATES
coredns-bccdc95cf-4h6jc           1/1     Running   0          59m   10.244.0.2       master1   <none>           <none>
coredns-bccdc95cf-zkbxg           1/1     Running   0          59m   10.244.0.3       master1   <none>           <none>
etcd-master1                      1/1     Running   0          58m   192.168.126.11   master1   <none>           <none>
kube-apiserver-master1            1/1     Running   0          58m   192.168.126.11   master1   <none>           <none>
kube-controller-manager-master1   1/1     Running   0          58m   192.168.126.11   master1   <none>           <none>
kube-flannel-ds-amd64-6vbkp       1/1     Running   0          31m   192.168.126.11   master1   <none>           <none>
kube-flannel-ds-amd64-gcqgw       1/1     Running   1          30m   192.168.126.16   node4     <none>           <none>
kube-flannel-ds-amd64-jb2x9       1/1     Running   1          28m   192.168.126.15   node3     <none>           <none>
kube-flannel-ds-amd64-k8lft       1/1     Running   1          28m   192.168.126.13   node1     <none>           <none>
kube-flannel-ds-amd64-xxvp7       1/1     Running   1          28m   192.168.126.14   node2     <none>           <none>
kube-proxy-2j4rx                  1/1     Running   0          28m   192.168.126.14   node2     <none>           <none>
kube-proxy-mj2pq                  1/1     Running   0          28m   192.168.126.15   node3     <none>           <none>
kube-proxy-t5k96                  1/1     Running   0          59m   192.168.126.11   master1   <none>           <none>
kube-proxy-tzvnb                  1/1     Running   0          30m   192.168.126.16   node4     <none>           <none>
kube-proxy-zrgmf                  1/1     Running   0          28m   192.168.126.13   node1     <none>           <none>
kube-scheduler-master1            1/1     Running   0          58m   192.168.126.11   master1   <none>           <none>
#Create a deployment named nginx from the latest nginx image, listening on port 80, with 4 replicas
[root@master1 ~]# kubectl run nginx --image=nginx:latest --port 80 --replicas=4
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created
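As the warning says, this form of kubectl run is deprecated; an equivalent without the warning (a sketch using the create and scale subcommands available in this kubectl version) would be:
#create the deployment, then scale it to 4 replicas
kubectl create deployment nginx --image=nginx:latest
kubectl scale deployment nginx --replicas=4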
#Check the resources (wait a moment)
[root@master1 ~]# kubectl get pods
NAME                     READY   STATUS    RESTARTS   AGE
nginx-5ff9d6cc77-kx7bb   1/1     Running   0          20s
nginx-5ff9d6cc77-r9f9z   1/1     Running   0          20s
nginx-5ff9d6cc77-vw4nx   1/1     Running   0          20s
nginx-5ff9d6cc77-zkbp6   1/1     Running   0          20s
#Check the IP addresses the pods were assigned
[root@master1 ~]# kubectl get pods -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP           NODE    NOMINATED NODE   READINESS GATES
nginx-5ff9d6cc77-kx7bb   1/1     Running   0          47s   10.244.1.4   node4   <none>           <none>
nginx-5ff9d6cc77-r9f9z   1/1     Running   0          47s   10.244.3.3   node2   <none>           <none>
nginx-5ff9d6cc77-vw4nx   1/1     Running   0          47s   10.244.2.3   node3   <none>           <none>
nginx-5ff9d6cc77-zkbp6   1/1     Running   0          47s   10.244.4.4   node1   <none>           <none>
#Expose a port so the deployment can serve traffic
[root@master1 ~]# kubectl expose deployment nginx --port=80 --target-port=80 --name=nginx-service --type=NodePort
service/nginx-service exposed
#--port=80 is the Service (ClusterIP) port and --target-port=80 is the container port; the Service is named nginx-service
#--type=NodePort is the exposure type; nginx can then be reached through any node
#Check the Service: port 80 inside the cluster, port 32261 from outside
[root@master1 ~]# kubectl get svc
NAME            TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
kubernetes      ClusterIP   10.1.0.1       <none>        443/TCP        68m
nginx-service   NodePort    10.1.108.236   <none>        80:32261/TCP   24s
#From outside the cluster, test by visiting 192.168.126.11:32261 from the host machine;
#From inside, test against 10.1.108.236 from any node;
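The same checks from the command line (a sketch; the NodePort 32261 is the one shown above and will differ on your cluster):
#from outside the cluster, hit the NodePort on any node's address
curl http://192.168.126.11:32261
#from any cluster node, hit the ClusterIP directly
curl http://10.1.108.236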
#Check the backend endpoints associated with the Services
[root@master1 ~]# kubectl get endpoints
NAME            ENDPOINTS                                               AGE
kubernetes      192.168.126.11:6443                                     70m
nginx-service   10.244.1.4:80,10.244.2.3:80,10.244.3.3:80 + 1 more...   2m16s
harbor
The one-click script prepared for it:
#!/bin/bash
#----Configure the Aliyun online YUM repository----
cd /etc/yum.repos.d/
mkdir yum.repos.bak
mv -f *.repo yum.repos.bak/
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum clean all && yum makecache fast
yum -y update
#----Update the hosts file (edit this part by hand if your addresses differ)----
echo "192.168.126.11 master
192.168.126.12 harbor
192.168.126.13 node01
192.168.126.14 node02
192.168.126.15 node03
192.168.126.16 node04" >> /etc/hosts
#----Disable the firewall and SELinux----
systemctl stop firewalld && systemctl disable firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
#----Install Docker----
#Install some required system utilities;
yum install -y yum-utils device-mapper-persistent-data lvm2
sleep 2s
#Add the Docker software repository;
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sleep 2s
#Install Docker;
yum makecache fast
yum -y install docker-ce
#Reload the daemon configuration and restart Docker so it takes effect.
systemctl start docker && systemctl enable docker
systemctl daemon-reload && systemctl restart docker
#----Install docker compose----
curl -L https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64 -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
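A quick check that the binary landed correctly (optional):
#should print docker-compose version 1.29.2
docker-compose --version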
#----Install harbor----
curl -L https://github.com/goharbor/harbor/releases/download/v1.2.2/harbor-offline-installer-v1.2.2.tgz -o /usr/local/harbor-offline-installer-v1.2.2.tgz
cd /usr/local/
tar zxvf harbor-offline-installer-v1.2.2.tgz
#Edit the harbor configuration file; set hostname to this host's IP, which is used to reach the web UI and the registry service;
cd harbor/
vim harbor.cfg
#line 5: hostname = 192.168.126.12
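If the script really has to run unattended, the interactive vim step can be replaced with a sed one-liner (a sketch; it assumes the stock harbor.cfg from the v1.2.2 offline installer, where the line starts with "hostname ="):
#non-interactive alternative to editing harbor.cfg by hand
sed -i 's/^hostname = .*/hostname = 192.168.126.12/' /usr/local/harbor/harbor.cfg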
#Start harbor
./install.sh
...
...
[Step 4]: starting Harbor ...
Creating network "harbor_harbor" with the default driver
Creating harbor-log ... done
Creating harbor-db ... done
Creating harbor-adminserver ... done
Creating registry ... done
Creating harbor-ui ... done
Creating harbor-jobservice ... done
Creating nginx ... done
✔ ----Harbor has been installed and started successfully.----
Now you should be able to visit the admin portal at http://192.168.126.12.
For more details, please visit https://github.com/vmware/harbor .
#Open a browser and go to http://192.168.126.12 to log in to the harbor web UI
Create a new project there so that images can be pushed to it later:
After that it is just a matter of docker push and docker pull.
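A rough sketch of that push workflow from a cluster node (the project name "myproject" is a placeholder; because harbor is served over plain HTTP here, Docker on each node also needs the registry listed as insecure before docker login will work):
#allow docker to talk to the HTTP-only harbor registry (run on each node, then restart docker)
cat > /etc/docker/daemon.json << EOF
{
  "insecure-registries": ["192.168.126.12"]
}
EOF
systemctl restart docker
#log in, tag a local image into the hypothetical project "myproject", and push it
docker login 192.168.126.12
docker tag nginx:latest 192.168.126.12/myproject/nginx:latest
docker push 192.168.126.12/myproject/nginx:latest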