Kubernetes (6): Building a Highly Available K8s Cluster with kubeadm
2021-01-29 12:14
Original post: https://blog.51cto.com/13812615/2507249
Official site: https://kubernetes.io
Basic concepts
K8s components
Upstream source: https://github.com/kubernetes/kubernetes/
Pod: the smallest unit k8s schedules; one or more containers that run and are managed together.
Service: gives stable access from outside the host to the changing set of containers inside k8s and handles dynamic Pod discovery; in effect, a Service is k8s's internal load balancer.
Master: a control-plane node.
Node: a worker node that runs the workloads.
kube-proxy (https://k8smeetup.github.io/docs/admin/kube-proxy/): maintains the network rules on each node and forwards incoming requests to the matching Service; the administrator maps a Service to a NodePort when it must be reachable from outside.
Component reference: https://kubernetes.io/zh/docs/concepts/overview/components/
Deploying the k8s cluster
Cluster plan
Role      Hostname        IP address      Software
master-1  centos7-node1   192.168.56.11   docker, kube-controller-manager, kube-apiserver, kube-scheduler
master-2  centos7-node2   192.168.56.12   docker, kube-controller-manager, kube-apiserver, kube-scheduler
master-3  centos7-node3   192.168.56.13   docker, kube-controller-manager, kube-apiserver, kube-scheduler
ha-1      centos7-node4   192.168.56.14   haproxy, keepalived
ha-2      centos7-node5   192.168.56.15   haproxy, keepalived
harbor-1  centos7-node6   192.168.56.16   docker, docker-compose, harbor
node-1    centos7-node7   192.168.56.17   kubelet, kube-proxy
node-2    centos7-node8   192.168.56.18   kubelet, kube-proxy
Preparing the base environment (all hosts)
$ wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo && yum -y install epel-release
$ wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker.repo
$ sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
$ systemctl stop firewalld && systemctl disable firewalld
$ sed -i '/swap/s/^\(.*\)$/#\1/g' /etc/fstab
$ swapoff -a
$ yum install chrony -y && systemctl enable chronyd && systemctl start chronyd && timedatectl set-timezone Asia/Shanghai && timedatectl set-ntp yes
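A quick check that time really is in sync on every host (not in the original post, but cheap insurance against TLS and etcd trouble later):
$ chronyc sources -v # the line starting with ^* is the currently selected time source
$ timedatectl | grep -i ntp # should show NTP synchronized: yes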
$ cat /etc/hosts
192.168.56.11 centos7-node1
192.168.56.12 centos7-node2
192.168.56.13 centos7-node3
192.168.56.14 centos7-node4
192.168.56.15 centos7-node5
192.168.56.16 centos7-node6
192.168.56.17 centos7-node7
192.168.56.18 centos7-node8
192.168.56.16 harbor.magedu.com
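The same /etc/hosts must exist on every machine; a small loop like the sketch below can push it out, assuming root SSH access between the hosts:
$ for i in $(seq 11 18); do scp /etc/hosts 192.168.56.$i:/etc/hosts; done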
$ modprobe br_netfilter
$ cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
$ sysctl --system
$ yum -y install ipvsadm ipset
$ cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
$ chmod +x /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
$ lsmod | grep -e ip_vs -e nf_conntrack # confirm the modules loaded
$ mkdir -p /etc/docker && yum -y install docker-ce
A minimal /etc/docker/daemon.json, assuming the systemd cgroup driver and plain-HTTP access to the local Harbor:
$ tee /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "insecure-registries": ["harbor.magedu.com"]
}
EOF
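Docker then has to be enabled and restarted for the daemon.json settings to take effect, followed by a quick sanity check on the driver:
$ systemctl enable docker && systemctl restart docker
$ docker info | grep -i cgroup # should report: Cgroup Driver: systemd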
Deploying Harbor and the HAProxy reverse proxy
$ yum -y install epel-release docker-compose
$ wget https://github.com/goharbor/harbor/releases/download/v2.0.0/harbor-offline-installer-v2.0.0.tgz
$ tar xf harbor-offline-installer-v2.0.0.tgz -C /usr/local/src && cd /usr/local/src/harbor
$ cp harbor.yml.tmpl harbor.yml
$ vim harbor.yml
hostname: harbor.magedu.com
http:
# port for http, default is 80. If https enabled, this port will redirect to https port
port: 80
# https related config
#https:
# https port for harbor, default is 443
# port: 443
# The path of cert and key files for nginx
# certificate: /your/certificate/path
# private_key: /your/private/key/path
$ ./install.sh
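Before moving on it is worth confirming that every Harbor container came up healthy and that a login works (the admin password defaults to Harbor12345 unless changed in harbor.yml):
$ cd /usr/local/src/harbor && docker-compose ps # every service should show "Up (healthy)"
$ docker login harbor.magedu.com # needs harbor.magedu.com in /etc/hosts and in docker's insecure-registries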
$ yum -y install keepalived haproxy # on ha-1 and ha-2
$ vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
vrrp_strict
vrrp_garp_interval 0
vrrp_gna_interval 0
}
vrrp_instance VI_1 {
state MASTER
interface ens33
virtual_router_id 56
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.56.110 dev ens33 label ens33:1
}
}
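The file above is for the MASTER (ha-1). On ha-2 the same configuration is reused with two values changed so it only takes over when ha-1 fails (a sketch, assuming the same interface name):
state BACKUP
priority 80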
$ systemctl enable keepalived && systemctl start keepalived
$ vim /etc/haproxy/haproxy.cfg
listen k8s-api-6443
bind 192.168.56.110:6443
mode tcp
server centos7-node1 192.168.56.11:6443 check inter 3s fall 3 rise 5
server centos7-node2 192.168.56.12:6443 check inter 3s fall 3 rise 5
server centos7-node3 192.168.56.13:6443 check inter 3s fall 3 rise 5
$ systemctl restart haproxy
$ ss -tnl | grep 6443 # confirm the listener is up
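Only the node currently holding the VIP can bind 192.168.56.110, so HAProxy on the standby needs non-local binds enabled (a companion setting not shown above):
$ echo "net.ipv4.ip_nonlocal_bind = 1" >> /etc/sysctl.conf && sysctl -p
Failover can then be tested by stopping keepalived on ha-1 and watching the VIP move to ha-2:
$ systemctl stop keepalived # on ha-1
$ ip addr show ens33 # on ha-2: the ens33:1 label with 192.168.56.110 should appear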
Installing the Kubernetes packages (masters and nodes)
$ cat > /etc/yum.repos.d/kubernetes.repo <<'EOF'
[kubernetes]
name=kubernetes
baseurl=https://mirrors.tuna.tsinghua.edu.cn/kubernetes/yum/repos/kubernetes-el7-$basearch
enabled=1
gpgcheck=0
EOF
$ yum install -y kubelet-1.17.2 kubeadm-1.17.2 kubectl-1.17.2
$ systemctl enable kubelet && systemctl start kubelet
Initializing the cluster on the first master
$ mkdir /data/scripts -p
$ kubeadm completion bash > /data/scripts/kubeadm_completion.sh && chmod +x /data/scripts/kubeadm_completion.sh
$ vim /etc/profile
source /data/scripts/kubeadm_completion.sh
$ source /etc/profile
$ kubeadm config print init-defaults # print the default init configuration
$ kubeadm config images list --kubernetes-version v1.17.2 # list the images that will be pulled
$ kubeadm init --apiserver-advertise-address=192.168.56.11 --apiserver-bind-port=6443 --control-plane-endpoint=192.168.56.110 --kubernetes-version=v1.17.2 --ignore-preflight-errors=swap --image-repository=registry.cn-hangzhou.aliyuncs.com/google_containers --pod-network-cidr=10.10.0.0/16 --service-cidr=172.26.0.0/16 --service-dns-domain=linux.local # save the join commands it prints
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join 192.168.56.110:6443 --token iou3pg.8q2f13dbw8z2l4lm --discovery-token-ca-cert-hash sha256:e13d02eea0bd631ba8cae228a31b3cc783686544761de1b3c4d514f313f501c3 --control-plane
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.56.110:6443 --token iou3pg.8q2f13dbw8z2l4lm --discovery-token-ca-cert-hash sha256:e13d02eea0bd631ba8cae228a31b3cc783686544761de1b3c4d514f313f501c3
$ kubeadm init phase upload-certs --upload-certs # generate the certificate key used to join more control-plane nodes
6c3a44aee4b3fabb5beb44ab696fee6043c77d3461cee1f2c9e80058aa42d493
#### Run this on the other two master nodes
$ kubeadm join 192.168.56.110:6443 --token iou3pg.8q2f13dbw8z2l4lm --discovery-token-ca-cert-hash sha256:e13d02eea0bd631ba8cae228a31b3cc783686544761de1b3c4d514f313f501c3 --control-plane --certificate-key 6c3a44aee4b3fabb5beb44ab696fee6043c77d3461cee1f2c9e80058aa42d493
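Before kubectl can be used on a master, the admin kubeconfig has to be put in place (these commands are printed by kubeadm init itself):
$ mkdir -p $HOME/.kube
$ cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
$ chown $(id -u):$(id -g) $HOME/.kube/config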
$ kubectl get nodes # NotReady until a network plugin is installed
NAME STATUS ROLES AGE VERSION
centos7-node1 NotReady master 37m v1.17.2
centos7-node2 NotReady master 25m v1.17.2
centos7-node3 NotReady master 18m v1.17.2
The same initialization can also be expressed as a config file:
$ cat kubeadm-1.17.2.yml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 48h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.56.11
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: centos7-node1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.56.110
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.17.2
networking:
  dnsDomain: linux.local
  podSubnet: 10.10.0.0/16
  serviceSubnet: 172.26.0.0/16
scheduler: {}
$ kubeadm init --config kubeadm-1.17.2.yml # run once on a single master, then join the other masters as above
Verifying cluster status
Pinning GitHub IPs in /etc/hosts works around slow or unreachable raw.githubusercontent.com when downloading manifests:
$ vim /etc/hosts
# GitHub Start
52.74.223.119 github.com
192.30.253.119 gist.github.com
54.169.195.247 api.github.com
185.199.111.153 assets-cdn.github.com
151.101.76.133 raw.githubusercontent.com
151.101.76.133 gist.githubusercontent.com
151.101.76.133 cloud.githubusercontent.com
151.101.76.133 camo.githubusercontent.com
151.101.76.133 avatars0.githubusercontent.com
151.101.76.133 avatars1.githubusercontent.com
151.101.76.133 avatars2.githubusercontent.com
151.101.76.133 avatars3.githubusercontent.com
151.101.76.133 avatars4.githubusercontent.com
151.101.76.133 avatars5.githubusercontent.com
151.101.76.133 avatars6.githubusercontent.com
151.101.76.133 avatars7.githubusercontent.com
151.101.76.133 avatars8.githubusercontent.com
# GitHub End
$ wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
$ vim kube-flannel.yml # the Network value must match the pod subnet given to kubeadm (10.10.0.0/16), not the service subnet
net-conf.json: |
  {
    "Network": "10.10.0.0/16",
    "Backend": {
      "Type": "vxlan"
    }
  }
$ kubectl apply -f kube-flannel.yml
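One flannel pod should come up per node; a quick check (namespace as used by the coreos manifest above):
$ kubectl -n kube-system get pods -o wide | grep flannel # all kube-flannel-ds pods Running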
$ kubectl get nodes # all masters now Ready
NAME STATUS ROLES AGE VERSION
centos7-node1 Ready master 38m v1.17.2
centos7-node2 Ready master 26m v1.17.2
centos7-node3 Ready master 19m v1.17.2
Preparing the worker nodes
$ cat > /etc/yum.repos.d/kubernetes.repo <<'EOF'
[kubernetes]
name=kubernetes
baseurl=https://mirrors.tuna.tsinghua.edu.cn/kubernetes/yum/repos/kubernetes-el7-$basearch
enabled=1
gpgcheck=0
EOF
$ yum install -y kubelet-1.17.2 kubeadm-1.17.2
$ systemctl enable kubelet && systemctl start kubelet
Joining the worker nodes to the cluster
$ kubeadm join 192.168.56.110:6443 --token iou3pg.8q2f13dbw8z2l4lm --discovery-token-ca-cert-hash sha256:e13d02eea0bd631ba8cae228a31b3cc783686544761de1b3c4d514f313f501c3
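Back on any master, the two workers should appear and turn Ready once flannel starts on them:
$ kubectl get nodes # centos7-node7 and centos7-node8 should be listed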
Creating test Pods to verify the network
$ kubectl run net-test1 --image=alpine --replicas=3 sleep 360000 # create test pods (--replicas is accepted by kubectl 1.17 but removed in later releases)
$ kubectl run net-test2 --image=alpine --replicas=3 sleep 360000
$ kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
net-test1 1/1 Running 1 17h 10.10.5.3 centos7-node8
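Cross-node connectivity over the flannel VXLAN can be verified by pinging one test pod from another (pod name and IP are examples; take real ones from kubectl get pod -o wide):
$ kubectl exec -it net-test1 -- ping -c 3 10.10.4.2 # IP of a pod running on a different node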
Deploying the Dashboard web UI
$ docker pull kubernetesui/dashboard:v2.0.0-rc6
$ docker pull kubernetesui/metrics-scraper:v1.0.3
$ docker tag kubernetesui/dashboard:v2.0.0-rc6 harbor.magedu.com/baseimages/dashboard:v2.0.0-rc6
$ docker tag 3327f0dbcb4a harbor.magedu.com/baseimages/metrics-scraper:v1.0.3 # tag by image ID
$ docker login
$ docker push harbor.magedu.com/baseimages/dashboard:v2.0.0-rc6
$ docker push harbor.magedu.com/baseimages/metrics-scraper:v1.0.3
# adjust the manifest
$ wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc6/aio/deploy/recommended.yaml
$ mv recommended.yaml dashboard-2.0.0-rc6.yml
$ vim dashboard-2.0.0-rc6.yml # point both images at the local Harbor and expose the Service on a NodePort
image: harbor.magedu.com/baseimages/dashboard:v2.0.0-rc6 # changed in two places
image: harbor.magedu.com/baseimages/metrics-scraper:v1.0.3 # changed in two places
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30002
  selector:
    k8s-app: kubernetes-dashboard
$ vim admin-user.yml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
$ kubectl apply -f dashboard-2.0.0-rc6.yml
$ kubectl apply -f admin-user.yml
$ kubectl get pod -A
$ kubectl get svc -A
$ kubectl get secret -A | grep admin-user
kubernetes-dashboard admin-user-token-5vvwn kubernetes.io/service-account-token 3 6m17s
$ kubectl describe secret admin-user-token-5vvwn -n kubernetes-dashboard # copy the token from the output
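The token can also be pulled out non-interactively; a one-liner of this shape works with the admin-user ServiceAccount created above:
$ kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa admin-user -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 -d
The dashboard is then reachable from any node at https://<node-ip>:30002, e.g. https://192.168.56.12:30002, using that token to log in.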
Upgrading the k8s cluster
Upgrading with kubeadm
Upgrade steps
On each master, one at a time:
$ yum list --showduplicates | grep kubeadm # see which versions are available
$ yum -y install kubeadm-1.17.4
$ kubeadm upgrade plan # review the upgrade plan
$ kubeadm upgrade apply v1.17.4
$ yum -y install kubelet-1.17.4 kubectl-1.17.4
$ systemctl daemon-reload && systemctl restart kubelet
On each worker node:
$ yum -y install kubeadm-1.17.4
$ kubeadm upgrade node --kubelet-version 1.17.4
$ yum -y install kubelet-1.17.4
$ systemctl daemon-reload && systemctl restart kubelet
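The official upgrade flow additionally drains each node before upgrading and uncordons it afterwards; recommended on any cluster with real workloads (run from a master, node name is an example):
$ kubectl drain centos7-node7 --ignore-daemonsets
$ kubectl uncordon centos7-node7 # after the node's kubelet is back on 1.17.4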
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
centos7-node1 Ready master 31h v1.17.4
centos7-node2 Ready master 30h v1.17.4
centos7-node3 NotReady master 30h v1.17.4
centos7-node7 Ready
Test run: Nginx + Tomcat
$ docker pull nginx:1.14.2
$ docker tag nginx:1.14.2 harbor.magedu.com/baseimages/nginx:1.14.2
$ docker login harbor.magedu.com
$ docker push harbor.magedu.com/baseimages/nginx:1.14.2 # push the base image to the local registry
$ mkdir ~/kubeadm_demo/nginx && vim ~/kubeadm_demo/nginx/nginx.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: harbor.magedu.com/baseimages/nginx:1.14.2
        ports:
        - containerPort: 80
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-nginx-service-label
  name: magedu-nginx-service
  namespace: default
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30004
  selector:
    app: nginx
$ kubectl apply -f ~/kubeadm_demo/nginx/nginx.yml
$ kubectl get pod -A
$ kubectl get svc -A
$ kubectl logs nginx-deployment-79dbb87ff9-w5f87 -f # follow the nginx-deployment access log
$ docker pull tomcat
$ docker run --name tomcat -d -p 8080:8080 tomcat
$ docker exec -it tomcat bash
root@7785ba4b14d2:/usr/local/tomcat# cd webapps
root@7785ba4b14d2:/usr/local/tomcat/webapps# mkdir -p app && echo "tomcat.app" > app/index.html
$ mkdir ~/kubeadm_demo/tomcat && vim ~/kubeadm_demo/tomcat/Dockerfile
FROM tomcat
ADD ./app /usr/local/tomcat/webapps/app/
$ cd ~/kubeadm_demo/tomcat && mkdir app && echo "tomcat APP" > app/index.html
$ docker build -t harbor.magedu.com/linux/tomcat:app . # build the image
$ docker run --name t1 -it --rm -p 8080:8080 harbor.magedu.com/linux/tomcat:app # smoke-test the image
$ docker push harbor.magedu.com/linux/tomcat:app # the public "linux" project must exist in Harbor first
[root@centos7-node1 tomcat]# vim tomcat.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tomcat-deployment
  labels:
    app: tomcat
spec:
  replicas: 1
  selector:
    matchLabels:
      app: tomcat
  template:
    metadata:
      labels:
        app: tomcat
    spec:
      containers:
      - name: tomcat
        image: harbor.magedu.com/linux/tomcat:app
        ports:
        - containerPort: 8080 # tomcat listens on 8080, not 80
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-tomcat-service-label
  name: magedu-tomcat-service
  namespace: default
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    # nodePort: 30005
  selector:
    app: tomcat
$ kubectl apply -f tomcat.yml
$ cd ~/kubeadm_demo/nginx && vim Dockerfile
FROM nginx:1.14.2
ADD default.conf /etc/nginx/conf.d/
$ vim default.conf
server {
    listen 80;
    server_name localhost;
    location /app {
        proxy_pass http://magedu-tomcat-service; # the tomcat Service name, resolved via cluster DNS
    }
}
$ docker build -t harbor.magedu.com/baseimages/nginx:v0.1 .
$ docker push harbor.magedu.com/baseimages/nginx:v0.1
$ vim nginx.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: harbor.magedu.com/baseimages/nginx:v0.1 # image updated to the v0.1 build
        ports:
        - containerPort: 80
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-nginx-service-label
  name: magedu-nginx-service
  namespace: default
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30004
  selector:
    app: nginx
$ kubectl apply -f nginx.yml # http://192.168.56.11:30004/app/index.html now serves the tomcat page through nginx
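Resolution and routing can also be double-checked from inside the cluster using one of the alpine test pods (the Service FQDN follows the linux.local dnsDomain chosen at init):
$ kubectl exec -it net-test1 -- nslookup magedu-tomcat-service.default.svc.linux.local
$ kubectl exec -it net-test1 -- wget -qO- http://magedu-tomcat-service/app/index.html # should print "tomcat APP"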
Dealing with token expiry
The bootstrap token created at init has a limited TTL (24 hours by default); once it expires, new nodes can no longer join until a fresh token is created:
$ kubeadm token generate # generate a random token value
$ kubeadm token create # create a new bootstrap token on the cluster
$ kubeadm token delete <token> # remove a token
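In practice, the quickest way to get a complete join command for a new worker after the original token expires:
$ kubeadm token create --print-join-command # prints a ready-to-run kubeadm join with a fresh token and CA hash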