Kubernetes: single-master binary deployment of a k8s cluster

2020-12-24 04:29



# Environment
| Host   | IP address      | Components                                                | Resources |
|--------|-----------------|-----------------------------------------------------------|-----------|
| master | 192.168.100.170 | kube-apiserver, kube-scheduler, controller-manager, etcd  | 2G+4CPU   |
| node1  | 192.168.100.180 | kubelet, kube-proxy, docker, flannel, etcd                | 2G+4CPU   |
| node2  | 192.168.100.190 | kubelet, kube-proxy, docker, flannel, etcd                | 2G+4CPU   |
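Optionally, the three hostnames can be mapped in /etc/hosts on every machine so the hosts can reach each other by name. This is a convenience sketch using the IPs from the table above; the tutorial itself addresses everything by IP.

cat >> /etc/hosts <<EOF
192.168.100.170 master
192.168.100.180 node1
192.168.100.190 node2
EOF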

Certificates used by each component:

| Component      | Certificates                               |
|----------------|--------------------------------------------|
| etcd           | ca.pem, server.pem, server-key.pem         |
| flannel        | ca.pem, server.pem, server-key.pem         |
| kube-apiserver | ca.pem, server.pem, server-key.pem         |
| kubelet        | ca.pem, ca-key.pem                         |
| kube-proxy     | ca.pem, kube-proxy.pem, kube-proxy-key.pem |
| kubectl        | ca.pem, admin.pem, admin-key.pem           |

一: Etcd cluster deployment---------------------------------------------------------
hostnamectl set-hostname master
hostnamectl set-hostname node1
hostnamectl set-hostname node2
iptables -F
setenforce 0
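Note that iptables -F and setenforce 0 only last until the next reboot. To keep the firewall and SELinux out of the way permanently, something like the following can be run on all three machines (a sketch; adjust to your own security policy):

systemctl stop firewalld && systemctl disable firewalld
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config    # takes effect after reboot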

//Deploy on master------------------------------

1. On the master, create the k8s directory, upload the etcd scripts, and download the official cfssl certificate tools
mkdir k8s && cd k8s
//Upload the scripts etcd-cert.sh and etcd.sh
ls
etcd-cert.sh  etcd.sh

2. Download the certificate generation tools
k8s]# vim cfssl.sh
curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl
curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo

bash cfssl.sh
ls /usr/local/bin/
cfssl  cfssl-certinfo  cfssljson
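Once the three tools are installed, cfssl-certinfo can later be used to inspect any certificate generated below. The path here is illustrative of where this tutorial keeps the etcd server certificate:

cfssl-certinfo -cert /opt/etcd/ssl/server.pem    # prints the subject, SANs and validity of the certificate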

3. Generate the certificates
#cfssl generates certificates, cfssljson generates certificates from JSON input, cfssl-certinfo shows certificate details
#Define the CA certificate
k8s]# cd etcd-cert
[root@master etcd-cert]# cat > ca-config.json ...
(The bodies of ca-config.json, ca-csr.json and server-csr.json, together with the rest of the etcd cluster deployment and the docker installation on the nodes, were lost from the source.)

三: Flannel network deployment--------------------------------------------------------

###node1
1. Unpack the flannel package, create the k8s working directories and install the binaries
[root@node1 ~]# tar zxvf flannel-v0.10.0-linux-amd64.tar.gz
[root@node1 ~]# mkdir /opt/kubernetes/{cfg,bin,ssl} -p
[root@node1 ~]# mv mk-docker-opts.sh flanneld /opt/kubernetes/bin/

2. Write the flannel service script
[root@node1 ~]# vim flannel.sh
#!/bin/bash

ETCD_ENDPOINTS=${1:-"http://127.0.0.1:2379"}

cat <<EOF >/opt/kubernetes/cfg/flanneld

FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} -etcd-cafile=/opt/etcd/ssl/ca.pem -etcd-certfile=/opt/etcd/ssl/server.pem -etcd-keyfile=/opt/etcd/ssl/server-key.pem"

EOF

cat <<EOF >/usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target

EOF

systemctl daemon-reload
systemctl enable flanneld
systemctl restart flanneld
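flanneld reads the pod network range from etcd; writing that range into etcd was part of the lost etcd section above. For reference, on the master (or any machine with the etcd certificates) it typically looks like the sketch below, assuming the etcd v2 API, the certificate paths used in this tutorial, and an etcdctl binary under /opt/etcd/bin (that path was not shown in the surviving text):

/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem \
--endpoints="https://192.168.100.170:2379,https://192.168.100.180:2379,https://192.168.100.190:2379" \
set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'    # flanneld allocates each node a /24 from this range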

3. Enable the flannel network
[root@node1 ~]# bash flannel.sh https://192.168.100.170:2379,https://192.168.100.180:2379,https://192.168.100.190:2379
    Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.

4. Configure docker to use flannel
[root@node1 ~]# vim /usr/lib/systemd/system/docker.service

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
EnvironmentFile=/run/flannel/subnet.env         "add this line"
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock    "add $DOCKER_NETWORK_OPTIONS"
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always

[root@node1 ~]# cat /run/flannel/subnet.env
DOCKER_OPT_BIP="--bip=172.17.42.1/24"
DOCKER_OPT_IPMASQ="--ip-masq=false"
DOCKER_OPT_MTU="--mtu=1450"
//Note: bip sets the subnet that docker0 is started with
DOCKER_NETWORK_OPTIONS=" --bip=172.17.42.1/24 --ip-masq=false --mtu=1450" 
5. Restart the docker service
[root@node1 ~]# systemctl daemon-reload
[root@node1 ~]# systemctl restart docker

6. Check the flannel network
[root@node1 ~]# ifconfig
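After the restart, docker0 should have moved into the subnet flannel assigned to this node. A quick way to compare the two interfaces (standard iproute2, nothing specific to this setup):

[root@node1 ~]# ip -4 addr show flannel.1 | grep inet
[root@node1 ~]# ip -4 addr show docker0 | grep inet     # both should sit in the same flannel-assigned /24 (e.g. 172.17.42.0/24 above)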

###node2
Node2 is configured the same way as node1; the steps below are the same commands run on node2.

1. Unpack the flannel package, create the k8s working directories and install the binaries
[root@node2 ~]# tar zxvf flannel-v0.10.0-linux-amd64.tar.gz
flanneld
mk-docker-opts.sh
README.md
[root@node2 ~]# mkdir /opt/kubernetes/{cfg,bin,ssl} -p
[root@node2 ~]# mv mk-docker-opts.sh flanneld /opt/kubernetes/bin/

2. Write the same flannel.sh service script as on node1, then enable the flannel network
[root@node2 ~]# bash flannel.sh https://192.168.100.170:2379,https://192.168.100.180:2379,https://192.168.100.190:2379
    Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.

3. Configure docker to use flannel
Edit /usr/lib/systemd/system/docker.service exactly as on node1: add EnvironmentFile=/run/flannel/subnet.env and $DOCKER_NETWORK_OPTIONS to ExecStart. Flannel hands node2 its own subnet (172.17.36.0/24 in the container test below), so /run/flannel/subnet.env differs from node1's.

4. Restart the docker service
[root@node2 ~]# systemctl daemon-reload
[root@node2 ~]# systemctl restart docker

5. Check the flannel network
[root@node2 ~]# ifconfig

#####Test: ping a container on the other node's docker0 subnet to prove that flannel routes traffic between hosts
[root@node1 ~]# docker run -it centos:7 /bin/bash

[root@5f9a65565b53 /]# yum install net-tools -y
[root@5f9a65565b53 /]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.17.84.2  netmask 255.255.255.0  broadcast 172.17.84.255
        ether 02:42:ac:11:54:02  txqueuelen 0  (Ethernet)
        RX packets 18192  bytes 13930229 (13.2 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 6179  bytes 337037 (329.1 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        loop  txqueuelen 1  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@node2 ~]# docker run -it centos:7 /bin/bash

[root@abbc159a6378 /]# yum install net-tools -y
[root@abbc159a6378 /]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.17.36.2  netmask 255.255.255.0  broadcast 172.17.36.255
        ether 02:42:ac:11:24:02  txqueuelen 0  (Ethernet)
        RX packets 18192  bytes 13930229 (13.2 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 6179  bytes 337037 (329.1 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

#Test
[root@abbc159a6378 /]# ping 172.17.84.2
[root@5f9a65565b53 /]# ping 172.17.36.2
"容器相互能ping通就说明容器间能跨宿主机相互访问"

四: Deploy the master components
//Operate on the master: generate the certificates for the api-server
1. On the master node, generate the api-server certificates
[root@localhost k8s]# unzip master.zip
[root@localhost k8s]# mkdir /opt/kubernetes/{cfg,bin,ssl} -p    "directories for config files, binaries and certificates"
[root@localhost k8s]# mkdir k8s-cert
[root@localhost k8s]# cd k8s-cert/
[root@localhost k8s-cert]# ls           "upload k8s-cert.sh here"
k8s-cert.sh
[root@master k8s-cert]# cat k8s-cert.sh
"k8s-cert.sh writes ca-config.json, ca-csr.json, server-csr.json, admin-csr.json and kube-proxy-csr.json, then calls cfssl to generate the corresponding certificates"
(The body of k8s-cert.sh, the kube-apiserver/kube-scheduler/kube-controller-manager deployment, the bootstrap/kubeconfig setup and the kubelet start-up on node1 were lost from the source; that section ended with node1 just registered in kubectl get node, age 31s, v1.12.3.)
#'//If a single node is NotReady, check its kubelet; if many nodes are NotReady, check the apiserver, and if the apiserver is fine, check the VIP address and keepalived'
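With the control-plane components running on the master, their health can be verified with the standard componentstatuses query (plain kubectl; the output lists the scheduler, controller-manager and the etcd members):

[root@master ~]# kubectl get cs     # every entry should report Healthy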

#---------------------------On node1: start the kube-proxy service
[root@node1 ~]# ls
anaconda-ks.cfg    flannel-v0.10.0-linux-amd64.tar.gz  node.zip
docker-install.sh  initial-setup-ks.cfg                proxy.sh
flannel.sh         kubelet.sh                          README.md
[root@node1 ~]# bash proxy.sh 192.168.100.180
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
[root@node1 ~]# systemctl status kube-proxy.service
● kube-proxy.service - Kubernetes Proxy
   Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2020-09-29 12:04:50 CST; 9s ago
 Main PID: 34171 (kube-proxy)
    Tasks: 0
   Memory: 8.2M
   CGroup: /system.slice/kube-proxy.service
           └─34171 /opt/kubernetes/bin/kube-proxy --logtostderr=true --v=4 -...
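Because the services log to stderr (--logtostderr=true), troubleshooting output ends up in the systemd journal; for example:

[root@node1 ~]# journalctl -u kube-proxy.service -f     # follow kube-proxy logs; the same works for kubelet.service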

#Deploy node2
#----------------------------Operate on node1
#Copy the existing /opt/kubernetes directory to the other node and modify it there
[root@node1 ~]# scp -r /opt/kubernetes/ root@192.168.100.190:/opt/

#Copy the kubelet and kube-proxy service files to node2
[root@node1 ~]# scp /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@192.168.100.190:/usr/lib/systemd/system/
root@192.168.100.190's password: 
kubelet.service                              100%  264   159.9KB/s   00:00    
kube-proxy.service                           100%  231   302.4KB/s   00:00    
[root@node1 ~]# systemctl enable kubelet.service

#------------------------------Operate on node2
1. Modify the IP address in the three configuration files
#First delete the certificates copied over from node1; node2 will request its own certificates later
[root@node2 ~]#  cd kubeconfig/
[root@node2 kubeconfig]# cd /opt/kubernetes/ssl/
[root@node2 ssl]# ls
kubelet-client-2020-09-29-12-03-29.pem  kubelet.crt
kubelet-client-current.pem              kubelet.key
[root@node2 ssl]# rm -rf *
[root@node2 ssl]# ls
[root@node2 ssl]# cd ../cfg/

2. Modify the configuration files, start the services and check their status
#Modify the kubelet, kubelet.config and kube-proxy configuration files (three files)
[root@node2 cfg]# vim kubelet

KUBELET_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.100.190 \           "change to node2's address"
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--config=/opt/kubernetes/cfg/kubelet.config \
--cert-dir=/opt/kubernetes/ssl \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"

[root@node2 cfg]# vim kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.100.190                    "node2's address"
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true

[root@node2 cfg]# vim kube-proxy

KUBE_PROXY_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.100.190 \       "node2's address"
--cluster-cidr=10.0.0.0/24 \
--proxy-mode=ipvs \
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"

#Start the services
[root@node2 cfg]# systemctl start kubelet.service
[root@node2 cfg]# systemctl enable kubelet.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@node2 cfg]# systemctl start kube-proxy.service
[root@node2 cfg]# systemctl enable kube-proxy.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
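Since kube-proxy runs with --proxy-mode=ipvs here, the virtual-server rules it programs can be listed with ipvsadm once Services exist. ipvsadm is an extra package, not installed by the steps above:

[root@node2 cfg]# yum install -y ipvsadm
[root@node2 cfg]# ipvsadm -Ln     # lists the ipvs virtual servers and backends created for each Service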

3. On the master, check the pending request and approve node2's certificate
//On the master, the new CSR shows up as Pending
[root@master kubeconfig]# kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr-Q22FXrUtwbkKu5b0LQcMbbyXYMuCMkGKUyH0ME1x2ow   47s   kubelet-bootstrap   Pending
node-csr-lk45yzxFkiUhV8b36fmhmFsZdqtD8JUWV1Vkiq9w7Nw   12m   kubelet-bootstrap   Approved,Issued
[root@master kubeconfig]# kubectl get node
NAME              STATUS   ROLES    AGE     VERSION
192.168.100.180   Ready    <none>   6m26s   v1.12.3

[root@master kubeconfig]# kubectl certificate approve node-csr-Q22FXrUtwbkKu5b0LQcMbbyXYMuCMkGKUyH0ME1x2ow              "approve the request and allow the node to join the cluster"
certificatesigningrequest.certificates.k8s.io/node-csr-Q22FXrUtwbkKu5b0LQcMbbyXYMuCMkGKUyH0ME1x2ow approved

"master查看群集中的节点"
[root@master kubeconfig]# kubectl get node
NAME              STATUS   ROLES    AGE     VERSION
192.168.100.180   Ready    <none>   8m52s   v1.12.3
192.168.100.190   Ready    <none>   43s     v1.12.3
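As a final smoke test, a throwaway pod can be scheduled to confirm the nodes actually accept workloads (the deployment name and image below are just examples):

[root@master kubeconfig]# kubectl run nginx-test --image=nginx
[root@master kubeconfig]# kubectl get pods -o wide     # the pod should land on one of the nodes with a 172.17.x.x address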

Source: https://blog.51cto.com/14625831/2548608