
k8s: Have You Mastered It Yet? Part 2: Deployment (k8s downwardapi)


Due to the platform's length limit, this article is presented in two parts; this is the second part, covering deployment.

7.2.4 k8s Cluster Deployment: Deploying the flannel Network

flannel is one kind of overlay network: it encapsulates the original packet inside another network packet for routing and forwarding. It currently supports UDP, VXLAN, AWS VPC, and GCE route-based forwarding, among others. Other mainstream multi-host container networking solutions include tunnel schemes (Weave, Open vSwitch) and routing schemes (Calico).

1. Write the allocated subnet range into etcd for flanneld to use

// Run this on the master node first

[root@k8s-master flannel]# cd /root/software/ssl/

[root@k8s-master ssl]# etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.168.91:2379,https://192.168.168.92:2379,https://192.168.168.93:2379" set /coreos.com/network/config '{"Network":"172.17.0.0/16","Backend":{"Type":"vxlan"} }'
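
To confirm the key was written correctly, read it back with the same certificate flags; it should echo the JSON stored above:

[root@k8s-master ssl]# etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.168.91:2379,https://192.168.168.92:2379,https://192.168.168.93:2379" get /coreos.com/network/config

{"Network":"172.17.0.0/16","Backend":{"Type":"vxlan"} }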

2. Unpack the flannel binaries and copy them to each node

[root@k8s-master ssl]# cd

[root@k8s-master ~]# tar zxf flannel-v0.9.1-linux-amd64.tar.gz

[root@k8s-master ~]# scp flanneld mk-docker-opts.sh 192.168.168.92:/opt/kubernetes/bin/

[root@k8s-master ~]# scp flanneld mk-docker-opts.sh 192.168.168.93:/opt/kubernetes/bin/

3. Configure flannel

[root@k8s-node1 ~]# vim /opt/kubernetes/cfg/flanneld

FLANNEL_OPTIONS="--etcd-endpoints=https://192.168.168.91:2379,https://192.168.168.92:2379,https://192.168.168.93:2379 -etcd-cafile=/opt/kubernetes/ssl/ca.pem -etcd-certfile=/opt/kubernetes/ssl/server.pem -etcd-keyfile=/opt/kubernetes/ssl/server-key.pem"

// Create a systemd unit file to manage flanneld

[root@k8s-node1 ~]# cat <<EOF >/usr/lib/systemd/system/flanneld.service

[Unit]

Description=Flanneld overlay address etcd agent

After=network-online.target network.target

Before=docker.service

[Service]

Type=notify

EnvironmentFile=/opt/kubernetes/cfg/flanneld

ExecStart=/opt/kubernetes/bin/flanneld --ip-masq $FLANNEL_OPTIONS

ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env

Restart=on-failure

[Install]

WantedBy=multi-user.target

EOF

// Configure Docker to start on the flannel-assigned subnet by editing the Docker unit file

[root@k8s-node1 ~]# vim /usr/lib/systemd/system/docker.service

// Add the environment file below so that the IP range the docker0 bridge hands out sits in the same subnet as the flannel network.

EnvironmentFile=/run/flannel/subnet.env

// Reference the $DOCKER_NETWORK_OPTIONS variable so that dockerd uses the flannel-assigned bridge IP.

ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
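
Once flanneld and mk-docker-opts.sh have run (step 4 below), /run/flannel/subnet.env holds the options dockerd picks up through the EnvironmentFile line above. A typical result looks like this (the values are illustrative and will match your node's lease, e.g. 172.17.73.1/24 on node1):

[root@k8s-node1 ~]# cat /run/flannel/subnet.env

DOCKER_NETWORK_OPTIONS=" --bip=172.17.73.1/24 --ip-masq=false --mtu=1450"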

4. Restart flannel on k8s-node1

[root@k8s-node1 ~]# systemctl daemon-reload

[root@k8s-node1 ~]# systemctl start flanneld

[root@k8s-node1 ~]# systemctl restart docker

[root@k8s-node1 ~]# ifconfig

docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500

inet 172.17.73.1 netmask 255.255.255.0 broadcast 172.17.73.255

ether 02:42:64:c2:d1:4b txqueuelen 0 (Ethernet)

RX packets 0 bytes 0 (0.0 B)

RX errors 0 dropped 0 overruns 0 frame 0

TX packets 0 bytes 0 (0.0 B)

TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0

flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1450

inet 172.17.73.0 netmask 255.255.255.255 broadcast 0.0.0.0

inet6 fe80::7818:e0ff:fe4a:589e prefixlen 64 scopeid 0x20<link>

ether 7a:18:e0:4a:58:9e txqueuelen 0 (Ethernet)

RX packets 0 bytes 0 (0.0 B)

RX errors 0 dropped 0 overruns 0 frame 0

TX packets 0 bytes 0 (0.0 B)

TX errors 0 dropped 8 overruns 0 carrier 0 collisions 0

[root@k8s-node1 ~]# scp /usr/lib/systemd/system/flanneld.service 192.168.168.93:/usr/lib/systemd/system/

[root@k8s-node1 ~]# scp /opt/kubernetes/cfg/flanneld 192.168.168.93:/opt/kubernetes/cfg/

[root@k8s-node1 ~]# scp /usr/lib/systemd/system/docker.service 192.168.168.93:/usr/lib/systemd/system/

5. Restart flannel on k8s-node2

[root@k8s-node2 ~]# systemctl daemon-reload

[root@k8s-node2 ~]# systemctl start flanneld

[root@k8s-node2 ~]# systemctl restart docker

[root@k8s-node2 ~]# ifconfig // check that flannel.1 and docker0 are in the same subnet

docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500

inet 172.17.16.1 netmask 255.255.255.0 broadcast 172.17.16.255

ether 02:42:11:8d:bd:9e txqueuelen 0 (Ethernet)

RX packets 0 bytes 0 (0.0 B)

RX errors 0 dropped 0 overruns 0 frame 0

TX packets 0 bytes 0 (0.0 B)

TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0

flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1450

inet 172.17.16.0 netmask 255.255.255.255 broadcast 0.0.0.0

inet6 fe80::7448:27ff:fef5:b4bd prefixlen 64 scopeid 0x20<link>

ether 76:48:27:f5:b4:bd txqueuelen 0 (Ethernet)

RX packets 0 bytes 0 (0.0 B)

RX errors 0 dropped 0 overruns 0 frame 0

TX packets 0 bytes 0 (0.0 B)

TX errors 0 dropped 27 overruns 0 carrier 0 collisions 0

6. Test whether the flanneld installation succeeded

[root@k8s-node2 ~]# ping 172.17.73.1 // just ping node1's docker0 bridge IP

PING 172.17.73.1 (172.17.73.1) 56(84) bytes of data.

64 bytes from 172.17.73.1: icmp_seq=1 ttl=64 time=0.239 ms

64 bytes from 172.17.73.1: icmp_seq=2 ttl=64 time=0.187 ms
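
Pinging the bridge verifies host-to-host routing; for an end-to-end check you can also start a throwaway container on each node and ping across nodes (a quick sketch; the busybox image and the container IP are illustrative):

[root@k8s-node1 ~]# docker run -it --rm busybox sh // note this container's eth0 IP, e.g. 172.17.73.2

[root@k8s-node2 ~]# docker run -it --rm busybox ping -c 2 172.17.73.2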

At this point, the flannel configuration on the node side is complete.

7.2.5 k8s Cluster Deployment: Deploying the Kubernetes Master Components

1. Put kubectl on the command path

[root@k8s-master ~]# cp kubectl /opt/kubernetes/bin/

[root@k8s-master ~]# chmod +x /opt/kubernetes/bin/kubectl

2. Create the TLS Bootstrapping Token

Rather than configuring a certificate for every kubelet by hand, the Kubernetes cluster issues kubelet certificates itself; the bootstrap token created below is what lets each kubelet request its own certificate.

[root@k8s-master ~]# cd /opt/kubernetes/

[root@k8s-master kubernetes]# export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')

[root@k8s-master kubernetes]# cat > token.csv <<EOF

${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"

EOF
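
The file follows the format token,user,uid,group. You can inspect the result (the token value below is illustrative; yours is random):

[root@k8s-master kubernetes]# cat token.csv

0fb61c46f773ae652d54c01d43f32e7d,kubelet-bootstrap,10001,"system:kubelet-bootstrap"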

3. Create the kubelet kubeconfig

[root@k8s-master kubernetes]# export KUBE_APISERVER="https://192.168.168.91:6443"

// Set the cluster parameters

[root@k8s-master kubernetes]# cd /root/software/ssl/

[root@k8s-master ssl]# kubectl config set-cluster kubernetes \

--certificate-authority=./ca.pem \

--embed-certs=true \

--server=${KUBE_APISERVER} \

--kubeconfig=bootstrap.kubeconfig

Cluster "kubernetes" set.

// Set the client authentication parameters

[root@k8s-master ssl]# kubectl config set-credentials kubelet-bootstrap \

--token=${BOOTSTRAP_TOKEN} \

--kubeconfig=bootstrap.kubeconfig

User "kubelet-bootstrap" set.

// Set the context parameters

[root@k8s-master ssl]# kubectl config set-context default \

--cluster=kubernetes \

--user=kubelet-bootstrap \

--kubeconfig=bootstrap.kubeconfig

Context "default" created.

// Set the default context

[root@k8s-master ssl]# kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

Switched to context "default".
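
To sanity-check the generated kubeconfig, print it back; you should see the cluster, user, and default context, with the embedded certificate data redacted:

[root@k8s-master ssl]# kubectl config view --kubeconfig=bootstrap.kubeconfig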

4. Create the kube-proxy kubeconfig

[root@k8s-master ssl]# kubectl config set-cluster kubernetes \

--certificate-authority=./ca.pem \

--embed-certs=true \

--server=${KUBE_APISERVER} \

--kubeconfig=kube-proxy.kubeconfig

Cluster "kubernetes" set.

[root@k8s-master ssl]# kubectl config set-credentials kube-proxy \

--client-certificate=./kube-proxy.pem \

--client-key=./kube-proxy-key.pem \

--embed-certs=true \

--kubeconfig=kube-proxy.kubeconfig

User "kube-proxy" set.

[root@k8s-master ssl]# kubectl config set-context default \

--cluster=kubernetes \

--user=kube-proxy \

--kubeconfig=kube-proxy.kubeconfig

Context "default" created.

[root@k8s-master ssl]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

Switched to context "default".

5. Deploy the kube-apiserver component

[root@k8s-master ~]# unzip master.zip

Archive: master.zip

inflating: kube-apiserver

inflating: kube-controller-manager

inflating: kube-scheduler

inflating: apiserver.sh

inflating: controller-manager.sh

inflating: scheduler.sh

replace kubectl? [y]es, [n]o, [A]ll, [N]one, [r]ename: y

inflating: kubectl

[root@k8s-master ~]# mv kube-controller-manager kube-scheduler kube-apiserver /opt/kubernetes/bin/

[root@k8s-master ~]# chmod +x /opt/kubernetes/bin/*

[root@k8s-master ~]# cp /opt/kubernetes/token.csv /opt/kubernetes/cfg/

[root@k8s-master ~]# chmod +x *.sh

[root@k8s-master ~]# ./apiserver.sh 192.168.168.91 https://192.168.168.91:2379,https://192.168.168.92:2379,https://192.168.168.93:2379

Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.

// The script takes two arguments, the master IP and the etcd endpoint list, and installs and starts the apiserver component.
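
Before moving on, it is worth confirming that the apiserver came up and is listening on the secure port (the netstat check assumes the net-tools package is installed):

[root@k8s-master ~]# systemctl status kube-apiserver // should show active (running)

[root@k8s-master ~]# netstat -lntp | grep kube-apiserver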

6. Deploy the kube-controller-manager component

[root@k8s-master ~]# sh controller-manager.sh 127.0.0.1

7. Deploy the kube-scheduler component

[root@k8s-master ~]# sh scheduler.sh 127.0.0.1

8. Check whether the components are running properly

[root@k8s-master kubernetes]# kubectl get cs

NAME STATUS MESSAGE ERROR

scheduler Healthy ok

controller-manager Healthy ok

etcd-0 Healthy {"health": "true"}

etcd-2 Healthy {"health": "true"}

etcd-1 Healthy {"health": "true"}

7.2.6 k8s Cluster Deployment: Deploying the Kubernetes Node Components

1. Prepare the environment on the nodes

[root@k8s-master ~]# cd /root/software/ssl/

[root@k8s-master ssl]# scp *kubeconfig 192.168.168.92:/opt/kubernetes/cfg/

[root@k8s-master ssl]# scp *kubeconfig 192.168.168.93:/opt/kubernetes/cfg/

[root@k8s-master ssl]# cd

[root@k8s-master ~]# scp -r ./node.zip 192.168.168.92:/root/

[root@k8s-master ~]# scp -r ./node.zip 192.168.168.93:/root/

[root@k8s-node1 ~]# unzip node.zip

[root@k8s-node1 ~]# mv kubelet kube-proxy /opt/kubernetes/bin/

[root@k8s-node1 ~]# chmod +x /opt/kubernetes/bin/*

[root@k8s-node2 ~]# unzip node.zip

[root@k8s-node2 ~]# mv kubelet kube-proxy /opt/kubernetes/bin/

[root@k8s-node2 ~]# chmod +x /opt/kubernetes/bin/*

// Run on the master node

[root@k8s-master ~]# kubectl create clusterrolebinding kubelet-bootstrap \

--clusterrole=system:node-bootstrapper \

--user=kubelet-bootstrap
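
Confirm that the binding exists:

[root@k8s-master ~]# kubectl get clusterrolebinding kubelet-bootstrap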

2. Deploy the kubelet component (repeat on node2)

// Run the kubelet install script to generate the kubelet configuration. The first argument is the node IP; the second is the cluster DNS address.

[root@k8s-node1 node]# sh kubelet.sh 192.168.168.92 192.168.168.25

3. Deploy the kube-proxy component (repeat on node2)

// Run the kube-proxy install script

[root@k8s-node1 ~]# sh proxy.sh 192.168.168.92

4. Check whether the node components were installed successfully

[root@k8s-node1 ~]# ps -ef | grep kube

root 60588 1 1 15:31 ? 00:00:20 /opt/kubernetes/bin/kubelet --logtostderr=true --v=4 --address=192.168.168.92 --hostname-override=192.168.168.92 --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig --experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig --cert-dir=/opt/kubernetes/ssl --allow-privileged=true --cluster-dns=192.168.168.25 --cluster-domain=cluster.local --fail-swap-on=false --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0

root 60776 1 0 15:32 ? 00:00:03 /opt/kubernetes/bin/kube-proxy --logtostderr=true --v=4 --hostname-override=192.168.168.92 --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig

7.2.7 Viewing the Automatically Issued Certificates

1. View the pending certificate signing requests from the nodes

[root@k8s-master ~]# kubectl get csr

NAME AGE REQUESTOR CONDITION

node-csr-BZ3POcwU6NyvCZ2TGzqqaD2uYR1zHbMUjmdNWhjwJgE 45s kubelet-bootstrap Pending

node-csr-M8rbZpIYMYHtXxbFH9iBtlnQogAvwT7WfyF9V70k3zQ 48s kubelet-bootstrap Pending

2. Approve the nodes' requests to join the cluster

[root@k8s-master ~]# kubectl certificate approve node-csr-M8rbZpIYMYHtXxbFH9iBtlnQogAvwT7WfyF9V70k3zQ

[root@k8s-master ~]# kubectl certificate approve node-csr-BZ3POcwU6NyvCZ2TGzqqaD2uYR1zHbMUjmdNWhjwJgE
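
With more nodes this gets tedious; as a convenience, all pending CSRs can be approved in one line (assuming no unrelated requests are pending):

[root@k8s-master ~]# kubectl get csr | grep Pending | awk '{print $1}' | xargs kubectl certificate approve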

3. Check that the nodes registered successfully

[root@k8s-master ~]# kubectl get node

NAME STATUS ROLES AGE VERSION

192.168.168.92 Ready <none> 41m v1.9.0

192.168.168.93 Ready <none> 41m v1.9.0
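
As an optional smoke test (a sketch; the nginx image is only an example and must be pullable from the nodes), schedule a small workload and check that pods are placed on both nodes:

[root@k8s-master ~]# kubectl run nginx --image=nginx --replicas=2

[root@k8s-master ~]# kubectl get pods -o wide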

At this point, the k8s cluster installation is complete.
