diff --git a/doc/v1.26.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md.md b/doc/v1.26.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md.md
new file mode 100644
index 0000000..9633c5f
--- /dev/null
+++ b/doc/v1.26.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md.md
@@ -0,0 +1,3579 @@
+# Binary installation of Kubernetes (k8s) v1.26.0 with IPv4/IPv6 dual stack
+
+
+
+[https://github.com/cby-chen/Kubernetes](https://github.com/cby-chen/Kubernetes) Open source is not easy — please give the repo a star, thank you!
+
+# Introduction
+
+A binary, high-availability installation of Kubernetes (k8s) with IPv4+IPv6 dual-stack support.
+
+I use IPv6 so the cluster can be reached over the public Internet, which is why static IPv6 addresses are configured.
+
+If you have no IPv6 environment, or simply do not want IPv6, just skip assigning IPv6 addresses to the hosts.
+
+Not configuring IPv6 does not affect any later steps; the cluster still supports IPv6, which leaves room for future expansion.
+
+If you do not need IPv6, simply leave the NICs without IPv6 addresses, but do not delete or modify the IPv6-related settings below, otherwise things will break.
+
+# It is strongly recommended to read this document on GitHub !!!
+
+## The GitHub copy is updated when problems are found, and new-version documents are published there first !!!
+
+## Project repository: https://github.com/cby-chen/Kubernetes
+
+
+# 1. Environment
+
+| Host | IP address | Role | Software |
+| -------- | --------- | ---------- | ------------------------------------------------------------ |
+| Master01 | 192.168.1.61 | master node | kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubelet, kube-proxy, nfs-client, haproxy, keepalived, nginx |
+| Master02 | 192.168.1.62 | master node | kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubelet, kube-proxy, nfs-client, haproxy, keepalived, nginx |
+| Master03 | 192.168.1.63 | master node | kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubelet, kube-proxy, nfs-client, haproxy, keepalived, nginx |
+| Node01 | 192.168.1.64 | node | kubelet, kube-proxy, nfs-client, nginx |
+| Node02 | 192.168.1.65 | node | kubelet, kube-proxy, nfs-client, nginx |
+| | 192.168.8.66 | VIP | |
+
+| Software | Version |
+| :----------------------------------------------------------- | :-------------- |
+| kernel | 6.0.11 |
+| OS | CentOS 8 (CentOS 7 and Ubuntu also work) |
+| kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy | v1.26.0 |
+| etcd | v3.5.6 |
+| containerd | v1.6.10 |
+| docker | v20.10.21 |
+| cfssl | v1.6.3 |
+| cni | v1.1.1 |
+| crictl | v1.26.0 |
+| haproxy | v1.8.27 |
+| keepalived | v2.1.5 |
+
+
+
+Network segments
+
+Physical hosts: 192.168.1.0/24
+
+Service: 10.96.0.0/12
+
+Pod: 172.16.0.0/12
+
+A bundled package with all required artifacts is available: https://github.com/cby-chen/Kubernetes/releases/download/v1.26.0/kubernetes-v1.26.0.tar
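+
+For reference, a minimal way to fetch and unpack that bundle on master01 (the extracted directory name `kubernetes-v1.26.0` is assumed from the tarball name, and matches the directory used later in section 5.1.2):
+
+```shell
+# Download the pre-packaged artifacts and unpack them into a working directory
+wget https://github.com/cby-chen/Kubernetes/releases/download/v1.26.0/kubernetes-v1.26.0.tar
+tar xf kubernetes-v1.26.0.tar
+cd kubernetes-v1.26.0/
+```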
+
+
+
+## 1.1. Basic OS configuration for k8s
+
+### 1.2. Configure the IP addresses
+
+```shell
+ssh root@192.168.1.143 "nmcli con mod eth0 ipv4.addresses 192.168.1.61/24; nmcli con mod eth0 ipv4.gateway 192.168.1.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns '8.8.8.8'; nmcli con up eth0"
+ssh root@192.168.1.144 "nmcli con mod eth0 ipv4.addresses 192.168.1.62/24; nmcli con mod eth0 ipv4.gateway 192.168.1.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns '8.8.8.8'; nmcli con up eth0"
+ssh root@192.168.1.145 "nmcli con mod eth0 ipv4.addresses 192.168.1.63/24; nmcli con mod eth0 ipv4.gateway 192.168.1.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns '8.8.8.8'; nmcli con up eth0"
+ssh root@192.168.1.146 "nmcli con mod eth0 ipv4.addresses 192.168.1.64/24; nmcli con mod eth0 ipv4.gateway 192.168.1.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns '8.8.8.8'; nmcli con up eth0"
+ssh root@192.168.1.148 "nmcli con mod eth0 ipv4.addresses 192.168.1.65/24; nmcli con mod eth0 ipv4.gateway 192.168.1.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns '8.8.8.8'; nmcli con up eth0"
+
+# If a host has no IPv6, simply skip this block
+ssh root@192.168.1.61 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::10; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns '2400:3200::1'; nmcli con up eth0"
+ssh root@192.168.1.62 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::20; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns '2400:3200::1'; nmcli con up eth0"
+ssh root@192.168.1.63 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::30; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns '2400:3200::1'; nmcli con up eth0"
+ssh root@192.168.1.64 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::40; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns '2400:3200::1'; nmcli con up eth0"
+ssh root@192.168.1.65 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::50; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns '2400:3200::1'; nmcli con up eth0"
+
+# Inspect the resulting NIC configuration
+[root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0
+TYPE=Ethernet
+PROXY_METHOD=none
+BROWSER_ONLY=no
+BOOTPROTO=none
+DEFROUTE=yes
+IPV4_FAILURE_FATAL=no
+IPV6INIT=yes
+IPV6_AUTOCONF=no
+IPV6_DEFROUTE=yes
+IPV6_FAILURE_FATAL=no
+IPV6_ADDR_GEN_MODE=stable-privacy
+NAME=eth0
+UUID=424fd260-c480-4899-97e6-6fc9722031e8
+DEVICE=eth0
+ONBOOT=yes
+IPADDR=192.168.1.61
+PREFIX=24
+GATEWAY=192.168.1.1
+DNS1=8.8.8.8
+IPV6ADDR=fc00:43f4:1eea:1::10/128
+IPV6_DEFAULTGW=fc00:43f4:1eea:1::1
+DNS2=2400:3200::1
+[root@localhost ~]#
+
+```
+
+### 1.3. Set the hostnames
+
+```shell
+hostnamectl set-hostname k8s-master01
+hostnamectl set-hostname k8s-master02
+hostnamectl set-hostname k8s-master03
+hostnamectl set-hostname k8s-node01
+hostnamectl set-hostname k8s-node02
+```
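+
+If you prefer to push all of the hostnames from a single machine instead of logging into each host, a small loop over the host table from section 1 works just as well (the IP-to-name mapping below is an assumption taken from that table; adjust it if yours differs):
+
+```shell
+# Convenience loop: set every hostname over SSH in one pass
+for pair in 192.168.1.61:k8s-master01 192.168.1.62:k8s-master02 192.168.1.63:k8s-master03 192.168.1.64:k8s-node01 192.168.1.65:k8s-node02; do
+  ssh root@"${pair%%:*}" "hostnamectl set-hostname ${pair##*:}"
+done
+```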
+
+### 1.4. Configure the package repositories
+
+```shell
+# 对于 Ubuntu
+sed -i 's/cn.archive.ubuntu.com/mirrors.ustc.edu.cn/g' /etc/apt/sources.list
+
+# 对于 CentOS 7
+sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \
+ -e 's|^#baseurl=http://mirror.centos.org|baseurl=https://mirrors.tuna.tsinghua.edu.cn|g' \
+ -i.bak \
+ /etc/yum.repos.d/CentOS-*.repo
+
+# 对于 CentOS 8
+sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \
+ -e 's|^#baseurl=http://mirror.centos.org/$contentdir|baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos|g' \
+ -i.bak \
+ /etc/yum.repos.d/CentOS-*.repo
+
+# 对于私有仓库
+sed -e 's|^mirrorlist=|#mirrorlist=|g' -e 's|^#baseurl=http://mirror.centos.org/\$contentdir|baseurl=http://192.168.1.123/centos|g' -i.bak /etc/yum.repos.d/CentOS-*.repo
+```
+
+### 1.5. Install some essential tools
+
+```shell
+# 对于 Ubuntu
+apt update && apt upgrade -y && apt install -y wget psmisc vim net-tools nfs-kernel-server telnet lvm2 git tar curl
+
+# 对于 CentOS 7
+yum update -y && yum -y install wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git tar curl
+
+# 对于 CentOS 8
+yum update -y && yum -y install wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git network-scripts tar curl
+```
+
+### 1.6. Optionally download the required tools
+
+```shell
+1. Download the Kubernetes 1.26.x binary package
+GitHub changelog / download page: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md
+
+wget https://dl.k8s.io/v1.26.0/kubernetes-server-linux-amd64.tar.gz
+
+2. Download the etcdctl binary package
+GitHub releases: https://github.com/etcd-io/etcd/releases
+
+wget https://ghproxy.com/https://github.com/etcd-io/etcd/releases/download/v3.5.6/etcd-v3.5.6-linux-amd64.tar.gz
+
+3. Download the docker binaries
+Download page: https://download.docker.com/linux/static/stable/x86_64/
+
+wget https://download.docker.com/linux/static/stable/x86_64/docker-20.10.21.tgz
+
+4. Download cri-dockerd
+GitHub releases: https://github.com/Mirantis/cri-dockerd/releases/
+
+wget https://ghproxy.com/https://github.com/Mirantis/cri-dockerd/releases/download/v0.2.6/cri-dockerd-0.2.6.amd64.tgz
+
+5. For containerd, download the package that bundles the cni plugins
+GitHub releases: https://github.com/containerd/containerd/releases
+
+wget https://ghproxy.com/https://github.com/containerd/containerd/releases/download/v1.6.10/cri-containerd-cni-1.6.10-linux-amd64.tar.gz
+
+6. Download the cfssl binaries
+GitHub releases: https://github.com/cloudflare/cfssl/releases
+
+wget https://ghproxy.com/https://github.com/cloudflare/cfssl/releases/download/v1.6.3/cfssl_1.6.3_linux_amd64
+wget https://ghproxy.com/https://github.com/cloudflare/cfssl/releases/download/v1.6.3/cfssljson_1.6.3_linux_amd64
+wget https://ghproxy.com/https://github.com/cloudflare/cfssl/releases/download/v1.6.3/cfssl-certinfo_1.6.3_linux_amd64
+
+7. Download the cni plugins
+GitHub releases: https://github.com/containernetworking/plugins/releases
+
+wget https://ghproxy.com/https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz
+
+8. Download the crictl client binary
+GitHub releases: https://github.com/kubernetes-sigs/cri-tools/releases
+
+wget https://ghproxy.com/https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.26.0/crictl-v1.26.0-linux-amd64.tar.gz
+```
+
+### 1.7. Disable the firewall
+
+```shell
+# Ubuntu忽略,CentOS执行
+systemctl disable --now firewalld
+```
+
+### 1.8. Disable SELinux
+
+```shell
+# Ubuntu忽略,CentOS执行
+setenforce 0
+sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
+```
+
+### 1.9. Disable swap
+
+```shell
+sed -ri 's/.*swap.*/#&/' /etc/fstab
+swapoff -a && sysctl -w vm.swappiness=0
+
+cat /etc/fstab
+# /dev/mapper/centos-swap swap swap defaults 0 0
+```
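+
+A quick check that swap really is off (expected: `swapon --show` prints nothing and the Swap line shows 0B):
+
+```shell
+swapon --show
+free -h | grep -i swap
+```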
+
+### 1.10. Network configuration (pick one of the two methods)
+
+```shell
+# Ubuntu忽略,CentOS执行
+
+# 方式一
+# systemctl disable --now NetworkManager
+# systemctl start network && systemctl enable network
+
+# 方式二
+cat > /etc/NetworkManager/conf.d/calico.conf << EOF
+[keyfile]
+unmanaged-devices=interface-name:cali*;interface-name:tunl*
+EOF
+systemctl restart NetworkManager
+```
+
+### 1.11. Configure time synchronization
+
+```shell
+# 服务端
+# apt install chrony -y
+yum install chrony -y
+cat > /etc/chrony.conf << EOF
+pool ntp.aliyun.com iburst
+driftfile /var/lib/chrony/drift
+makestep 1.0 3
+rtcsync
+allow 192.168.1.0/24
+local stratum 10
+keyfile /etc/chrony.keys
+leapsectz right/UTC
+logdir /var/log/chrony
+EOF
+
+systemctl restart chronyd ; systemctl enable chronyd
+
+# 客户端
+# apt install chrony -y
+yum install chrony -y
+cat > /etc/chrony.conf << EOF
+pool 192.168.1.61 iburst
+driftfile /var/lib/chrony/drift
+makestep 1.0 3
+rtcsync
+keyfile /etc/chrony.keys
+leapsectz right/UTC
+logdir /var/log/chrony
+EOF
+
+systemctl restart chronyd ; systemctl enable chronyd
+
+#使用客户端进行验证
+chronyc sources -v
+```
+
+### 1.12. Configure ulimit
+
+```shell
+ulimit -SHn 65535
+cat >> /etc/security/limits.conf << EOF
+* soft nofile 655360
+* hard nofile 131072
+* soft nproc 655350
+* hard nproc 655350
+* soft memlock unlimited
+* hard memlock unlimited
+EOF
+```
+
+### 1.16. Install ipvsadm and load the IPVS modules
+
+```shell
+yum install ipvsadm ipset sysstat conntrack libseccomp -y
+
+cat > /etc/modules-load.d/ipvs.conf << EOF
+ip_vs
+ip_vs_rr
+ip_vs_wrr
+ip_vs_sh
+nf_conntrack
+ip_tables
+ip_set
+xt_set
+ipt_set
+ipt_rpfilter
+ipt_REJECT
+ipip
+EOF
+
+systemctl restart systemd-modules-load.service
+
+lsmod | grep -e ip_vs -e nf_conntrack
+```
+
+### 1.17. Tune kernel parameters
+
+```shell
+cat > /etc/sysctl.d/k8s.conf << EOF
+net.ipv4.ip_forward = 1
+net.bridge.bridge-nf-call-iptables = 1
+fs.may_detach_mounts = 1
+vm.overcommit_memory=1
+vm.panic_on_oom=0
+fs.inotify.max_user_watches=89100
+fs.file-max=52706963
+fs.nr_open=52706963
+net.netfilter.nf_conntrack_max=2310720
+
+net.ipv4.tcp_keepalive_time = 600
+net.ipv4.tcp_keepalive_probes = 3
+net.ipv4.tcp_keepalive_intvl =15
+net.ipv4.tcp_max_tw_buckets = 36000
+net.ipv4.tcp_tw_reuse = 1
+net.ipv4.tcp_max_orphans = 327680
+net.ipv4.tcp_orphan_retries = 3
+net.ipv4.tcp_syncookies = 1
+net.ipv4.tcp_max_syn_backlog = 16384
+net.ipv4.ip_conntrack_max = 65536
+net.ipv4.tcp_max_syn_backlog = 16384
+net.ipv4.tcp_timestamps = 0
+net.core.somaxconn = 16384
+
+net.ipv6.conf.all.disable_ipv6 = 0
+net.ipv6.conf.default.disable_ipv6 = 0
+net.ipv6.conf.lo.disable_ipv6 = 0
+net.ipv6.conf.all.forwarding = 1
+EOF
+
+sysctl --system
+```
+
+### 1.18. Configure /etc/hosts resolution on all nodes
+
+```shell
+cat > /etc/hosts << EOF
+127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
+::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
+192.168.1.61 k8s-master01
+192.168.1.62 k8s-master02
+192.168.1.63 k8s-master03
+192.168.1.64 k8s-node01
+192.168.1.65 k8s-node02
+192.168.8.66 lb-vip
+EOF
+```
+
+# 2. Install the basic k8s components
+
+## 2.1. Install Containerd as the runtime (recommended)
+
+### 2.1.4. Configure a registry mirror for docker.io
+
+```shell
+mkdir /etc/containerd/certs.d/docker.io -pv
+
+cat > /etc/containerd/certs.d/docker.io/hosts.toml << EOF
+server = "https://docker.io"
+[host."https://hub-mirror.c.163.com"]
+ capabilities = ["pull", "resolve"]
+EOF
+```
+
+### 2.1.5. Start containerd and enable it at boot
+
+```shell
+systemctl daemon-reload
+systemctl enable --now containerd
+systemctl restart containerd
+```
+
+### 2.1.6. Configure the runtime endpoint for the crictl client
+
+```shell
+# wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.26.0/crictl-v1.26.0-linux-amd64.tar.gz
+
+#解压
+tar xf crictl-v*-linux-amd64.tar.gz -C /usr/bin/
+#生成配置文件
+cat > /etc/crictl.yaml << EOF
+runtime-endpoint: unix:///run/containerd/containerd.sock
+image-endpoint: unix:///run/containerd/containerd.sock
+timeout: 10
+debug: false
+EOF
+
+# Restart containerd and verify the client can talk to it
+systemctl restart containerd
+crictl info
+```
+
+## 2.3. Download and install the k8s and etcd binaries (master01 only)
+
+```shell
+# Unpack the kubernetes server binaries into /usr/local/bin
+tar -xf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
+
+# Unpack etcd and etcdctl into /usr/local/bin
+tar -xf etcd*.tar.gz && mv etcd-*/etcd /usr/local/bin/ && mv etcd-*/etcdctl /usr/local/bin/
+
+# Check the versions
+kubelet --version
+etcdctl version
+
+# Send the binaries to the other nodes
+Master='k8s-master02 k8s-master03'
+Work='k8s-node01 k8s-node02'
+for NODE in $Master; do scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} $NODE:/usr/local/bin/; scp /usr/local/bin/etcd* $NODE:/usr/local/bin/; done
+for NODE in $Work; do scp /usr/local/bin/kube{let,-proxy} $NODE:/usr/local/bin/; done
+
+# Create the CNI plugin directory on all nodes
+mkdir -p /opt/cni/bin
+```
+
+## 2.4. Create the CSR files used later for certificate generation
+
+```shell
+mkdir pki
+cd pki
+
+cat > admin-csr.json << EOF
+{
+ "CN": "admin",
+ "key": {
+ "algo": "rsa",
+ "size": 2048
+ },
+ "names": [
+ {
+ "C": "CN",
+ "ST": "Beijing",
+ "L": "Beijing",
+ "O": "system:masters",
+ "OU": "Kubernetes-manual"
+ }
+ ]
+}
+EOF
+
+cat > ca-config.json << EOF
+{
+ "signing": {
+ "default": {
+ "expiry": "876000h"
+ },
+ "profiles": {
+ "kubernetes": {
+ "usages": [
+ "signing",
+ "key encipherment",
+ "server auth",
+ "client auth"
+ ],
+ "expiry": "876000h"
+ }
+ }
+ }
+}
+EOF
+
+cat > etcd-ca-csr.json << EOF
+{
+ "CN": "etcd",
+ "key": {
+ "algo": "rsa",
+ "size": 2048
+ },
+ "names": [
+ {
+ "C": "CN",
+ "ST": "Beijing",
+ "L": "Beijing",
+ "O": "etcd",
+ "OU": "Etcd Security"
+ }
+ ],
+ "ca": {
+ "expiry": "876000h"
+ }
+}
+EOF
+
+cat > front-proxy-ca-csr.json << EOF
+{
+ "CN": "kubernetes",
+ "key": {
+ "algo": "rsa",
+ "size": 2048
+ },
+ "ca": {
+ "expiry": "876000h"
+ }
+}
+EOF
+
+cat > kubelet-csr.json << EOF
+{
+ "CN": "system:node:\$NODE",
+ "key": {
+ "algo": "rsa",
+ "size": 2048
+ },
+ "names": [
+ {
+ "C": "CN",
+ "L": "Beijing",
+ "ST": "Beijing",
+ "O": "system:nodes",
+ "OU": "Kubernetes-manual"
+ }
+ ]
+}
+EOF
+
+cat > manager-csr.json << EOF
+{
+ "CN": "system:kube-controller-manager",
+ "key": {
+ "algo": "rsa",
+ "size": 2048
+ },
+ "names": [
+ {
+ "C": "CN",
+ "ST": "Beijing",
+ "L": "Beijing",
+ "O": "system:kube-controller-manager",
+ "OU": "Kubernetes-manual"
+ }
+ ]
+}
+EOF
+
+cat > apiserver-csr.json << EOF
+{
+ "CN": "kube-apiserver",
+ "key": {
+ "algo": "rsa",
+ "size": 2048
+ },
+ "names": [
+ {
+ "C": "CN",
+ "ST": "Beijing",
+ "L": "Beijing",
+ "O": "Kubernetes",
+ "OU": "Kubernetes-manual"
+ }
+ ]
+}
+EOF
+
+
+cat > ca-csr.json << EOF
+{
+ "CN": "kubernetes",
+ "key": {
+ "algo": "rsa",
+ "size": 2048
+ },
+ "names": [
+ {
+ "C": "CN",
+ "ST": "Beijing",
+ "L": "Beijing",
+ "O": "Kubernetes",
+ "OU": "Kubernetes-manual"
+ }
+ ],
+ "ca": {
+ "expiry": "876000h"
+ }
+}
+EOF
+
+cat > etcd-csr.json << EOF
+{
+ "CN": "etcd",
+ "key": {
+ "algo": "rsa",
+ "size": 2048
+ },
+ "names": [
+ {
+ "C": "CN",
+ "ST": "Beijing",
+ "L": "Beijing",
+ "O": "etcd",
+ "OU": "Etcd Security"
+ }
+ ]
+}
+EOF
+
+
+cat > front-proxy-client-csr.json << EOF
+{
+ "CN": "front-proxy-client",
+ "key": {
+ "algo": "rsa",
+ "size": 2048
+ }
+}
+EOF
+
+
+cat > kube-proxy-csr.json << EOF
+{
+ "CN": "system:kube-proxy",
+ "key": {
+ "algo": "rsa",
+ "size": 2048
+ },
+ "names": [
+ {
+ "C": "CN",
+ "ST": "Beijing",
+ "L": "Beijing",
+ "O": "system:kube-proxy",
+ "OU": "Kubernetes-manual"
+ }
+ ]
+}
+EOF
+
+
+cat > scheduler-csr.json << EOF
+{
+ "CN": "system:kube-scheduler",
+ "key": {
+ "algo": "rsa",
+ "size": 2048
+ },
+ "names": [
+ {
+ "C": "CN",
+ "ST": "Beijing",
+ "L": "Beijing",
+ "O": "system:kube-scheduler",
+ "OU": "Kubernetes-manual"
+ }
+ ]
+}
+EOF
+
+cd ..
+mkdir bootstrap
+cd bootstrap
+cat > bootstrap.secret.yaml << EOF
+apiVersion: v1
+kind: Secret
+metadata:
+ name: bootstrap-token-c8ad9c
+ namespace: kube-system
+type: bootstrap.kubernetes.io/token
+stringData:
+ description: "The default bootstrap token generated by 'kubelet '."
+ token-id: c8ad9c
+ token-secret: 2e4d610cf3e7426e
+ usage-bootstrap-authentication: "true"
+ usage-bootstrap-signing: "true"
+ auth-extra-groups: system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: kubelet-bootstrap
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:node-bootstrapper
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+ kind: Group
+ name: system:bootstrappers:default-node-token
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: node-autoapprove-bootstrap
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+ kind: Group
+ name: system:bootstrappers:default-node-token
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: node-autoapprove-certificate-rotation
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+ kind: Group
+ name: system:nodes
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ annotations:
+ rbac.authorization.kubernetes.io/autoupdate: "true"
+ labels:
+ kubernetes.io/bootstrapping: rbac-defaults
+ name: system:kube-apiserver-to-kubelet
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/proxy
+ - nodes/stats
+ - nodes/log
+ - nodes/spec
+ - nodes/metrics
+ verbs:
+ - "*"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: system:kube-apiserver
+ namespace: ""
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:kube-apiserver-to-kubelet
+subjects:
+ - apiGroup: rbac.authorization.k8s.io
+ kind: User
+ name: kube-apiserver
+EOF
+
+
+cd ..
+mkdir coredns
+cd coredns
+cat > coredns.yaml << EOF
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: coredns
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ kubernetes.io/bootstrapping: rbac-defaults
+ name: system:coredns
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ - services
+ - pods
+ - namespaces
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ annotations:
+ rbac.authorization.kubernetes.io/autoupdate: "true"
+ labels:
+ kubernetes.io/bootstrapping: rbac-defaults
+ name: system:coredns
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:coredns
+subjects:
+- kind: ServiceAccount
+ name: coredns
+ namespace: kube-system
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: coredns
+ namespace: kube-system
+data:
+ Corefile: |
+ .:53 {
+ errors
+ health {
+ lameduck 5s
+ }
+ ready
+ kubernetes cluster.local in-addr.arpa ip6.arpa {
+ fallthrough in-addr.arpa ip6.arpa
+ }
+ prometheus :9153
+ forward . /etc/resolv.conf {
+ max_concurrent 1000
+ }
+ cache 30
+ loop
+ reload
+ loadbalance
+ }
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: coredns
+ namespace: kube-system
+ labels:
+ k8s-app: kube-dns
+ kubernetes.io/name: "CoreDNS"
+spec:
+ # replicas: not specified here:
+ # 1. Default is 1.
+ # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ k8s-app: kube-dns
+ template:
+ metadata:
+ labels:
+ k8s-app: kube-dns
+ spec:
+ priorityClassName: system-cluster-critical
+ serviceAccountName: coredns
+ tolerations:
+ - key: "CriticalAddonsOnly"
+ operator: "Exists"
+ nodeSelector:
+ kubernetes.io/os: linux
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: k8s-app
+ operator: In
+ values: ["kube-dns"]
+ topologyKey: kubernetes.io/hostname
+ containers:
+ - name: coredns
+ image: registry.cn-beijing.aliyuncs.com/dotbalo/coredns:1.8.6
+ imagePullPolicy: IfNotPresent
+ resources:
+ limits:
+ memory: 170Mi
+ requests:
+ cpu: 100m
+ memory: 70Mi
+ args: [ "-conf", "/etc/coredns/Corefile" ]
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/coredns
+ readOnly: true
+ ports:
+ - containerPort: 53
+ name: dns
+ protocol: UDP
+ - containerPort: 53
+ name: dns-tcp
+ protocol: TCP
+ - containerPort: 9153
+ name: metrics
+ protocol: TCP
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ add:
+ - NET_BIND_SERVICE
+ drop:
+ - all
+ readOnlyRootFilesystem: true
+ livenessProbe:
+ httpGet:
+ path: /health
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 5
+ readinessProbe:
+ httpGet:
+ path: /ready
+ port: 8181
+ scheme: HTTP
+ dnsPolicy: Default
+ volumes:
+ - name: config-volume
+ configMap:
+ name: coredns
+ items:
+ - key: Corefile
+ path: Corefile
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: kube-dns
+ namespace: kube-system
+ annotations:
+ prometheus.io/port: "9153"
+ prometheus.io/scrape: "true"
+ labels:
+ k8s-app: kube-dns
+ kubernetes.io/cluster-service: "true"
+ kubernetes.io/name: "CoreDNS"
+spec:
+ selector:
+ k8s-app: kube-dns
+ clusterIP: 10.96.0.10
+ ports:
+ - name: dns
+ port: 53
+ protocol: UDP
+ - name: dns-tcp
+ port: 53
+ protocol: TCP
+ - name: metrics
+ port: 9153
+ protocol: TCP
+EOF
+
+
+cd ..
+mkdir metrics-server
+cd metrics-server
+cat > metrics-server.yaml << EOF
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ k8s-app: metrics-server
+ rbac.authorization.k8s.io/aggregate-to-admin: "true"
+ rbac.authorization.k8s.io/aggregate-to-edit: "true"
+ rbac.authorization.k8s.io/aggregate-to-view: "true"
+ name: system:aggregated-metrics-reader
+rules:
+- apiGroups:
+ - metrics.k8s.io
+ resources:
+ - pods
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: system:metrics-server
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - nodes
+ - nodes/stats
+ - namespaces
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server-auth-reader
+ namespace: kube-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+subjects:
+- kind: ServiceAccount
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server:system:auth-delegator
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:auth-delegator
+subjects:
+- kind: ServiceAccount
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: system:metrics-server
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:metrics-server
+subjects:
+- kind: ServiceAccount
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server
+ namespace: kube-system
+spec:
+ ports:
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: https
+ selector:
+ k8s-app: metrics-server
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ k8s-app: metrics-server
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 0
+ template:
+ metadata:
+ labels:
+ k8s-app: metrics-server
+ spec:
+ containers:
+ - args:
+ - --cert-dir=/tmp
+ - --secure-port=4443
+ - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
+ - --kubelet-use-node-status-port
+ - --metric-resolution=15s
+ - --kubelet-insecure-tls
+ - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem # change to front-proxy-ca.crt for kubeadm
+ - --requestheader-username-headers=X-Remote-User
+ - --requestheader-group-headers=X-Remote-Group
+ - --requestheader-extra-headers-prefix=X-Remote-Extra-
+ image: registry.cn-beijing.aliyuncs.com/dotbalo/metrics-server:0.5.0
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /livez
+ port: https
+ scheme: HTTPS
+ periodSeconds: 10
+ name: metrics-server
+ ports:
+ - containerPort: 4443
+ name: https
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /readyz
+ port: https
+ scheme: HTTPS
+ initialDelaySeconds: 20
+ periodSeconds: 10
+ resources:
+ requests:
+ cpu: 100m
+ memory: 200Mi
+ securityContext:
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 1000
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp-dir
+ - name: ca-ssl
+ mountPath: /etc/kubernetes/pki
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ serviceAccountName: metrics-server
+ volumes:
+ - emptyDir: {}
+ name: tmp-dir
+ - name: ca-ssl
+ hostPath:
+ path: /etc/kubernetes/pki
+
+---
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: v1beta1.metrics.k8s.io
+spec:
+ group: metrics.k8s.io
+ groupPriorityMinimum: 100
+ insecureSkipTLSVerify: true
+ service:
+ name: metrics-server
+ namespace: kube-system
+ version: v1beta1
+ versionPriority: 100
+EOF
+```
+
+# 3. Generate the certificates
+
+```shell
+# master01节点下载证书生成工具
+# wget "https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.2_linux_amd64" -O /usr/local/bin/cfssl
+# wget "https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.2_linux_amd64" -O /usr/local/bin/cfssljson
+
+# 软件包内有
+cp cfssl_*_linux_amd64 /usr/local/bin/cfssl
+cp cfssljson_*_linux_amd64 /usr/local/bin/cfssljson
+
+chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson
+```
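+
+A quick sanity check that both tools are on the PATH and executable:
+
+```shell
+cfssl version
+which cfssljson
+```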
+
+## 3.1.生成etcd证书
+
+Unless stated otherwise, run the following on all master nodes.
+
+### 3.1.1所有master节点创建证书存放目录
+
+```shell
+mkdir /etc/etcd/ssl -p
+```
+
+### 3.1.2master01节点生成etcd证书
+
+```shell
+cd pki
+# Generate the etcd CA plus the etcd certificate and key (if you may scale out later, add a few spare IPs to -hostname as reservations)
+# If you have no IPv6, the IPv6 addresses may be kept or removed
+cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca
+cfssl gencert \
+ -ca=/etc/etcd/ssl/etcd-ca.pem \
+ -ca-key=/etc/etcd/ssl/etcd-ca-key.pem \
+ -config=ca-config.json \
+ -hostname=127.0.0.1,k8s-master01,k8s-master02,k8s-master03,192.168.1.61,192.168.1.62,192.168.1.63,fc00:43f4:1eea:1::10,fc00:43f4:1eea:1::20,fc00:43f4:1eea:1::30 \
+ -profile=kubernetes \
+ etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd
+```
+
+### 3.1.3将证书复制到其他节点
+
+```shell
+Master='k8s-master02 k8s-master03'
+for NODE in $Master; do ssh $NODE "mkdir -p /etc/etcd/ssl"; for FILE in etcd-ca-key.pem etcd-ca.pem etcd-key.pem etcd.pem; do scp /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}; done; done
+```
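+
+Optionally, confirm on every master that the etcd certificate landed intact and carries the expected subject and validity (a 100-year expiry was requested in ca-config.json above):
+
+```shell
+for NODE in k8s-master01 k8s-master02 k8s-master03; do
+  ssh $NODE "openssl x509 -in /etc/etcd/ssl/etcd.pem -noout -subject -enddate"
+done
+```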
+
+## 3.2.生成k8s相关证书
+
+Unless stated otherwise, run the following on all master nodes.
+
+### 3.2.1所有k8s节点创建证书存放目录
+
+```shell
+mkdir -p /etc/kubernetes/pki
+```
+
+### 3.2.2master01节点生成k8s证书
+
+```shell
+cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca
+
+# Generate the apiserver certificate; extra IPs are included as reservations for nodes added in the future
+# 10.96.0.1 is the first address of the service CIDR (it must be derived from that range); 192.168.8.66 is the high-availability VIP
+# If you have no IPv6, the IPv6 addresses may be kept or removed
+
+cfssl gencert \
+-ca=/etc/kubernetes/pki/ca.pem \
+-ca-key=/etc/kubernetes/pki/ca-key.pem \
+-config=ca-config.json \
+-hostname=10.96.0.1,192.168.8.66,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,x.oiox.cn,k.oiox.cn,l.oiox.cn,o.oiox.cn,192.168.1.61,192.168.1.62,192.168.1.63,192.168.1.64,192.168.1.65,192.168.8.66,192.168.1.67,192.168.1.68,192.168.1.69,192.168.1.70,fc00:43f4:1eea:1::10,fc00:43f4:1eea:1::20,fc00:43f4:1eea:1::30,fc00:43f4:1eea:1::40,fc00:43f4:1eea:1::50,fc00:43f4:1eea:1::60,fc00:43f4:1eea:1::70,fc00:43f4:1eea:1::80,fc00:43f4:1eea:1::90,fc00:43f4:1eea:1::100 \
+-profile=kubernetes apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver
+```
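+
+Optionally, verify that all of the reserved IPs and service names made it into the certificate's SAN list:
+
+```shell
+openssl x509 -in /etc/kubernetes/pki/apiserver.pem -noout -text | grep -A1 "Subject Alternative Name"
+```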
+
+### 3.2.3生成apiserver聚合证书
+
+```shell
+cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca
+
+# 有一个警告,可以忽略
+
+cfssl gencert \
+-ca=/etc/kubernetes/pki/front-proxy-ca.pem \
+-ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem \
+-config=ca-config.json \
+-profile=kubernetes front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client
+```
+
+### 3.2.4. Generate the controller-manager certificate
+
+In section "5. High availability" you will choose an HA scheme; the `--server` address below must match it:
+If you use haproxy + keepalived, use `--server=https://192.168.8.66:8443`
+If you use the nginx option, use `--server=https://127.0.0.1:8443`
+
+```shell
+cfssl gencert \
+ -ca=/etc/kubernetes/pki/ca.pem \
+ -ca-key=/etc/kubernetes/pki/ca-key.pem \
+ -config=ca-config.json \
+ -profile=kubernetes \
+ manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager
+
+# 设置一个集群项
+
+# 在《5.高可用配置》选择使用那种高可用方案
+# 若使用 haproxy、keepalived 那么为 `--server=https://192.168.8.66:8443`
+# 若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443`
+
+kubectl config set-cluster kubernetes \
+ --certificate-authority=/etc/kubernetes/pki/ca.pem \
+ --embed-certs=true \
+ --server=https://127.0.0.1:8443 \
+ --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
+
+# 设置一个环境项,一个上下文
+
+kubectl config set-context system:kube-controller-manager@kubernetes \
+ --cluster=kubernetes \
+ --user=system:kube-controller-manager \
+ --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
+
+# 设置一个用户项
+
+kubectl config set-credentials system:kube-controller-manager \
+ --client-certificate=/etc/kubernetes/pki/controller-manager.pem \
+ --client-key=/etc/kubernetes/pki/controller-manager-key.pem \
+ --embed-certs=true \
+ --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
+
+# 设置默认环境
+
+kubectl config use-context system:kube-controller-manager@kubernetes \
+ --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
+
+cfssl gencert \
+ -ca=/etc/kubernetes/pki/ca.pem \
+ -ca-key=/etc/kubernetes/pki/ca-key.pem \
+ -config=ca-config.json \
+ -profile=kubernetes \
+ scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler
+
+# 在《5.高可用配置》选择使用那种高可用方案
+# 若使用 haproxy、keepalived 那么为 `--server=https://192.168.8.66:8443`
+# 若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443`
+
+kubectl config set-cluster kubernetes \
+ --certificate-authority=/etc/kubernetes/pki/ca.pem \
+ --embed-certs=true \
+ --server=https://127.0.0.1:8443 \
+ --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
+
+kubectl config set-credentials system:kube-scheduler \
+ --client-certificate=/etc/kubernetes/pki/scheduler.pem \
+ --client-key=/etc/kubernetes/pki/scheduler-key.pem \
+ --embed-certs=true \
+ --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
+
+kubectl config set-context system:kube-scheduler@kubernetes \
+ --cluster=kubernetes \
+ --user=system:kube-scheduler \
+ --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
+
+kubectl config use-context system:kube-scheduler@kubernetes \
+ --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
+
+cfssl gencert \
+ -ca=/etc/kubernetes/pki/ca.pem \
+ -ca-key=/etc/kubernetes/pki/ca-key.pem \
+ -config=ca-config.json \
+ -profile=kubernetes \
+ admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin
+
+# 在《5.高可用配置》选择使用那种高可用方案
+# 若使用 haproxy、keepalived 那么为 `--server=https://192.168.8.66:8443`
+# 若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443`
+
+kubectl config set-cluster kubernetes \
+ --certificate-authority=/etc/kubernetes/pki/ca.pem \
+ --embed-certs=true \
+ --server=https://127.0.0.1:8443 \
+ --kubeconfig=/etc/kubernetes/admin.kubeconfig
+
+kubectl config set-credentials kubernetes-admin \
+ --client-certificate=/etc/kubernetes/pki/admin.pem \
+ --client-key=/etc/kubernetes/pki/admin-key.pem \
+ --embed-certs=true \
+ --kubeconfig=/etc/kubernetes/admin.kubeconfig
+
+kubectl config set-context kubernetes-admin@kubernetes \
+ --cluster=kubernetes \
+ --user=kubernetes-admin \
+ --kubeconfig=/etc/kubernetes/admin.kubeconfig
+
+kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=/etc/kubernetes/admin.kubeconfig
+```
+
+### 3.2.5. Create the kube-proxy certificate
+
+In section "5. High availability" you will choose an HA scheme; the `--server` address below must match it:
+If you use haproxy + keepalived, use `--server=https://192.168.8.66:8443`
+If you use the nginx option, use `--server=https://127.0.0.1:8443`
+
+```shell
+cfssl gencert \
+ -ca=/etc/kubernetes/pki/ca.pem \
+ -ca-key=/etc/kubernetes/pki/ca-key.pem \
+ -config=ca-config.json \
+ -profile=kubernetes \
+ kube-proxy-csr.json | cfssljson -bare /etc/kubernetes/pki/kube-proxy
+
+# 在《5.高可用配置》选择使用那种高可用方案
+# 若使用 haproxy、keepalived 那么为 `--server=https://192.168.8.66:8443`
+# 若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443`
+
+kubectl config set-cluster kubernetes \
+ --certificate-authority=/etc/kubernetes/pki/ca.pem \
+ --embed-certs=true \
+ --server=https://127.0.0.1:8443 \
+ --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
+
+kubectl config set-credentials kube-proxy \
+ --client-certificate=/etc/kubernetes/pki/kube-proxy.pem \
+ --client-key=/etc/kubernetes/pki/kube-proxy-key.pem \
+ --embed-certs=true \
+ --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
+
+kubectl config set-context kube-proxy@kubernetes \
+ --cluster=kubernetes \
+ --user=kube-proxy \
+ --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
+
+kubectl config use-context kube-proxy@kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
+```
+
+
+
+### 3.2.6. Create the ServiceAccount key pair (sa.key / sa.pub)
+
+```shell
+openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
+openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
+```
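+
+Optionally confirm the generated key pair matches; the two digests below should be identical:
+
+```shell
+openssl rsa -in /etc/kubernetes/pki/sa.key -pubout 2>/dev/null | md5sum
+md5sum /etc/kubernetes/pki/sa.pub
+```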
+
+### 3.2.7. Copy the certificates to the other master nodes
+
+```shell
+#其他节点创建目录
+# mkdir /etc/kubernetes/pki/ -p
+
+for NODE in k8s-master02 k8s-master03; do for FILE in $(ls /etc/kubernetes/pki | grep -v etcd); do scp /etc/kubernetes/pki/${FILE} $NODE:/etc/kubernetes/pki/${FILE}; done; for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE}; done; done
+```
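+
+Optionally, confirm the other masters received the full set; each should report the same file count as master01 (26, as listed in the next step):
+
+```shell
+for NODE in k8s-master02 k8s-master03; do ssh $NODE "ls /etc/kubernetes/pki/ | wc -l"; done
+```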
+
+### 3.2.8. Review the certificates
+
+```shell
+ls /etc/kubernetes/pki/
+admin.csr controller-manager.csr kube-proxy.csr
+admin-key.pem controller-manager-key.pem kube-proxy-key.pem
+admin.pem controller-manager.pem kube-proxy.pem
+apiserver.csr front-proxy-ca.csr sa.key
+apiserver-key.pem front-proxy-ca-key.pem sa.pub
+apiserver.pem front-proxy-ca.pem scheduler.csr
+ca.csr front-proxy-client.csr scheduler-key.pem
+ca-key.pem front-proxy-client-key.pem scheduler.pem
+ca.pem front-proxy-client.pem
+
+# 一共26个就对了
+ls /etc/kubernetes/pki/ |wc -l
+26
+```
+
+# 4. Configure the k8s system components
+
+## 4.1.etcd配置
+
+### 4.1.1master01配置
+
+```shell
+# 如果要用IPv6那么把IPv4地址修改为IPv6即可
+cat > /etc/etcd/etcd.config.yml << EOF
+name: 'k8s-master01'
+data-dir: /var/lib/etcd
+wal-dir: /var/lib/etcd/wal
+snapshot-count: 5000
+heartbeat-interval: 100
+election-timeout: 1000
+quota-backend-bytes: 0
+listen-peer-urls: 'https://192.168.1.61:2380'
+listen-client-urls: 'https://192.168.1.61:2379,http://127.0.0.1:2379'
+max-snapshots: 3
+max-wals: 5
+cors:
+initial-advertise-peer-urls: 'https://192.168.1.61:2380'
+advertise-client-urls: 'https://192.168.1.61:2379'
+discovery:
+discovery-fallback: 'proxy'
+discovery-proxy:
+discovery-srv:
+initial-cluster: 'k8s-master01=https://192.168.1.61:2380,k8s-master02=https://192.168.1.62:2380,k8s-master03=https://192.168.1.63:2380'
+initial-cluster-token: 'etcd-k8s-cluster'
+initial-cluster-state: 'new'
+strict-reconfig-check: false
+enable-v2: true
+enable-pprof: true
+proxy: 'off'
+proxy-failure-wait: 5000
+proxy-refresh-interval: 30000
+proxy-dial-timeout: 1000
+proxy-write-timeout: 5000
+proxy-read-timeout: 0
+client-transport-security:
+ cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
+ key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
+ client-cert-auth: true
+ trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
+ auto-tls: true
+peer-transport-security:
+ cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
+ key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
+ peer-client-cert-auth: true
+ trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
+ auto-tls: true
+debug: false
+log-package-levels:
+log-outputs: [default]
+force-new-cluster: false
+EOF
+```
+
+### 4.1.2master02配置
+
+```shell
+# 如果要用IPv6那么把IPv4地址修改为IPv6即可
+cat > /etc/etcd/etcd.config.yml << EOF
+name: 'k8s-master02'
+data-dir: /var/lib/etcd
+wal-dir: /var/lib/etcd/wal
+snapshot-count: 5000
+heartbeat-interval: 100
+election-timeout: 1000
+quota-backend-bytes: 0
+listen-peer-urls: 'https://192.168.1.62:2380'
+listen-client-urls: 'https://192.168.1.62:2379,http://127.0.0.1:2379'
+max-snapshots: 3
+max-wals: 5
+cors:
+initial-advertise-peer-urls: 'https://192.168.1.62:2380'
+advertise-client-urls: 'https://192.168.1.62:2379'
+discovery:
+discovery-fallback: 'proxy'
+discovery-proxy:
+discovery-srv:
+initial-cluster: 'k8s-master01=https://192.168.1.61:2380,k8s-master02=https://192.168.1.62:2380,k8s-master03=https://192.168.1.63:2380'
+initial-cluster-token: 'etcd-k8s-cluster'
+initial-cluster-state: 'new'
+strict-reconfig-check: false
+enable-v2: true
+enable-pprof: true
+proxy: 'off'
+proxy-failure-wait: 5000
+proxy-refresh-interval: 30000
+proxy-dial-timeout: 1000
+proxy-write-timeout: 5000
+proxy-read-timeout: 0
+client-transport-security:
+ cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
+ key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
+ client-cert-auth: true
+ trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
+ auto-tls: true
+peer-transport-security:
+ cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
+ key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
+ peer-client-cert-auth: true
+ trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
+ auto-tls: true
+debug: false
+log-package-levels:
+log-outputs: [default]
+force-new-cluster: false
+EOF
+```
+
+### 4.1.3master03配置
+
+```shell
+# 如果要用IPv6那么把IPv4地址修改为IPv6即可
+cat > /etc/etcd/etcd.config.yml << EOF
+name: 'k8s-master03'
+data-dir: /var/lib/etcd
+wal-dir: /var/lib/etcd/wal
+snapshot-count: 5000
+heartbeat-interval: 100
+election-timeout: 1000
+quota-backend-bytes: 0
+listen-peer-urls: 'https://192.168.1.63:2380'
+listen-client-urls: 'https://192.168.1.63:2379,http://127.0.0.1:2379'
+max-snapshots: 3
+max-wals: 5
+cors:
+initial-advertise-peer-urls: 'https://192.168.1.63:2380'
+advertise-client-urls: 'https://192.168.1.63:2379'
+discovery:
+discovery-fallback: 'proxy'
+discovery-proxy:
+discovery-srv:
+initial-cluster: 'k8s-master01=https://192.168.1.61:2380,k8s-master02=https://192.168.1.62:2380,k8s-master03=https://192.168.1.63:2380'
+initial-cluster-token: 'etcd-k8s-cluster'
+initial-cluster-state: 'new'
+strict-reconfig-check: false
+enable-v2: true
+enable-pprof: true
+proxy: 'off'
+proxy-failure-wait: 5000
+proxy-refresh-interval: 30000
+proxy-dial-timeout: 1000
+proxy-write-timeout: 5000
+proxy-read-timeout: 0
+client-transport-security:
+ cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
+ key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
+ client-cert-auth: true
+ trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
+ auto-tls: true
+peer-transport-security:
+ cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
+ key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
+ peer-client-cert-auth: true
+ trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
+ auto-tls: true
+debug: false
+log-package-levels:
+log-outputs: [default]
+force-new-cluster: false
+EOF
+```
+
+## 4.2.创建service(所有master节点操作)
+
+### 4.2.1创建etcd.service并启动
+
+```shell
+cat > /usr/lib/systemd/system/etcd.service << EOF
+
+[Unit]
+Description=Etcd Service
+Documentation=https://coreos.com/etcd/docs/latest/
+After=network.target
+
+[Service]
+Type=notify
+ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
+Restart=on-failure
+RestartSec=10
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
+Alias=etcd3.service
+
+EOF
+```
+
+### 4.2.2创建etcd证书目录
+
+```shell
+mkdir /etc/kubernetes/pki/etcd
+ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
+systemctl daemon-reload
+systemctl enable --now etcd
+```
+
+### 4.2.3查看etcd状态
+
+```shell
+# 如果要用IPv6那么把IPv4地址修改为IPv6即可
+export ETCDCTL_API=3
+etcdctl --endpoints="192.168.1.63:2379,192.168.1.62:2379,192.168.1.61:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint status --write-out=table
++----------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
+| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
++----------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
+| 192.168.1.63:2379 | c0c8142615b9523f | 3.5.6 | 20 kB | false | false | 2 | 9 | 9 | |
+| 192.168.1.62:2379 | de8396604d2c160d | 3.5.6 | 20 kB | false | false | 2 | 9 | 9 | |
+| 192.168.1.61:2379 | 33c9d6df0037ab97 | 3.5.6 | 20 kB | true | false | 2 | 9 | 9 | |
++----------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
+
+[root@k8s-master01 pki]#
+```
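+
+Beyond the status table, an explicit health probe with the same certificates should report all three endpoints as healthy:
+
+```shell
+etcdctl --endpoints="192.168.1.61:2379,192.168.1.62:2379,192.168.1.63:2379" \
+  --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem \
+  --cert=/etc/kubernetes/pki/etcd/etcd.pem \
+  --key=/etc/kubernetes/pki/etcd/etcd-key.pem \
+  endpoint health --write-out=table
+```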
+
+# 5. High-availability configuration (haproxy/keepalived runs on the master servers; the nginx option runs on every node)
+
+**Note: 5.1.1 and 5.1.2 are alternatives — do one of them, not both.**
+
+Whichever HA scheme you choose here must match the `--server` address used in section "3.2. Generate the k8s certificates":
+
+If you use the nginx option, it is `--server=https://127.0.0.1:8443`
+If you use haproxy + keepalived, it is `--server=https://192.168.8.66:8443`
+
+## 5.1. NGINX high-availability option (recommended)
+
+### 5.1.1. Compile nginx yourself
+Run this on every node.
+```shell
+# Install the build toolchain
+yum install gcc -y
+
+# 下载解压nginx二进制文件
+wget http://nginx.org/download/nginx-1.22.1.tar.gz
+tar xvf nginx-*.tar.gz
+cd nginx-*
+
+# 进行编译
+./configure --with-stream --without-http --without-http_uwsgi_module --without-http_scgi_module --without-http_fastcgi_module
+make && make install
+```
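+
+`make install` places the binary under the default prefix /usr/local/nginx; a quick check that the stream module was actually compiled in:
+
+```shell
+/usr/local/nginx/sbin/nginx -V 2>&1 | grep -o with-stream
+```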
+
+
+### 5.1.2使用我编译好的
+```shell
+# 使用我编译好的
+
+cd kubernetes-v1.26.0/cby
+# 拷贝我编译好的nginx
+node='k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02'
+for NODE in $node; do scp nginx.tar $NODE:/usr/local/; done
+
+# 其他节点上执行
+cd /usr/local/
+tar xvf nginx.tar
+```
+
+### 5.1.3. Write the nginx configuration and systemd unit
+Run this on every host.
+```shell
+# Write the nginx configuration (a local TCP proxy on 127.0.0.1:8443 balancing across the three apiservers)
+cat > /usr/local/nginx/conf/kube-nginx.conf << "EOF"
+worker_processes 1;
+events {
+    worker_connections  256;
+}
+stream {
+    upstream backend {
+        least_conn;
+        hash $remote_addr consistent;
+        server 192.168.1.61:6443        max_fails=3 fail_timeout=30s;
+        server 192.168.1.62:6443        max_fails=3 fail_timeout=30s;
+        server 192.168.1.63:6443        max_fails=3 fail_timeout=30s;
+    }
+    server {
+        listen 127.0.0.1:8443;
+        proxy_connect_timeout 1s;
+        proxy_pass backend;
+    }
+}
+EOF
+
+# Write the systemd unit
+cat > /etc/systemd/system/kube-nginx.service << EOF
+[Unit]
+Description=kube-apiserver nginx proxy
+After=network.target
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=forking
+ExecStartPre=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/kube-nginx.conf -p /usr/local/nginx -t
+ExecStart=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/kube-nginx.conf -p /usr/local/nginx
+ExecReload=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/kube-nginx.conf -p /usr/local/nginx -s reload
+PrivateTmp=true
+Restart=always
+RestartSec=5
+StartLimitInterval=0
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
+EOF
+```
+
+### 5.1.4. Enable and start kube-nginx
+
+```shell
+systemctl daemon-reload
+systemctl enable --now kube-nginx
+systemctl status kube-nginx
+```
+
+## 5.2. keepalived and haproxy high-availability option
+
+### 5.2.1. Install keepalived and haproxy on all master nodes
+
+```shell
+yum -y install keepalived haproxy
+```
+
+### 5.2.2. Configure haproxy (identical on all master nodes)
+
+```shell
+# cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak
+
+cat >/etc/haproxy/haproxy.cfg<<"EOF"
+global
+ maxconn 2000
+ ulimit-n 16384
+ log 127.0.0.1 local0 err
+ stats timeout 30s
+
+defaults
+ log global
+ mode http
+ option httplog
+ timeout connect 5000
+ timeout client 50000
+ timeout server 50000
+ timeout http-request 15s
+ timeout http-keep-alive 15s
+
+
+frontend monitor-in
+ bind *:33305
+ mode http
+ option httplog
+ monitor-uri /monitor
+
+frontend k8s-master
+ bind 0.0.0.0:8443
+ bind 127.0.0.1:8443
+ mode tcp
+ option tcplog
+ tcp-request inspect-delay 5s
+ default_backend k8s-master
+
+
+backend k8s-master
+ mode tcp
+ option tcplog
+ option tcp-check
+ balance roundrobin
+ default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
+ server k8s-master01 192.168.1.61:6443 check
+ server k8s-master02 192.168.1.62:6443 check
+ server k8s-master03 192.168.1.63:6443 check
+EOF
+```
+
+### 5.2.3Master01配置keepalived master节点
+
+```shell
+#cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
+
+cat > /etc/keepalived/keepalived.conf << EOF
+! Configuration File for keepalived
+
+global_defs {
+ router_id LVS_DEVEL
+}
+vrrp_script chk_apiserver {
+ script "/etc/keepalived/check_apiserver.sh"
+ interval 5
+ weight -5
+ fall 2
+ rise 1
+}
+vrrp_instance VI_1 {
+ state MASTER
+ # 注意网卡名
+ interface eth0
+ mcast_src_ip 192.168.1.61
+ virtual_router_id 51
+ priority 100
+ nopreempt
+ advert_int 2
+ authentication {
+ auth_type PASS
+ auth_pass K8SHA_KA_AUTH
+ }
+ virtual_ipaddress {
+ 192.168.8.66
+ }
+ track_script {
+ chk_apiserver
+} }
+
+EOF
+```
+
+### 5.2.4Master02配置keepalived backup节点
+
+```shell
+# cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
+
+cat > /etc/keepalived/keepalived.conf << EOF
+! Configuration File for keepalived
+
+global_defs {
+ router_id LVS_DEVEL
+}
+vrrp_script chk_apiserver {
+ script "/etc/keepalived/check_apiserver.sh"
+ interval 5
+ weight -5
+ fall 2
+ rise 1
+
+}
+vrrp_instance VI_1 {
+ state BACKUP
+ # 注意网卡名
+ interface eth0
+ mcast_src_ip 192.168.1.62
+ virtual_router_id 51
+ priority 80
+ nopreempt
+ advert_int 2
+ authentication {
+ auth_type PASS
+ auth_pass K8SHA_KA_AUTH
+ }
+ virtual_ipaddress {
+ 192.168.8.66
+ }
+ track_script {
+ chk_apiserver
+} }
+
+EOF
+```
+
+### 5.2.5Master03配置keepalived backup节点
+
+```shell
+# cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
+
+cat > /etc/keepalived/keepalived.conf << EOF
+! Configuration File for keepalived
+
+global_defs {
+ router_id LVS_DEVEL
+}
+vrrp_script chk_apiserver {
+ script "/etc/keepalived/check_apiserver.sh"
+ interval 5
+ weight -5
+ fall 2
+ rise 1
+
+}
+vrrp_instance VI_1 {
+ state BACKUP
+ # 注意网卡名
+ interface eth0
+ mcast_src_ip 192.168.1.63
+ virtual_router_id 51
+ priority 50
+ nopreempt
+ advert_int 2
+ authentication {
+ auth_type PASS
+ auth_pass K8SHA_KA_AUTH
+ }
+ virtual_ipaddress {
+ 192.168.8.66
+ }
+ track_script {
+ chk_apiserver
+} }
+
+EOF
+```
+
+
+
+### 5.2.6健康检查脚本配置(两台lb主机)
+
+```shell
+cat > /etc/keepalived/check_apiserver.sh << EOF
+#!/bin/bash
+
+err=0
+for k in \$(seq 1 3)
+do
+ check_code=\$(pgrep haproxy)
+ if [[ \$check_code == "" ]]; then
+ err=\$(expr \$err + 1)
+ sleep 1
+ continue
+ else
+ err=0
+ break
+ fi
+done
+
+if [[ \$err != "0" ]]; then
+ echo "systemctl stop keepalived"
+ /usr/bin/systemctl stop keepalived
+ exit 1
+else
+ exit 0
+fi
+EOF
+
+# 给脚本授权
+
+chmod +x /etc/keepalived/check_apiserver.sh
+```
+
+### 5.2.7启动服务
+
+```shell
+systemctl daemon-reload
+systemctl enable --now haproxy
+systemctl enable --now keepalived
+```
+
+### 5.2.8. Test the high availability
+
+```shell
+# The VIP should answer ping
+
+[root@k8s-node02 ~]# ping 192.168.8.66
+
+# ...and accept telnet connections on 8443
+
+[root@k8s-node02 ~]# telnet 192.168.8.66 8443
+
+# Shut down the MASTER node and check that the VIP fails over to a BACKUP node
+```
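+
+It is also worth checking which master currently owns the VIP before and after the failover test; run this on each master, and the VIP should appear on exactly one of them:
+
+```shell
+ip a | grep 192.168.8.66
+```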
+
+# 6. Configure the k8s components (distinct from section 4)
+
+Create the following directories on all k8s nodes:
+
+```shell
+mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
+```
+
+## 6.1.创建apiserver(所有master节点)
+
+### 6.1.1master01节点配置
+
+```shell
+cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
+
+[Unit]
+Description=Kubernetes API Server
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-apiserver \\
+ --v=2 \\
+ --allow-privileged=true \\
+ --bind-address=0.0.0.0 \\
+ --secure-port=6443 \\
+ --advertise-address=192.168.1.61 \\
+ --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\
+ --service-node-port-range=30000-32767 \\
+ --etcd-servers=https://192.168.1.61:2379,https://192.168.1.62:2379,https://192.168.1.63:2379 \\
+ --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\
+ --etcd-certfile=/etc/etcd/ssl/etcd.pem \\
+ --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
+ --client-ca-file=/etc/kubernetes/pki/ca.pem \\
+ --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \\
+ --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \\
+ --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \\
+ --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \\
+ --service-account-key-file=/etc/kubernetes/pki/sa.pub \\
+ --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\
+ --service-account-issuer=https://kubernetes.default.svc.cluster.local \\
+ --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
+ --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
+ --authorization-mode=Node,RBAC \\
+ --enable-bootstrap-token-auth=true \\
+ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\
+ --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\
+ --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\
+ --requestheader-allowed-names=aggregator \\
+ --requestheader-group-headers=X-Remote-Group \\
+ --requestheader-extra-headers-prefix=X-Remote-Extra- \\
+ --requestheader-username-headers=X-Remote-User \\
+ --enable-aggregator-routing=true
+ # --feature-gates=IPv6DualStack=true
+ # --token-auth-file=/etc/kubernetes/token.csv
+
+Restart=on-failure
+RestartSec=10s
+LimitNOFILE=65535
+
+[Install]
+WantedBy=multi-user.target
+
+EOF
+```
+
+### 6.1.2master02节点配置
+
+```shell
+cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
+[Unit]
+Description=Kubernetes API Server
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-apiserver \\
+ --v=2 \\
+ --allow-privileged=true \\
+ --bind-address=0.0.0.0 \\
+ --secure-port=6443 \\
+ --advertise-address=192.168.1.62 \\
+ --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\
+ --service-node-port-range=30000-32767 \\
+ --etcd-servers=https://192.168.1.61:2379,https://192.168.1.62:2379,https://192.168.1.63:2379 \\
+ --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\
+ --etcd-certfile=/etc/etcd/ssl/etcd.pem \\
+ --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
+ --client-ca-file=/etc/kubernetes/pki/ca.pem \\
+ --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \\
+ --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \\
+ --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \\
+ --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \\
+ --service-account-key-file=/etc/kubernetes/pki/sa.pub \\
+ --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\
+ --service-account-issuer=https://kubernetes.default.svc.cluster.local \\
+ --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
+ --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\
+ --authorization-mode=Node,RBAC \\
+ --enable-bootstrap-token-auth=true \\
+ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\
+ --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\
+ --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\
+ --requestheader-allowed-names=aggregator \\
+ --requestheader-group-headers=X-Remote-Group \\
+ --requestheader-extra-headers-prefix=X-Remote-Extra- \\
+ --requestheader-username-headers=X-Remote-User \\
+ --enable-aggregator-routing=true
+ # --feature-gates=IPv6DualStack=true
+ # --token-auth-file=/etc/kubernetes/token.csv
+
+Restart=on-failure
+RestartSec=10s
+LimitNOFILE=65535
+
+[Install]
+WantedBy=multi-user.target
+
+EOF
+```
+
+### 6.1.3master03节点配置
+
+```shell
+cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
+
+[Unit]
+Description=Kubernetes API Server
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-apiserver \\
+ --v=2 \\
+ --allow-privileged=true \\
+ --bind-address=0.0.0.0 \\
+ --secure-port=6443 \\
+ --advertise-address=192.168.1.63 \\
+ --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\
+ --service-node-port-range=30000-32767 \\
+ --etcd-servers=https://192.168.1.61:2379,https://192.168.1.62:2379,https://192.168.1.63:2379 \\
+ --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\
+ --etcd-certfile=/etc/etcd/ssl/etcd.pem \\
+ --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
+ --client-ca-file=/etc/kubernetes/pki/ca.pem \\
+ --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \\
+ --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \\
+ --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \\
+ --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \\
+ --service-account-key-file=/etc/kubernetes/pki/sa.pub \\
+ --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\
+ --service-account-issuer=https://kubernetes.default.svc.cluster.local \\
+ --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
+ --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\
+ --authorization-mode=Node,RBAC \\
+ --enable-bootstrap-token-auth=true \\
+ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\
+ --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\
+ --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\
+ --requestheader-allowed-names=aggregator \\
+ --requestheader-group-headers=X-Remote-Group \\
+ --requestheader-extra-headers-prefix=X-Remote-Extra- \\
+ --requestheader-username-headers=X-Remote-User \\
+ --enable-aggregator-routing=true
+ # --feature-gates=IPv6DualStack=true
+ # --token-auth-file=/etc/kubernetes/token.csv
+
+Restart=on-failure
+RestartSec=10s
+LimitNOFILE=65535
+
+[Install]
+WantedBy=multi-user.target
+
+EOF
+```
+
+### 6.1.4启动apiserver(所有master节点)
+
+```shell
+systemctl daemon-reload && systemctl enable --now kube-apiserver
+
+# 注意查看状态是否启动正常
+# systemctl status kube-apiserver
+```
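+
+A direct health probe against the local apiserver, using the admin client certificate generated earlier, should simply print `ok` (127.0.0.1 is in the serving certificate's SANs, so TLS verification succeeds):
+
+```shell
+curl --cacert /etc/kubernetes/pki/ca.pem \
+     --cert /etc/kubernetes/pki/admin.pem \
+     --key /etc/kubernetes/pki/admin-key.pem \
+     https://127.0.0.1:6443/healthz
+```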
+
+## 6.2.配置kube-controller-manager service
+
+```shell
+# Configure on all master nodes; the configuration is identical on each
+# 172.16.0.0/12 is the pod network segment; adjust it to your own CIDR if needed
+
+cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
+
+[Unit]
+Description=Kubernetes Controller Manager
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-controller-manager \\
+ --v=2 \\
+ --bind-address=127.0.0.1 \\
+ --root-ca-file=/etc/kubernetes/pki/ca.pem \\
+ --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \\
+ --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \\
+ --service-account-private-key-file=/etc/kubernetes/pki/sa.key \\
+ --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \\
+ --leader-elect=true \\
+ --use-service-account-credentials=true \\
+ --node-monitor-grace-period=40s \\
+ --node-monitor-period=5s \\
+ --pod-eviction-timeout=2m0s \\
+ --controllers=*,bootstrapsigner,tokencleaner \\
+ --allocate-node-cidrs=true \\
+ --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\
+ --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \\
+ --node-cidr-mask-size-ipv4=24 \\
+ --node-cidr-mask-size-ipv6=120 \\
+ --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
+ # --feature-gates=IPv6DualStack=true
+
+Restart=always
+RestartSec=10s
+
+[Install]
+WantedBy=multi-user.target
+
+EOF
+```
+
+### 6.2.1启动kube-controller-manager,并查看状态
+
+```shell
+systemctl daemon-reload
+systemctl enable --now kube-controller-manager
+# systemctl status kube-controller-manager
+```
+
+## 6.3.配置kube-scheduler service
+
+### 6.3.1所有master节点配置,且配置相同
+
+```shell
+cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
+
+[Unit]
+Description=Kubernetes Scheduler
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-scheduler \\
+ --v=2 \\
+ --bind-address=127.0.0.1 \\
+ --leader-elect=true \\
+ --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
+
+Restart=always
+RestartSec=10s
+
+[Install]
+WantedBy=multi-user.target
+
+EOF
+```
+
+### 6.3.2启动并查看服务状态
+
+```shell
+systemctl daemon-reload
+systemctl enable --now kube-scheduler
+# systemctl status kube-scheduler
+```
+
+# 7. TLS Bootstrapping configuration
+
+## 7.1在master01上配置
+
+```shell
+# 在《5.高可用配置》选择使用那种高可用方案
+# 若使用 haproxy、keepalived 那么为 `--server=https://192.168.8.66:8443`
+# 若使用 nginx方案,那么为 `--server=https://127.0.0.1:8443`
+
+cd bootstrap
+
+kubectl config set-cluster kubernetes \
+--certificate-authority=/etc/kubernetes/pki/ca.pem \
+--embed-certs=true --server=https://127.0.0.1:8443 \
+--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
+
+kubectl config set-credentials tls-bootstrap-token-user \
+--token=c8ad9c.2e4d610cf3e7426e \
+--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
+
+kubectl config set-context tls-bootstrap-token-user@kubernetes \
+--cluster=kubernetes \
+--user=tls-bootstrap-token-user \
+--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
+
+kubectl config use-context tls-bootstrap-token-user@kubernetes \
+--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
+
+# token的位置在bootstrap.secret.yaml,如果修改的话到这个文件修改
+mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
+```
+
+## 7.2查看集群状态,没问题的话继续后续操作
+
+```shell
+kubectl get cs
+Warning: v1 ComponentStatus is deprecated in v1.19+
+NAME STATUS MESSAGE ERROR
+scheduler Healthy ok
+controller-manager Healthy ok
+etcd-0 Healthy {"health":"true","reason":""}
+etcd-2 Healthy {"health":"true","reason":""}
+etcd-1 Healthy {"health":"true","reason":""}
+
+# 切记执行,别忘记!!!
+kubectl create -f bootstrap.secret.yaml
+```
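+
+Optionally confirm the bootstrap token secret was created where the kubelets expect it:
+
+```shell
+kubectl get secret bootstrap-token-c8ad9c -n kube-system
+```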
+
+# 8. Node configuration
+
+## 8.1.在master01上将证书复制到node节点
+
+```shell
+cd /etc/kubernetes/
+
+for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do ssh $NODE mkdir -p /etc/kubernetes/pki; for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig kube-proxy.kubeconfig; do scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}; done; done
+```
+
+## 8.2.kubelet配置
+
+**Note: 8.2.1 and 8.2.2 below must correspond to the runtime you installed in 2.1 / 2.2 above.**
+
+### 8.2.1. When using docker as the runtime (not currently supported)
+
+This document does not currently cover the docker (cri-dockerd) path for v1.26.0.
+
+```shell
+cat > /usr/lib/systemd/system/kubelet.service << EOF
+
+[Unit]
+Description=Kubernetes Kubelet
+Documentation=https://github.com/kubernetes/kubernetes
+
+[Service]
+ExecStart=/usr/local/bin/kubelet \\
+ --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \\
+ --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
+ --config=/etc/kubernetes/kubelet-conf.yml \\
+ --container-runtime-endpoint=unix:///run/cri-dockerd.sock \\
+ --node-labels=node.kubernetes.io/node=
+
+[Install]
+WantedBy=multi-user.target
+EOF
+```
+
+### 8.2.2 When using containerd as the runtime (recommended)
+
+```shell
+mkdir -p /var/lib/kubelet /var/log/kubernetes /etc/systemd/system/kubelet.service.d /etc/kubernetes/manifests/
+
+# Configure the kubelet service on all k8s nodes
+cat > /usr/lib/systemd/system/kubelet.service << EOF
+
+[Unit]
+Description=Kubernetes Kubelet
+Documentation=https://github.com/kubernetes/kubernetes
+After=containerd.service
+Requires=containerd.service
+
+[Service]
+ExecStart=/usr/local/bin/kubelet \\
+ --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \\
+ --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
+ --config=/etc/kubernetes/kubelet-conf.yml \\
+ --container-runtime-endpoint=unix:///run/containerd/containerd.sock \\
+ --node-labels=node.kubernetes.io/node=
+ # --feature-gates=IPv6DualStack=true
+ # --container-runtime=remote
+ # --runtime-request-timeout=15m
+ # --cgroup-driver=systemd
+
+[Install]
+WantedBy=multi-user.target
+EOF
+```
+
+
+### 8.2.3 Create the kubelet configuration file on all k8s nodes, then start and verify kubelet
+
+```shell
+# Write the KubeletConfiguration to /etc/kubernetes/kubelet-conf.yml (a minimal sketch is
+# given after this block), then start kubelet on every node:
+systemctl daemon-reload
+systemctl enable --now kubelet
+
+# Verify that all nodes have registered and are Ready
+kubectl get node
+NAME           STATUS   ROLES    AGE   VERSION
+k8s-master01   Ready    <none>   18s   v1.26.0
+k8s-master02   Ready    <none>   16s   v1.26.0
+k8s-master03   Ready    <none>   16s   v1.26.0
+k8s-node01     Ready    <none>   14s   v1.26.0
+k8s-node02     Ready    <none>   14s   v1.26.0
+[root@k8s-master01 ~]#
+```
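+
+The full KubeletConfiguration is not reproduced above. A minimal sketch is shown below, assuming the systemd cgroup driver and the 10.96.0.10 DNS clusterIP installed with CoreDNS later in this document; the file in the upstream repository carries additional tuning fields:
+
+```shell
+# Minimal illustrative KubeletConfiguration — extend as needed
+cat > /etc/kubernetes/kubelet-conf.yml << EOF
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+address: 0.0.0.0
+port: 10250
+cgroupDriver: systemd
+clusterDNS:
+- 10.96.0.10
+clusterDomain: cluster.local
+authentication:
+  anonymous:
+    enabled: false
+  webhook:
+    enabled: true
+  x509:
+    clientCAFile: /etc/kubernetes/pki/ca.pem
+authorization:
+  mode: Webhook
+EOF
+```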
+
+## 8.3. kube-proxy configuration
+
+### 8.3.1 Send the kubeconfig to the other nodes
+
+```shell
+for NODE in k8s-master02 k8s-master03; do scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig; done
+
+for NODE in k8s-node01 k8s-node02; do scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig; done
+```
+
+### 8.3.2 Add the kube-proxy service file on all k8s nodes
+
+```shell
+cat > /usr/lib/systemd/system/kube-proxy.service << EOF
+[Unit]
+Description=Kubernetes Kube Proxy
+Documentation=https://github.com/kubernetes/kubernetes
+After=network.target
+
+[Service]
+ExecStart=/usr/local/bin/kube-proxy \\
+ --config=/etc/kubernetes/kube-proxy.yaml \\
+ --v=2
+
+Restart=always
+RestartSec=10s
+
+[Install]
+WantedBy=multi-user.target
+
+EOF
+```
+
+### 8.3.3 Add the kube-proxy configuration on all k8s nodes
+
+```shell
+cat > /etc/kubernetes/kube-proxy.yaml << EOF
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+bindAddress: 0.0.0.0
+clientConnection:
+ acceptContentTypes: ""
+ burst: 10
+ contentType: application/vnd.kubernetes.protobuf
+ kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
+ qps: 5
+clusterCIDR: 172.16.0.0/12,fc00:2222::/112
+configSyncPeriod: 15m0s
+conntrack:
+ max: null
+ maxPerCore: 32768
+ min: 131072
+ tcpCloseWaitTimeout: 1h0m0s
+ tcpEstablishedTimeout: 24h0m0s
+enableProfiling: false
+healthzBindAddress: 0.0.0.0:10256
+hostnameOverride: ""
+iptables:
+ masqueradeAll: false
+ masqueradeBit: 14
+ minSyncPeriod: 0s
+ syncPeriod: 30s
+ipvs:
+ masqueradeAll: true
+ minSyncPeriod: 5s
+ scheduler: "rr"
+ syncPeriod: 30s
+kind: KubeProxyConfiguration
+metricsBindAddress: 127.0.0.1:10249
+mode: "ipvs"
+nodePortAddresses: null
+oomScoreAdj: -999
+portRange: ""
+udpIdleTimeout: 250ms
+
+EOF
+```
+
+### 8.3.4 Start kube-proxy
+
+```shell
+systemctl daemon-reload
+systemctl restart kube-proxy
+systemctl enable --now kube-proxy
+```
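+
+A quick way to confirm kube-proxy came up in the intended mode; the 127.0.0.1:10249 endpoint below is the metricsBindAddress from the configuration above, and ipvsadm is only needed for the second, optional check:
+
+```shell
+# Should print "ipvs", matching mode: "ipvs" in kube-proxy.yaml
+curl 127.0.0.1:10249/proxyMode
+
+# Optionally list the virtual servers kube-proxy has programmed (requires the ipvsadm package)
+ipvsadm -Ln
+```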
+
+# 9. Install a network plugin
+
+**Note: choose either 9.1 or 9.2, not both. It is recommended to take a snapshot before proceeding, so you can roll back if anything goes wrong later.**
+
+**On CentOS 7 you must upgrade libseccomp first, otherwise the network plugin cannot be installed.**
+
+```shell
+# https://github.com/opencontainers/runc/releases
+# Upgrade runc
+wget https://ghproxy.com/https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64
+install -m 755 runc.amd64 /usr/local/sbin/runc
+cp -p /usr/local/sbin/runc /usr/local/bin/runc
+cp -p /usr/local/sbin/runc /usr/bin/runc
+
+# Download a libseccomp package newer than 2.4
+yum -y install http://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/libseccomp-2.5.1-1.el8.x86_64.rpm
+
+
+# Check the current version
+[root@k8s-master-1 ~]# rpm -qa | grep libseccomp
+libseccomp-2.5.1-1.el8.x86_64
+
+
+```
+
+## 9.1 Install Calico
+
+### 9.1.1 Change the calico CIDR
+
+```shell
+# If there is no public IPv6 locally, use calico.yaml
+kubectl apply -f calico.yaml
+
+# If there is public IPv6 locally, use calico-ipv6.yaml
+# kubectl apply -f calico-ipv6.yaml
+
+# If the docker images cannot be pulled, you can use my registry instead
+# sed -i "s#docker.io/calico/#registry.cn-hangzhou.aliyuncs.com/chenby/#g" calico.yaml
+# sed -i "s#docker.io/calico/#registry.cn-hangzhou.aliyuncs.com/chenby/#g" calico-ipv6.yaml
+```
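+
+If the calico.yaml you downloaded still carries the upstream default pool, point CALICO_IPV4POOL_CIDR at this cluster's pod CIDR before applying. The sed below is illustrative only — verify the default string in your copy of the file first:
+
+```shell
+# Illustrative only: align the IPv4 pool with the pod CIDR used throughout this guide
+sed -i 's#192.168.0.0/16#172.16.0.0/12#g' calico.yaml
+
+# For calico-ipv6.yaml, CALICO_IPV6POOL_CIDR should likewise match the IPv6 pod CIDR
+# (fc00:2222::/112, the same value configured as kube-proxy's clusterCIDR above)
+```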
+
+### 9.1.2 Check the container status
+
+```shell
+# calico initialization is slow; be patient, it takes roughly ten minutes
+[root@k8s-master01 ~]# kubectl get pod -A
+NAMESPACE NAME READY STATUS RESTARTS AGE
+kube-system calico-kube-controllers-6747f75cdc-fbvvc 1/1 Running 0 61s
+kube-system calico-node-fs7hl 1/1 Running 0 61s
+kube-system calico-node-jqz58 1/1 Running 0 61s
+kube-system calico-node-khjlg 1/1 Running 0 61s
+kube-system calico-node-wmf8q 1/1 Running 0 61s
+kube-system calico-node-xc6gn 1/1 Running 0 61s
+kube-system calico-typha-6cdc4b4fbc-57snb 1/1 Running 0 61s
+```
+
+## 9.2 Install cilium
+
+### 9.2.1 Install helm
+
+```shell
+# [root@k8s-master01 ~]# curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
+# [root@k8s-master01 ~]# chmod 700 get_helm.sh
+# [root@k8s-master01 ~]# ./get_helm.sh
+
+wget https://get.helm.sh/helm-canary-linux-amd64.tar.gz
+tar xvf helm-canary-linux-amd64.tar.gz
+cp linux-amd64/helm /usr/local/bin/
+```
+
+### 9.2.2 Install cilium
+
+```shell
+# Add the repo
+helm repo add cilium https://helm.cilium.io
+
+# Install with default parameters
+helm install cilium cilium/cilium --namespace kube-system
+
+# Enable IPv6
+# helm install cilium cilium/cilium --namespace kube-system --set ipv6.enabled=true
+
+# Enable Hubble flow visibility and the monitoring plugins
+# helm install cilium cilium/cilium --namespace kube-system --set hubble.relay.enabled=true --set hubble.ui.enabled=true --set prometheus.enabled=true --set operator.prometheus.enabled=true --set hubble.enabled=true --set hubble.metrics.enabled="{dns,drop,tcp,flow,port-distribution,icmp,http}"
+
+```
+
+### 9.2.3 Check the pods
+
+```shell
+[root@k8s-master01 ~]# kubectl get pod -A | grep cil
+kube-system cilium-gmr6c 1/1 Running 0 5m3s
+kube-system cilium-kzgdj 1/1 Running 0 5m3s
+kube-system cilium-operator-69b677f97c-6pw4k 1/1 Running 0 5m3s
+kube-system cilium-operator-69b677f97c-xzzdk 1/1 Running 0 5m3s
+kube-system cilium-q2rnr 1/1 Running 0 5m3s
+kube-system cilium-smx5v 1/1 Running 0 5m3s
+kube-system cilium-tdjq4 1/1 Running 0 5m3s
+[root@k8s-master01 ~]#
+```
+
+### 9.2.4 Download the dedicated monitoring dashboards
+
+```shell
+[root@k8s-master01 yaml]# wget https://raw.githubusercontent.com/cilium/cilium/1.12.1/examples/kubernetes/addons/prometheus/monitoring-example.yaml
+[root@k8s-master01 yaml]#
+[root@k8s-master01 yaml]# kubectl apply -f monitoring-example.yaml
+namespace/cilium-monitoring created
+serviceaccount/prometheus-k8s created
+configmap/grafana-config created
+configmap/grafana-cilium-dashboard created
+configmap/grafana-cilium-operator-dashboard created
+configmap/grafana-hubble-dashboard created
+configmap/prometheus created
+clusterrole.rbac.authorization.k8s.io/prometheus created
+clusterrolebinding.rbac.authorization.k8s.io/prometheus created
+service/grafana created
+service/prometheus created
+deployment.apps/grafana created
+deployment.apps/prometheus created
+[root@k8s-master01 yaml]#
+```
+
+### 9.2.5 Download and deploy the connectivity test cases
+
+```shell
+[root@k8s-master01 yaml]# wget https://raw.githubusercontent.com/cilium/cilium/master/examples/kubernetes/connectivity-check/connectivity-check.yaml
+
+[root@k8s-master01 yaml]# sed -i "s#google.com#oiox.cn#g" connectivity-check.yaml
+
+[root@k8s-master01 yaml]# kubectl apply -f connectivity-check.yaml
+deployment.apps/echo-a created
+deployment.apps/echo-b created
+deployment.apps/echo-b-host created
+deployment.apps/pod-to-a created
+deployment.apps/pod-to-external-1111 created
+deployment.apps/pod-to-a-denied-cnp created
+deployment.apps/pod-to-a-allowed-cnp created
+deployment.apps/pod-to-external-fqdn-allow-google-cnp created
+deployment.apps/pod-to-b-multi-node-clusterip created
+deployment.apps/pod-to-b-multi-node-headless created
+deployment.apps/host-to-b-multi-node-clusterip created
+deployment.apps/host-to-b-multi-node-headless created
+deployment.apps/pod-to-b-multi-node-nodeport created
+deployment.apps/pod-to-b-intra-node-nodeport created
+service/echo-a created
+service/echo-b created
+service/echo-b-headless created
+service/echo-b-host-headless created
+ciliumnetworkpolicy.cilium.io/pod-to-a-denied-cnp created
+ciliumnetworkpolicy.cilium.io/pod-to-a-allowed-cnp created
+ciliumnetworkpolicy.cilium.io/pod-to-external-fqdn-allow-google-cnp created
+[root@k8s-master01 yaml]#
+```
+
+### 9.2.6 Check the pods
+
+```shell
+[root@k8s-master01 yaml]# kubectl get pod -A
+NAMESPACE NAME READY STATUS RESTARTS AGE
+cilium-monitoring grafana-59957b9549-6zzqh 1/1 Running 0 10m
+cilium-monitoring prometheus-7c8c9684bb-4v9cl 1/1 Running 0 10m
+default chenby-75b5d7fbfb-7zjsr 1/1 Running 0 27h
+default chenby-75b5d7fbfb-hbvr8 1/1 Running 0 27h
+default chenby-75b5d7fbfb-ppbzg 1/1 Running 0 27h
+default echo-a-6799dff547-pnx6w 1/1 Running 0 10m
+default echo-b-fc47b659c-4bdg9 1/1 Running 0 10m
+default echo-b-host-67fcfd59b7-28r9s 1/1 Running 0 10m
+default host-to-b-multi-node-clusterip-69c57975d6-z4j2z 1/1 Running 0 10m
+default host-to-b-multi-node-headless-865899f7bb-frrmc 1/1 Running 0 10m
+default pod-to-a-allowed-cnp-5f9d7d4b9d-hcd8x 1/1 Running 0 10m
+default pod-to-a-denied-cnp-65cc5ff97b-2rzb8 1/1 Running 0 10m
+default pod-to-a-dfc64f564-p7xcn 1/1 Running 0 10m
+default pod-to-b-intra-node-nodeport-677868746b-trk2l 1/1 Running 0 10m
+default pod-to-b-multi-node-clusterip-76bbbc677b-knfq2 1/1 Running 0 10m
+default pod-to-b-multi-node-headless-698c6579fd-mmvd7 1/1 Running 0 10m
+default pod-to-b-multi-node-nodeport-5dc4b8cfd6-8dxmz 1/1 Running 0 10m
+default pod-to-external-1111-8459965778-pjt9b 1/1 Running 0 10m
+default pod-to-external-fqdn-allow-google-cnp-64df9fb89b-l9l4q 1/1 Running 0 10m
+kube-system cilium-7rfj6 1/1 Running 0 56s
+kube-system cilium-d4cch 1/1 Running 0 56s
+kube-system cilium-h5x8r 1/1 Running 0 56s
+kube-system cilium-operator-5dbddb6dbf-flpl5 1/1 Running 0 56s
+kube-system cilium-operator-5dbddb6dbf-gcznc 1/1 Running 0 56s
+kube-system cilium-t2xlz 1/1 Running 0 56s
+kube-system cilium-z65z7 1/1 Running 0 56s
+kube-system coredns-665475b9f8-jkqn8 1/1 Running 1 (36h ago) 36h
+kube-system hubble-relay-59d8575-9pl9z 1/1 Running 0 56s
+kube-system hubble-ui-64d4995d57-nsv9j 2/2 Running 0 56s
+kube-system metrics-server-776f58c94b-c6zgs 1/1 Running 1 (36h ago) 37h
+[root@k8s-master01 yaml]#
+```
+
+### 9.2.7 Change the services to NodePort
+
+```shell
+[root@k8s-master01 yaml]# kubectl edit svc -n kube-system hubble-ui
+service/hubble-ui edited
+[root@k8s-master01 yaml]#
+[root@k8s-master01 yaml]# kubectl edit svc -n cilium-monitoring grafana
+service/grafana edited
+[root@k8s-master01 yaml]#
+[root@k8s-master01 yaml]# kubectl edit svc -n cilium-monitoring prometheus
+service/prometheus edited
+[root@k8s-master01 yaml]#
+
+# In each of the three `kubectl edit` sessions above, set the service type to:
+type: NodePort
+```
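+
+If you prefer a non-interactive change, the same edit can be applied with `kubectl patch`, using the service names and namespaces shown above:
+
+```shell
+kubectl -n kube-system patch svc hubble-ui -p '{"spec": {"type": "NodePort"}}'
+kubectl -n cilium-monitoring patch svc grafana -p '{"spec": {"type": "NodePort"}}'
+kubectl -n cilium-monitoring patch svc prometheus -p '{"spec": {"type": "NodePort"}}'
+```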
+
+### 9.2.8 Check the ports
+
+```shell
+[root@k8s-master01 yaml]# kubectl get svc -A | grep monit
+cilium-monitoring   grafana          NodePort    10.100.250.17    <none>   3000:30707/TCP   15m
+cilium-monitoring   prometheus       NodePort    10.100.131.243   <none>   9090:31155/TCP   15m
+[root@k8s-master01 yaml]#
+[root@k8s-master01 yaml]# kubectl get svc -A | grep hubble
+kube-system         hubble-metrics   ClusterIP   None             <none>   9965/TCP         5m12s
+kube-system         hubble-peer      ClusterIP   10.100.150.29    <none>   443/TCP          5m12s
+kube-system         hubble-relay     ClusterIP   10.109.251.34    <none>   80/TCP           5m12s
+kube-system         hubble-ui        NodePort    10.102.253.59    <none>   80:31219/TCP     5m12s
+[root@k8s-master01 yaml]#
+```
+
+### 9.2.9 Access the web UIs
+
+```shell
+http://192.168.1.61:30707
+http://192.168.1.61:31155
+http://192.168.1.61:31219
+```
+
+
+
+# 10. Install CoreDNS
+
+## 10.1 The following steps are performed only on master01
+
+### 10.1.1 Modify the file
+
+```shell
+cd coredns/
+cat coredns.yaml | grep clusterIP:
+ clusterIP: 10.96.0.10
+```
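+
+The clusterIP above is the DNS service IP that kubelet hands to pods and must fall inside the 10.96.0.0/12 service CIDR used by this guide. If you deploy with a different service CIDR, adjust coredns.yaml before applying; the substitution below is illustrative and the placeholder is hypothetical:
+
+```shell
+# Illustrative only: swap in the DNS service IP that matches your own service CIDR
+sed -i "s/10.96.0.10/<your-dns-service-ip>/g" coredns.yaml
+```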
+
+### 10.1.2 Install
+
+```shell
+kubectl create -f coredns.yaml
+serviceaccount/coredns created
+clusterrole.rbac.authorization.k8s.io/system:coredns created
+clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
+configmap/coredns created
+deployment.apps/coredns created
+service/kube-dns created
+```
+
+# 11. Install Metrics Server
+
+## 11.1 The following steps are performed only on master01
+
+### 11.1.1 Install Metrics-server
+
+In recent Kubernetes versions, system resource metrics are collected by Metrics-server, which gathers memory, disk, CPU and network usage for nodes and Pods.
+
+```shell
+# Install metrics server
+cd metrics-server/
+
+kubectl apply -f metrics-server.yaml
+```
+
+### 11.1.2 Wait a moment, then check the status
+
+```shell
+kubectl top node
+NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
+k8s-master01 154m 1% 1715Mi 21%
+k8s-master02 151m 1% 1274Mi 16%
+k8s-master03 523m 6% 1345Mi 17%
+k8s-node01 84m 1% 671Mi 8%
+k8s-node02 73m 0% 727Mi 9%
+k8s-node03 96m 1% 769Mi 9%
+k8s-node04 68m 0% 673Mi 8%
+k8s-node05 82m 1% 679Mi 8%
+```
+
+# 12. Cluster verification
+
+## 12.1 Deploy a test pod and resolve the kubernetes service in the default namespace
+
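+The manifest for the busybox test pod used throughout this section is not reproduced here. A minimal sketch, assuming a busybox:1.28 image that simply sleeps, would be:
+
+```shell
+cat<<EOF | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: busybox
+  namespace: default
+spec:
+  containers:
+  - name: busybox
+    image: docker.io/library/busybox:1.28
+    command: ["sleep", "3600"]
+    imagePullPolicy: IfNotPresent
+  restartPolicy: Always
+EOF
+```
+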
+```shell
+# With the busybox test pod applied (see the sketch above), check the kubernetes service
+# and resolve it from inside the pod:
+kubectl get svc
+NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
+kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   17h
+
+kubectl exec busybox -n default -- nslookup kubernetes
+Server:    10.96.0.10
+Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
+
+Name: kubernetes
+Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
+```
+
+## 12.3 Test whether cross-namespace resolution works
+
+```shell
+kubectl exec busybox -n default -- nslookup kube-dns.kube-system
+Server: 10.96.0.10
+Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
+
+Name: kube-dns.kube-system
+Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
+```
+
+## 12.4 Every node must be able to reach the kubernetes svc on 443 and the kube-dns service on 53
+
+```shell
+telnet 10.96.0.1 443
+Trying 10.96.0.1...
+Connected to 10.96.0.1.
+Escape character is '^]'.
+
+ telnet 10.96.0.10 53
+Trying 10.96.0.10...
+Connected to 10.96.0.10.
+Escape character is '^]'.
+
+curl 10.96.0.10:53
+curl: (52) Empty reply from server
+```
+
+## 12.5 Pods must be able to reach each other
+
+```shell
+kubectl get po -owide
+NAME      READY   STATUS    RESTARTS   AGE   IP              NODE         NOMINATED NODE   READINESS GATES
+busybox   1/1     Running   0          17m   172.27.14.193   k8s-node02   <none>           <none>
+
+kubectl get po -n kube-system -owide
+NAME                                       READY   STATUS    RESTARTS      AGE   IP               NODE           NOMINATED NODE   READINESS GATES
+calico-kube-controllers-5dffd5886b-4blh6   1/1     Running   0             77m   172.25.244.193   k8s-master01   <none>           <none>
+calico-node-fvbdq                          1/1     Running   1 (75m ago)   77m   192.168.1.61     k8s-master01   <none>           <none>
+calico-node-g8nqd                          1/1     Running   0             77m   192.168.1.64     k8s-node01     <none>           <none>
+calico-node-mdps8                          1/1     Running   0             77m   192.168.1.65     k8s-node02     <none>           <none>
+calico-node-nf4nt                          1/1     Running   0             77m   192.168.1.63     k8s-master03   <none>           <none>
+calico-node-sq2ml                          1/1     Running   0             77m   192.168.1.62     k8s-master02   <none>           <none>
+calico-typha-8445487f56-mg6p8              1/1     Running   0             77m   192.168.1.65     k8s-node02     <none>           <none>
+calico-typha-8445487f56-pxbpj              1/1     Running   0             77m   192.168.1.61     k8s-master01   <none>           <none>
+calico-typha-8445487f56-tnssl              1/1     Running   0             77m   192.168.1.64     k8s-node01     <none>           <none>
+coredns-5db5696c7-67h79                    1/1     Running   0             63m   172.25.92.65     k8s-master02   <none>           <none>
+metrics-server-6bf7dcd649-5fhrw            1/1     Running   0             61m   172.18.195.1     k8s-master03   <none>           <none>
+
+# Exec into busybox and ping another node (pods on other nodes can be tested the same way)
+
+kubectl exec -ti busybox -- sh
+/ # ping 192.168.1.64
+PING 192.168.1.64 (192.168.1.64): 56 data bytes
+64 bytes from 192.168.1.64: seq=0 ttl=63 time=0.358 ms
+64 bytes from 192.168.1.64: seq=1 ttl=63 time=0.668 ms
+64 bytes from 192.168.1.64: seq=2 ttl=63 time=0.637 ms
+64 bytes from 192.168.1.64: seq=3 ttl=63 time=0.624 ms
+64 bytes from 192.168.1.64: seq=4 ttl=63 time=0.907 ms
+
+# Getting replies shows that this pod can communicate across namespaces and across hosts
+```
+
+## 12.6 Create three replicas and observe that they are spread across different nodes (delete them when done)
+
+```shell
+cat > deployments.yaml << EOF
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nginx-deployment
+ labels:
+ app: nginx
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: docker.io/library/nginx:1.14.2
+ ports:
+ - containerPort: 80
+
+EOF
+
+kubectl apply -f deployments.yaml
+deployment.apps/nginx-deployment created
+
+kubectl get pod
+NAME READY STATUS RESTARTS AGE
+busybox 1/1 Running 0 6m25s
+nginx-deployment-9456bbbf9-4bmvk 1/1 Running 0 8s
+nginx-deployment-9456bbbf9-9rcdk 1/1 Running 0 8s
+nginx-deployment-9456bbbf9-dqv8s 1/1 Running 0 8s
+
+# Delete nginx
+
+[root@k8s-master01 ~]# kubectl delete -f deployments.yaml
+```
+
+
+
+
+
+# 13. Install the dashboard
+
+```shell
+
+helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
+helm install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard
+
+```
+
+## 13.1 Change the dashboard svc to NodePort (skip this if it already is)
+
+```shell
+kubectl edit svc kubernetes-dashboard
+ type: NodePort
+```
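+
+The same change can be made non-interactively, assuming the service is named kubernetes-dashboard and lives in the kubernetes-dashboard namespace as queried in 13.2:
+
+```shell
+kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec": {"type": "NodePort"}}'
+```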
+
+## 13.2 Check the port number
+
+```shell
+kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
+NAME                   TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
+kubernetes-dashboard   NodePort   10.108.120.110   <none>        443:30034/TCP   34s
+```
+
+## 13.3 Create a token
+
+```shell
+kubectl -n kubernetes-dashboard create token admin-user
+eyJhbGciOiJSUzI1NiIsImtpZCI6IkFZWENLUmZQWTViWUF4UV81NWJNb0JEa0I4R2hQMHVac2J3RDM3RHJLcFEifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjcwNjc0MzY1LCJpYXQiOjE2NzA2NzA3NjUsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiODkyODRjNGUtYzk0My00ODkzLWE2ZjctNTYxZWJhMzE2NjkwIn19LCJuYmYiOjE2NzA2NzA3NjUsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.DFxzS802Iu0lldikjhyp2diZSpVAUoSTbOjerH2t7ToM0TMoPQdcdDyvBTcNlIew3F01u4D6atNV7J36IGAnHEX0Q_cYAb00jINjy1YXGz0gRhRE0hMrXay2-Qqo6tAORTLUVWrctW6r0li5q90rkBjr5q06Lt5BTpUhbhbgLQQJWwiEVseCpUEikxD6wGnB1tCamFyjs3sa-YnhhqCR8wUAZcTaeVbMxCuHVAuSqnIkxat9nyxGcsjn7sqmBqYjjOGxp5nhHPDj03TWmSJlb_Csc7pvLsB9LYm0IbER4xDwtLZwMAjYWRbjKxbkUp4L9v5CZ4PbIHap9qQp1FXreA
+```
+
+## 13.4 Log in to the dashboard
+
+https://192.168.1.61:30034/
+
+# 14. Install ingress
+
+## 14.1 Run the deployment
+
+```shell
+cd ingress/
+
+kubectl apply -f deploy.yaml
+kubectl apply -f backend.yaml
+
+# After the creation has finished, run:
+kubectl apply -f ingress-demo-app.yaml
+
+kubectl get ingress
+NAME CLASS HOSTS ADDRESS PORTS AGE
+ingress-host-bar nginx hello.chenby.cn,demo.chenby.cn 192.168.1.62 80 7s
+
+```
+
+## 14.2 Filter and check the ingress ports
+
+```shell
+[root@hello ~/yaml]# kubectl get svc -A | grep ingress
+ingress-nginx   ingress-nginx-controller             NodePort    10.104.231.36   <none>   80:32636/TCP,443:30579/TCP   104s
+ingress-nginx   ingress-nginx-controller-admission   ClusterIP   10.101.85.88    <none>   443/TCP                      105s
+[root@hello ~/yaml]#
+```
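+
+To exercise the ingress rules end to end, send requests to the controller's NodePort with the Host header set to one of the hosts created in 14.1; the 32636 port below is the one shown above, so substitute whatever port your cluster actually allocated:
+
+```shell
+curl -H 'Host: hello.chenby.cn' http://192.168.1.61:32636
+curl -H 'Host: demo.chenby.cn' http://192.168.1.61:32636
+```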
+
+# 15. IPv6 test
+
+
+
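+The Deployment/Service manifest applied in the block below is not reproduced in full. A minimal sketch, assuming an nginx-based Deployment and a dual-stack NodePort Service both named `chenby`, with IPv6 listed first so the Service receives an IPv6 cluster IP as in the output below:
+
+```shell
+cat<<EOF | kubectl apply -f -
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: chenby
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: chenby
+  template:
+    metadata:
+      labels:
+        app: chenby
+    spec:
+      containers:
+      - name: chenby
+        image: docker.io/library/nginx
+        ports:
+        - containerPort: 80
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: chenby
+spec:
+  ipFamilyPolicy: PreferDualStack
+  ipFamilies:
+  - IPv6
+  - IPv4
+  type: NodePort
+  selector:
+    app: chenby
+  ports:
+  - port: 80
+    targetPort: 80
+EOF
+```
+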
+```shell
+# Deploy the application by applying the Deployment/Service manifest (sketched above),
+# then check the dual-stack service:
+kubectl get svc chenby
+NAME     TYPE       CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
+chenby   NodePort   fd00::a29c   <none>        80:30779/TCP   5s
+[root@k8s-master01 ~]#
+
+# Access via the internal network
+[root@localhost yaml]# curl -I http://[fd00::a29c]
+HTTP/1.1 200 OK
+Server: nginx/1.21.6
+Date: Thu, 05 May 2022 10:20:35 GMT
+Content-Type: text/html
+Content-Length: 615
+Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
+Connection: keep-alive
+ETag: "61f01158-267"
+Accept-Ranges: bytes
+
+[root@localhost yaml]# curl -I http://192.168.1.61:30779
+HTTP/1.1 200 OK
+Server: nginx/1.21.6
+Date: Thu, 05 May 2022 10:20:59 GMT
+Content-Type: text/html
+Content-Length: 615
+Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
+Connection: keep-alive
+ETag: "61f01158-267"
+Accept-Ranges: bytes
+
+[root@localhost yaml]#
+
+# Access via the public network
+[root@localhost yaml]# curl -I http://[2409:8a10:9e18:9020::10]:30779
+HTTP/1.1 200 OK
+Server: nginx/1.21.6
+Date: Thu, 05 May 2022 10:20:54 GMT
+Content-Type: text/html
+Content-Length: 615
+Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
+Connection: keep-alive
+ETag: "61f01158-267"
+Accept-Ranges: bytes
+```
+
+# 16. Install command-line auto-completion
+
+```shell
+yum install bash-completion -y
+source /usr/share/bash-completion/bash_completion
+source <(kubectl completion bash)
+echo "source <(kubectl completion bash)" >> ~/.bashrc
+```
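+
+If you commonly shorten `kubectl` to `k`, completion can be wired to the alias as well (standard kubectl bash-completion usage; adjust to your own shell setup):
+
+```shell
+echo 'alias k=kubectl' >> ~/.bashrc
+echo 'complete -o default -F __start_kubectl k' >> ~/.bashrc
+source ~/.bashrc
+```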
+
+> **About**
+>
+> https://www.oiox.cn/
+>
+> https://www.oiox.cn/index.php/start-page.html
+>
+> **CSDN, GitHub, Zhihu, OSChina, SegmentFault, Juejin, Jianshu, Huawei Cloud, Alibaba Cloud, Tencent Cloud, Bilibili, Toutiao, Sina Weibo, personal blog**
+>
+> **Search for "小陈运维" on any of these platforms**
+>
+> **Articles are published mainly on the WeChat official account "Linux运维交流社区"**