Deploying a Highly Available Kubernetes v1.23.17 Cluster with Ansible Playbooks

1. Environment Information

Environment details: to be added...

Without further ado, straight to the good stuff!

2. Playbook Directory and Files

[root@ansible-host ansible]# tree roles/kubernetes/
roles/kubernetes/
├── files
│   ├── 99-kubernetes-cri.conf
│   ├── check_apiserver.sh
│   ├── containerd.conf
│   ├── crictl.yaml
│   ├── images
│   │   ├── cadvisor-0.46.0.tar
│   │   ├── cni-v3.22.0.tar
│   │   ├── coredns-v1.8.6.tar
│   │   ├── dashboard-v2.4.0.tar
│   │   ├── etcd-3.5.6-0.tar
│   │   ├── kube-apiserver-v1.23.17.tar
│   │   ├── kube-controller-manager-v1.23.17.tar
│   │   ├── kube-controllers-v3.22.0.tar
│   │   ├── kube-proxy-v1.23.17.tar
│   │   ├── kube-scheduler-v1.23.17.tar
│   │   ├── metrics-scraper-v1.0.7.tar
│   │   ├── metrics-server-0.5.0.tar
│   │   ├── node-v3.22.0.tar
│   │   ├── pause-3.6.tar
│   │   ├── pod2daemon-flexvol-v3.22.0.tar
│   │   └── typha-v3.22.0.tar
│   ├── ipvs.conf
│   ├── k8s.conf
│   ├── kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm
│   ├── kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm
│   ├── kubelet
│   ├── kube-proxy.sh
│   ├── kubernetes.repo
│   ├── limits.conf
│   └── sshkey
│       ├── authorized_keys
│       ├── id_rsa
│       ├── id_rsa.pub
│       └── known_hosts
├── handlers
├── tasks
│   ├── calico.yaml
│   ├── containerd.yaml
│   ├── dashboard.yaml
│   ├── ha-master.yaml
│   ├── haproxy.yaml
│   ├── init.yaml
│   ├── k8s-module.yaml
│   ├── keepalived.yaml
│   ├── kubeadm-init.yaml
│   ├── kube-proxy.yaml
│   ├── main.yaml
│   ├── metrics.yaml
│   ├── node-join.yaml
│   ├── token.yaml
│   └── yum.yaml
├── templates
│   ├── config.toml.j2
│   ├── haproxy.cfg.j2
│   ├── hosts.j2
│   ├── keepalived.conf.j2
│   ├── kubeadm-config.yaml.j2
│   └── new.yaml.j2
└── vars
    └── main.yaml

7 directories, 54 files
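
The role layout above is shown without the top-level play or the inventory that drives it. A minimal sketch of both follows; the group name k8s-cluster, the inventory file hosts, and the playbook name k8s.yaml are assumptions for illustration, not files from the original repository. Note that the remote machines' hostnames must already be kube-master1 ... kube-node3, because the tasks select hosts via the ansible_fqdn / nodename facts.

# hosts (hypothetical inventory; IPs taken from vars/main.yaml below)
[k8s-cluster]
kube-master1 ansible_host=10.50.121.241
kube-master2 ansible_host=10.50.121.242
kube-master3 ansible_host=10.50.121.243
kube-node1   ansible_host=10.50.121.244
kube-node2   ansible_host=10.50.121.245
kube-node3   ansible_host=10.50.121.246

# k8s.yaml (hypothetical top-level play that applies the role)
- hosts: k8s-cluster
  remote_user: root
  gather_facts: yes
  roles:
    - kubernetes

# run the full deployment
ansible-playbook -i hosts k8s.yaml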

2.1 yum.yaml playbook content

[root@ansible-host ansible]# cat roles/kubernetes/tasks/yum.yaml 
- name: copy hosts file to remote host
  template:
    src: hosts.j2
    dest: /etc/hosts
  tags:
    - hostsfile
    - yum

- name: backup local yum repo files and add Aliyun base repo
  shell: mkdir -p /etc/yum.repos.d/back && mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/back/ && curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
  tags:
    - backrepo
    - yum

- name: yum install bash packages
  yum:
    name:
      - yum-utils
      - device-mapper-persistent-data
      - lvm2
      - bash-completion
      - epel-release
      - nmap-ncat
    state: present
  tags:
    - installpack
    - yum

- name: download docker repo
  shell: yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
  tags:
    - dockerrepo
    - yum

- name: create kubernetes repo
  copy:
    src: kubernetes.repo
    dest: /etc/yum.repos.d/
  tags:
    - createrepo
    - yum

- name: config kubernetes repo
  shell: sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo
  tags:
    - configrepo
    - yum

- name: yum install necessary packages
  yum:
    name:
      - yum-utils
      - wget
      - jq
      - psmisc
      - vim
      - net-tools
      - telnet
      - git
    state: present
  tags:
    - nepack
    - yum
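
The kubernetes.repo file installed above is not reproduced in the article. A typical Aliyun-mirror definition used for this purpose looks like the following sketch (not necessarily the author's exact file; gpgcheck is disabled here for simplicity):

# kubernetes.repo (sketch, Aliyun mirror)
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg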

2.2 init.yaml playbook content

[root@ansible-host ansible]# cat roles/kubernetes/tasks/init.yaml 
- name: stop firewalld
  service:
    name: firewalld
    state: stopped
    enabled: no
  tags:
    - stopfirewalld
    - init

- name: stop dnsmasq
  service:
    name: dnsmasq
    state: stopped
    enabled: no
  ignore_errors: yes
  tags:
    - stopdnsmasq
    - init

- name: stop NetworkManager
  service:
    name: NetworkManager
    state: stopped
    enabled: no
  tags:
    - stopNetworkManager
    - init

- name: stop selinux
  shell: setenforce 0
  tags:
    - stopselinux
    - init

- name: disable selinux
  lineinfile:
    path: /etc/sysconfig/selinux
    regexp: '^SELINUX='
    line: 'SELINUX=disabled'
  tags:
    - disableselinux
    - init

- name: disable selinux
  lineinfile:
    path: /etc/selinux/config
    regexp: '^SELINUX='
    line: 'SELINUX=disabled'
  tags:
    - disableselinux
    - init

- name: stop swap
  shell: swapoff -a && sysctl -w vm.swappiness=0
  tags:
    - stopswap
    - init

- name: disable swap
  shell: sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
  tags:
    - disableswap
    - init

- name: download ntpdate rpm
  shell: rpm -ivh http://mirrors.wlnmp.com/centos/wlnmp-release-centos.noarch.rpm
  ignore_errors: yes
  tags:
    - downloadntpdate
    - init

- name: install ntpdate
  yum:
    name: ntpdate
    state: present
  ignore_errors: yes
  tags:
    - installntp
    - init

- name: create timedate link
  file:
    src: /usr/share/zoneinfo/Asia/Shanghai
    dest: /etc/localtime
    state: link
    force: yes
  tags:
    - init

- name: config timezone
  shell: echo 'Asia/Shanghai' >/etc/timezone
  tags:
    - init

- name: config ntpdate
  shell: ntpdate time2.aliyun.com
  ignore_errors: yes
  tags:
    - init

- name: config crontab
  shell: echo "*/5 * * * * root /usr/sbin/ntpdate time2.aliyun.com" >> /etc/crontab
  ignore_errors: yes
  tags:
    - init

- name: config system limit
  shell: ulimit -SHn 65535
  tags:
    - init

- name: set system limit to file
  copy:
    src: limits.conf
    dest: /etc/security/
  tags:
    - init

- name: copy sshkey to remote host
  copy:
    src: sshkey/
    dest: /root/.ssh
  tags:
    - init

- name: download codefile
  shell: cd /root/ && git clone https://gitee.com/dukuan/k8s-ha-install.git
  when:
    - ansible_facts['nodename'] == "kube-master1"
  tags:
    - downloadcodefile
    - init

- name: update kernel
  shell: yum update -y --exclude=kernel*
  tags:
    - updatekernel
    - init

- name: wait reboot system
  reboot:
    reboot_timeout: 300

- name: copy kernel-ml-devel file to remote host
  copy:
    src: kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm
    dest: /root/
  tags:
    - kernel-ml-devel
    - init

- name: copy kernel-ml file to remote host
  copy:
    src: kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm
    dest: /root/
  tags:
    - kernel-ml
    - init

- name: install kernel-ml
  shell: cd /root && yum localinstall -y kernel-ml*
  tags:
    - installkernel
    - init

- name: set kernel boot order
  shell: grub2-set-default  0 && grub2-mkconfig -o /etc/grub2.cfg && grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
  tags:
    - kernelboot
    - init

- name: check default kernel
  shell: grubby --default-kernel > /tmp/kernel.log && grep "4.19.12-1.el7.elrepo.x86_64" /tmp/kernel.log
  tags:
    - defaultkernel
    - init

- name: wait reboot system
  reboot:
    reboot_timeout: 300
  tags:
    - reboot
    - init

- name: check kernel
  shell: uname -a > /tmp/uname.log && grep "4.19.12-1.el7.elrepo.x86_64" /tmp/uname.log
  tags:
    - checkkernel
    - init

- name: install ipvsadm
  yum:
    name:
      - ipvsadm
      - ipset
      - sysstat
      - conntrack
      - libseccomp
    state: present
  tags:
    - ipvsadm
    - init

- name: config ipvs module
  shell: modprobe -- ip_vs && modprobe -- ip_vs_rr && modprobe -- ip_vs_wrr && modprobe -- ip_vs_sh && modprobe -- nf_conntrack
  tags:
    - ipvsmodule
    - init

- name: copy ipvs.conf file to remote host
  copy:
    src: ipvs.conf
    dest: /etc/modules-load.d/
  tags:
    - ipvsconf
    - init

- name: enable systemd modules load service
  service:
    name: systemd-modules-load
    state: started
    enabled: yes
  tags:
    - loadmodules
    - init

- name: copy k8s.conf file to remote host
  copy:
    src: k8s.conf
    dest: /etc/sysctl.d/
  tags:
    - k8sconf
    - init

- name: load k8s conf
  shell: sysctl --system
  tags:
    - loadk8sconf
    - init

- name: wait reboot system
  reboot:
    reboot_timeout: 300
  tags:
    - reboot
    - init

- name: get ip_vs conf
  shell: lsmod | grep --color=auto -e ip_vs -e nf_conntrack > /tmp/ip_vs.log
  tags:
    - getipvs
    - init

- name: check ip_vs conf
  shell: grep ip_vs /tmp/ip_vs.log
  tags:
    - checkipvs
    - init
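
The ipvs.conf and k8s.conf files copied above are not reproduced in the article. Typical contents for a kubeadm-based install are sketched below; treat them as assumptions and adjust them to your own files:

# /etc/modules-load.d/ipvs.conf (sketch) - modules loaded at boot,
# matching the modprobe task above
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack

# /etc/sysctl.d/k8s.conf (sketch) - kernel parameters commonly set for Kubernetes
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
vm.swappiness = 0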

2.3 containerd.yaml playbook content

[root@ansible-host ansible]# cat roles/kubernetes/tasks/containerd.yaml 
- name: yum install docker
  shell: yum install docker-ce-20.10.* docker-ce-cli-20.10.* -y
  tags:
    - installdocker
    - containerd

- name: copy containerd.conf to remote host
  copy:
    src: containerd.conf
    dest: /etc/modules-load.d/
  tags:
    - containerdconf
    - containerd

- name: load modules
  shell: modprobe -- overlay && modprobe -- br_netfilter
  tags:
    - loadmodules
    - containerd

- name: copy 99-kubernetes-cri.conf to remote host
  copy:
    src: 99-kubernetes-cri.conf
    dest: /etc/sysctl.d/
  tags:
    - criconf
    - containerd

- name: load kernel
  shell: sysctl --system && mkdir -p /etc/containerd
  tags:
    - loadkernel
    - containerd

- name: copy containerd config file to remote host
  template:
    src: config.toml.j2
    dest: /etc/containerd/config.toml
  tags:
    - setcontainerd
    - containerd

- name: start containerd service
  shell: systemctl daemon-reload && systemctl enable --now containerd
  tags:
    - startcontainerd
    - containerd

- name: config crictl client
  copy:
    src: crictl.yaml
    dest: /etc/crictl.yaml
  tags:
    - crictlclient
    - containerd
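
The three files referenced in this playbook (containerd.conf, 99-kubernetes-cri.conf and crictl.yaml) are not reproduced in the article. Their usual contents are sketched below, assuming a standard containerd/kubeadm setup:

# /etc/modules-load.d/containerd.conf (sketch) - matches the modprobe task above
overlay
br_netfilter

# /etc/sysctl.d/99-kubernetes-cri.conf (sketch)
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-ip6tables = 1

# /etc/crictl.yaml (sketch) - points crictl at the containerd socket
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false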

2.4 k8s-module.yaml playbook content

[root@ansible-host ansible]# cat roles/kubernetes/tasks/k8s-module.yaml 
- name: install kubelet v{{ k8s_version }}
  yum:
    name: kubelet-{{ k8s_version }}-0.x86_64
    state: present
  tags:
    - kubelet
    - module

- name: install kubectl v{{ k8s_version }}
  yum:
    name: kubectl-{{ k8s_version }}-0.x86_64
    state: present
  tags:
    - kubectl
    - module

- name: install kubeadm v{{ k8s_version }}
  yum:
    name: kubeadm-{{ k8s_version }}-0.x86_64
    state: present
  tags:
    - kubeadm
    - module

- name: copy kubelet config file to remote host
  copy:
    src: kubelet
    dest: /etc/sysconfig/
  tags:
    - kubeletconf
    - module

- name: daemon reload service
  shell: systemctl daemon-reload
  tags:
    - daemonreload
    - module

- name: start kubelet service
  service:
    name: kubelet
    state: started
    enabled: yes
  ignore_errors: yes
  tags:
    - daemonreload
    - module
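
The kubelet file copied to /etc/sysconfig/ is not shown in the article. With containerd as the runtime it is typically used to point kubelet at the containerd socket, along the lines of this sketch:

# /etc/sysconfig/kubelet (sketch, containerd runtime)
KUBELET_KUBEADM_ARGS="--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock"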

2.5 haproxy.yaml playbook content

[root@ansible-host ansible]# cat roles/kubernetes/tasks/haproxy.yaml 
- name: install haproxy
  yum:
    name: haproxy
    state: present
  when:
    - ansible_fqdn is match ("kube-master*")
  tags:
    - installhaproxy
    - haproxy

- name: mkdir haproxy directory
  file:
    path: /etc/haproxy
    state: directory
  ignore_errors: yes
  when:
    - ansible_fqdn is match ("kube-master*")
  tags:
    - haproxydir
    - haproxy

- name: copy haproxy config file to remote host
  template:
    src: haproxy.cfg.j2
    dest: /etc/haproxy/haproxy.cfg
  when:
    - ansible_fqdn is match ("kube-master*")
  tags:
    - haproxyconf
    - haproxy

- name: start haproxy service
  service:
    name: haproxy
    state: started
    enabled: yes
  when:
    - ansible_fqdn is match ("kube-master*")
  tags:
    - haproxyservice
    - haproxy

2.6 keepalived.yaml playbook content

[root@ansible-host ansible]# cat roles/kubernetes/tasks/keepalived.yaml 
- name: install keepalived
  yum:
    name: keepalived
    state: present
  when:
    - ansible_fqdn is match ("kube-master*")
  tags:
    - installkeepalived
    - keepalived

- name: mkdir keepalived directory
  file:
    path: /etc/keepalived
    state: directory
  ignore_errors: yes
  when:
    - ansible_fqdn is match ("kube-master*")
  tags:
    - keepaliveddir
    - keepalived

- name: copy keepalived config file to remote host
  template:
    src: keepalived.conf.j2
    dest: /etc/keepalived/keepalived.conf
  when:
    - ansible_fqdn is match ("kube-master*")
  tags:
    - keepalivedconf
    - keepalived

- name: copy check apiserver script to remote host
  copy:
    src: check_apiserver.sh
    dest: /etc/keepalived/check_apiserver.sh
    mode: '0776'
  when:
    - ansible_fqdn is match ("kube-master*")
  tags:
    - apiserverscript
    - keepalived

- name: start keepalived service
  service:
    name: keepalived
    state: started
    enabled: yes
  when:
    - ansible_fqdn is match ("kube-master*")
  tags:
    - keepalivedservice
    - keepalived

- name: check kube-vip is available
  shell: ping -c 5 "{{ kube_vip }}"
  when:
    - ansible_fqdn is match ("kube-master1")
  tags:
    - checkvip
    - keepalived

- name: check kube-vip tcp port is active
  shell: nc -zv "{{ kube_vip }}" 16443
  when:
    - ansible_fqdn is match ("kube-master1")
  tags:
    - checktcpport
    - keepalived

2.7 kubeadm-init.yaml playbook content

[root@ansible-host ansible]# cat roles/kubernetes/tasks/kubeadm-init.yaml 
- name: copy new.yaml to master hosts
  template:
    src: new.yaml.j2
    dest: /root/new.yaml
  when:
    - ansible_fqdn is match ("kube-master1")
  tags:
    - newyaml
    - kubeadminit

- name: copy images file to remote hosts
  copy:
    src: images
    dest: /root/
  tags:
    - copyimages
    - kubeadminit

- name: load images to localhost
  shell: for i in `ls /root/images/`;do ctr -n k8s.io image import /root/images/${i};done
  tags:
    - loadimage
    - kubeadminit

- name: restart kubelet service
  service:
    name: kubelet
    state: restarted
    enabled: yes
  tags:
    - restartkubelet
    - kubeadminit

- name: master1 node kubeadm init
  shell: kubeadm init --config /root/new.yaml --upload-certs > /root/master1-init.log
  when:
    - ansible_fqdn is match ("kube-master1")
  tags:
    - master1init
    - kubeadminit

- name: master1 node set path
  shell: echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /root/.bashrc
  when:
    - ansible_fqdn is match ("kube-master1")
  tags:
    - setpath
    - kubeadminit

- name: master1 node export master-join script
  shell: grep -A2 "kubeadm join" /root/master1-init.log | head -3 > /root/master-join.sh
  when:
    - ansible_fqdn is match ("kube-master1")
  tags:
    - exportmasterjoin
    - kubeadminit

- name: fetch master-join.sh to ansible host
  fetch:
    src: /root/master-join.sh
    dest: /opt/
  when:
    - ansible_fqdn is match ("kube-master1")
  tags:
    - fetchmasterjoin
    - kubeadminit

- name: master1 node export worker-join script
  shell: grep -A2 "kubeadm join" /root/master1-init.log | head -6 | tail -2 > /root/worker-join.sh
  when:
    - ansible_fqdn is match ("kube-master1")
  tags:
    - exportworkjoin
    - kubeadminit

- name: fetch worker-join.sh to ansible host
  fetch:
    src: /root/worker-join.sh
    dest: /opt/
  when:
    - ansible_fqdn is match ("kube-master1")
  tags:
    - fetchworkerjoin
    - kubeadminit
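
The two grep pipelines above carve the join commands out of kubeadm's init log, so the fetched scripts end up looking roughly like this (the hash and certificate key below are placeholders; the endpoint and token come from new.yaml.j2):

# /root/master-join.sh - control-plane join command
kubeadm join 10.50.121.240:16443 --token 7t2weq.bjbawausm0jaxury \
        --discovery-token-ca-cert-hash sha256:<hash> \
        --control-plane --certificate-key <certificate-key>

# /root/worker-join.sh - worker join command
kubeadm join 10.50.121.240:16443 --token 7t2weq.bjbawausm0jaxury \
        --discovery-token-ca-cert-hash sha256:<hash>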

2.8 ha-master.yaml playbook content

[root@ansible-host ansible]# cat roles/kubernetes/tasks/ha-master.yaml 
- name: master2 node join cluster
  script: /opt/{{ kube_master1 }}/root/master-join.sh
  when:
    - ansible_fqdn is match ("kube-master2")
  tags:
    - master2join
    - hamaster

- name: master3 node join cluster
  script: /opt/{{ kube_master1 }}/root/master-join.sh
  when:
    - ansible_fqdn is match ("kube-master3")
  tags:
    - master3join
    - hamaster

2.9 node-join.yaml playbook content

[root@ansible-host ansible]# cat roles/kubernetes/tasks/node-join.yaml 
- name: node1 join cluster
  script: /opt/{{ kube_master1 }}/root/worker-join.sh
  when:
    - ansible_fqdn is match ("kube-node1")
  tags:
    - node1join
    - nodejoin

- name: node2 join cluster
  script: /opt/{{ kube_master1 }}/root/worker-join.sh
  when:
    - ansible_fqdn is match ("kube-node2")
  tags:
    - node2join
    - nodejoin

- name: node3 join cluster
  script: /opt/{{ kube_master1 }}/root/worker-join.sh
  when:
    - ansible_fqdn is match ("kube-node3")
  tags:
    - node3join
    - nodejoin

2.10 calico.yaml playbook content

[root@ansible-host ansible]# cat roles/kubernetes/tasks/calico.yaml 
- name: get pods subnet
  shell: cat /etc/kubernetes/manifests/kube-controller-manager.yaml | grep cluster-cidr= | awk -F= '{print $NF}'
  register: POD_SUBNET
  when:
    - ansible_fqdn is match ("kube-master1")
  tags:
    - podsubnet
    - calico

- name: apply calico yaml
  shell: cd /root/k8s-ha-install && git checkout manual-installation-v1.23.x && cd calico/ && sed -i 's#POD_CIDR#{{ POD_SUBNET.stdout }}#g' calico.yaml && kubectl apply -f calico.yaml
  when:
    - ansible_fqdn is match ("kube-master1")
  tags:
    - applycalico
    - calico
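
After Calico is applied it is worth confirming on kube-master1 that the Calico pods start and every node turns Ready, for example:

kubectl get pods -n kube-system -o wide | grep calico
kubectl get nodes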

2.11 metrics.yaml playbook content

[root@ansible-host ansible]# cat roles/kubernetes/tasks/metrics.yaml 
- name: fetch  front-proxy-ca.crt to ansible host
  fetch:
    src: /etc/kubernetes/pki/front-proxy-ca.crt
    dest: /opt/
  when:
    - ansible_fqdn is match ("kube-master1")
  tags:
    - fetchfrontproxy
    - metrics

- name: copy front-proxy-ca.crt to node hosts
  copy:
    src: /opt/{{ kube_master1 }}/etc/kubernetes/pki/front-proxy-ca.crt
    dest: /etc/kubernetes/pki/
  when:
    - ansible_fqdn is match ("kube-node*")
  tags:
    - copyfrontproxy
    - metrics

- name: apply metrics-server yaml
  shell: cd /root/k8s-ha-install/kubeadm-metrics-server && kubectl apply -f comp.yaml
  when:
    - ansible_fqdn is match ("kube-master1")
  tags:
    - applymetrics
    - metrics
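
Once metrics-server is running, the metrics pipeline can be verified with kubectl top (it may take a minute or two after the pod starts before data appears):

kubectl top nodes
kubectl top pods -n kube-system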

2.12 dashboard.yaml playbook content

[root@ansible-host ansible]# cat roles/kubernetes/tasks/dashboard.yaml 
- name: apply dashboard yaml
  shell: cd /root/k8s-ha-install/dashboard/ && kubectl create -f .
  when:
    - ansible_fqdn is match ("kube-master1")
  tags:
    - dashboard

2.13 token.yaml playbook content

[root@ansible-host ansible]# cat roles/kubernetes/tasks/token.yaml 
- name: get admin-user token to master1 node
  shell: kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') | grep ^token | awk '{print $2}' > /root/admin-user-token.log
  when:
    - ansible_fqdn is match ("kube-master1")
  tags:
    - gettoken
    - token

- name: set kubectl tab
  shell: kubectl completion bash > /usr/share/bash-completion/completions/kubectl
  when:
    - ansible_fqdn is match ("kube-master1")
  tags:
    - kubectltab
    - token
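
The token written to /root/admin-user-token.log is what you paste into the Dashboard login page. Assuming the Dashboard Service is exposed via NodePort (as in most guides based on this repository), the port and token can be looked up on kube-master1 with something like:

kubectl get svc -A | grep dashboard
cat /root/admin-user-token.log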

2.14 kube-proxy.yaml playbook content

[root@ansible-host ansible]# cat roles/kubernetes/tasks/kube-proxy.yaml 
- name: set kube-proxy mode
  script: kube-proxy.sh
  when:
    - ansible_fqdn is match ("kube-master1") 
  tags:
    - setproxy
    - kubeproxy

- name: update kube-proxy pods
  shell: kubectl patch daemonset kube-proxy -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"date\":\"`date +'%s'`\"}}}}}" -n kube-system
  when:
    - ansible_fqdn is match ("kube-master1")
  tags:
    - updatepod
    - kubeproxy
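
The kube-proxy.sh script itself is not reproduced in the article. Judging from the patch task that follows it, its job is to switch the kube-proxy ConfigMap from iptables to ipvs mode; a non-interactive sketch of such a script could look like this:

#!/bin/bash
# Switch kube-proxy to ipvs mode; the playbook's patch task then
# rolls the DaemonSet so the pods pick up the new ConfigMap.
kubectl -n kube-system get configmap kube-proxy -o yaml \
  | sed 's/mode: ""/mode: "ipvs"/' \
  | kubectl apply -f -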

2.15 main.yaml (main task file) content

[root@ansible-host ansible]# cat roles/kubernetes/tasks/main.yaml 
- import_tasks: yum.yaml
- import_tasks: init.yaml
- import_tasks: containerd.yaml
- import_tasks: k8s-module.yaml
- import_tasks: haproxy.yaml
- import_tasks: keepalived.yaml
- import_tasks: kubeadm-init.yaml
- import_tasks: ha-master.yaml
- import_tasks: node-join.yaml
- import_tasks: calico.yaml
- import_tasks: metrics.yaml
- import_tasks: dashboard.yaml
- import_tasks: token.yaml
- import_tasks: kube-proxy.yaml
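
Because every task carries both a stage tag (yum, init, containerd, ...) and a finer-grained tag, individual stages can be re-run without repeating the whole role. Using the hypothetical inventory and top-level play sketched in section 2:

# re-run only the OS initialization stage
ansible-playbook -i hosts k8s.yaml -t init
# re-run only the HAProxy and keepalived stages
ansible-playbook -i hosts k8s.yaml -t haproxy,keepalived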

2.16 hosts template file content

[root@ansible-host ansible]# cat roles/kubernetes/templates/hosts.j2 
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
{{ kube_master1 }} kube-master1
{{ kube_master2 }} kube-master2
{{ kube_master3 }} kube-master3
{{ kube_node1 }} kube-node1
{{ kube_node2 }} kube-node2
{{ kube_node3 }} kube-node3
{{ kube_vip }} kube-vip

2.17 vars/main.yaml (role variables) content

[root@ansible-host ansible]# cat roles/kubernetes/vars/main.yaml 
master1_nodename: kube-master1
master2_nodename: kube-master2
master3_nodename: kube-master3
node1_nodename: kube-node1
node2_nodename: kube-node2
node3_nodename: kube-node3
kube_master1: 10.50.121.241
kube_master2: 10.50.121.242
kube_master3: 10.50.121.243
kube_node1: 10.50.121.244
kube_node2: 10.50.121.245
kube_node3: 10.50.121.246
kube_vip: 10.50.121.240
pause_version: 3.6
k8s_version: 1.23.17
clustername: k8s-test1
dnsdomain: cluster.local
podsubnet: 172.31.0.0/16
servicesubnet: 172.30.0.0/16
net_interface_name0: eth0
net_interface_name1: eth1
net_interface_name2: eth2
net_interface_name3: eth3

2.18 containerd config template content

[root@ansible-host ansible]# cat roles/kubernetes/templates/config.toml.j2 
disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2

[cgroup]
  path = ""

[debug]
  address = ""
  format = ""
  gid = 0
  level = ""
  uid = 0

[grpc]
  address = "/run/containerd/containerd.sock"
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216
  tcp_address = ""
  tcp_tls_ca = ""
  tcp_tls_cert = ""
  tcp_tls_key = ""
  uid = 0

[metrics]
  address = ""
  grpc_histogram = false

[plugins]

  [plugins."io.containerd.gc.v1.scheduler"]
    deletion_threshold = 0
    mutation_threshold = 100
    pause_threshold = 0.02
    schedule_delay = "0s"
    startup_delay = "100ms"

  [plugins."io.containerd.grpc.v1.cri"]
    device_ownership_from_security_context = false
    disable_apparmor = false
    disable_cgroup = false
    disable_hugetlb_controller = true
    disable_proc_mount = false
    disable_tcp_service = true
    enable_selinux = false
    enable_tls_streaming = false
    enable_unprivileged_icmp = false
    enable_unprivileged_ports = false
    ignore_image_defined_volumes = false
    max_concurrent_downloads = 3
    max_container_log_line_size = 16384
    netns_mounts_under_state_dir = false
    restrict_oom_score_adj = false
    sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:{{ pause_version }}"
    selinux_category_range = 1024
    stats_collect_period = 10
    stream_idle_timeout = "4h0m0s"
    stream_server_address = "127.0.0.1"
    stream_server_port = "0"
    systemd_cgroup = false
    tolerate_missing_hugetlb_controller = true
    unset_seccomp_profile = ""

    [plugins."io.containerd.grpc.v1.cri".cni]
      bin_dir = "/opt/cni/bin"
      conf_dir = "/etc/cni/net.d"
      conf_template = ""
      ip_pref = ""
      max_conf_num = 1

    [plugins."io.containerd.grpc.v1.cri".containerd]
      default_runtime_name = "runc"
      disable_snapshot_annotations = true
      discard_unpacked_layers = false
      ignore_rdt_not_enabled_errors = false
      no_pivot = false
      snapshotter = "overlayfs"

      [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
        base_runtime_spec = ""
        cni_conf_dir = ""
        cni_max_conf_num = 0
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        runtime_engine = ""
        runtime_path = ""
        runtime_root = ""
        runtime_type = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]

      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]

        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          base_runtime_spec = ""
          cni_conf_dir = ""
          cni_max_conf_num = 0
          container_annotations = []
          pod_annotations = []
          privileged_without_host_devices = false
          runtime_engine = ""
          runtime_path = ""
          runtime_root = ""
          runtime_type = "io.containerd.runc.v2"

          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
            BinaryName = ""
            CriuImagePath = ""
            CriuPath = ""
            CriuWorkPath = ""
            IoGid = 0
            IoUid = 0
            NoNewKeyring = false
            NoPivotRoot = false
            Root = ""
            ShimCgroup = ""
            SystemdCgroup = true

      [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
        base_runtime_spec = ""
        cni_conf_dir = ""
        cni_max_conf_num = 0
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        runtime_engine = ""
        runtime_path = ""
        runtime_root = ""
        runtime_type = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]

    [plugins."io.containerd.grpc.v1.cri".image_decryption]
      key_model = "node"

    [plugins."io.containerd.grpc.v1.cri".registry]
      config_path = ""

      [plugins."io.containerd.grpc.v1.cri".registry.auths]

      [plugins."io.containerd.grpc.v1.cri".registry.configs]

      [plugins."io.containerd.grpc.v1.cri".registry.headers]

      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]

    [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
      tls_cert_file = ""
      tls_key_file = ""

  [plugins."io.containerd.internal.v1.opt"]
    path = "/opt/containerd"

  [plugins."io.containerd.internal.v1.restart"]
    interval = "10s"

  [plugins."io.containerd.internal.v1.tracing"]
    sampling_ratio = 1.0
    service_name = "containerd"

  [plugins."io.containerd.metadata.v1.bolt"]
    content_sharing_policy = "shared"

  [plugins."io.containerd.monitor.v1.cgroups"]
    no_prometheus = false

  [plugins."io.containerd.runtime.v1.linux"]
    no_shim = false
    runtime = "runc"
    runtime_root = ""
    shim = "containerd-shim"
    shim_debug = false

  [plugins."io.containerd.runtime.v2.task"]
    platforms = ["linux/amd64"]
    sched_core = false

  [plugins."io.containerd.service.v1.diff-service"]
    default = ["walking"]

  [plugins."io.containerd.service.v1.tasks-service"]
    rdt_config_file = ""

  [plugins."io.containerd.snapshotter.v1.aufs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.btrfs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.devmapper"]
    async_remove = false
    base_image_size = ""
    discard_blocks = false
    fs_options = ""
    fs_type = ""
    pool_name = ""
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.native"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.overlayfs"]
    root_path = ""
    upperdir_label = false

  [plugins."io.containerd.snapshotter.v1.zfs"]
    root_path = ""

  [plugins."io.containerd.tracing.processor.v1.otlp"]
    endpoint = ""
    insecure = false
    protocol = ""

[proxy_plugins]

[stream_processors]

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar"

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar+gzip"

[timeouts]
  "io.containerd.timeout.bolt.open" = "0s"
  "io.containerd.timeout.shim.cleanup" = "5s"
  "io.containerd.timeout.shim.load" = "5s"
  "io.containerd.timeout.shim.shutdown" = "3s"
  "io.containerd.timeout.task.state" = "2s"

[ttrpc]
  address = ""
  gid = 0
  uid = 0

2.19 haproxy config template content

[root@ansible-host ansible]# cat roles/kubernetes/templates/haproxy.cfg.j2 
global
  maxconn  2000
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

frontend k8s-master
  bind 0.0.0.0:16443
  bind 127.0.0.1:16443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master

backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server kube-master1	{{ kube_master1 }}:6443  check
  server kube-master2	{{ kube_master2 }}:6443  check
  server kube-master3	{{ kube_master3 }}:6443  check
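
Once HAProxy is running on the masters, the monitor-in frontend defined above gives a quick liveness check; an HTTP 200 response means HAProxy itself is up, for example:

curl -i http://10.50.121.241:33305/monitor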

2.20 keepalived config template content

[root@ansible-host ansible]# cat roles/kubernetes/templates/keepalived.conf.j2 
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2  
    rise 1
}
vrrp_instance VI_1 {
{% if ansible_fqdn == 'kube-master1' %}
    state MASTER
    interface {{ net_interface_name1 }}
    mcast_src_ip {{ kube_master1 }}
    virtual_router_id 51
    priority 101
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        {{ kube_vip }}
    }
    track_script {
       chk_apiserver
    }
{% elif ansible_fqdn == 'kube-master2' %}
    state BACKUP
    interface {{ net_interface_name1 }}
    mcast_src_ip {{ kube_master2 }}
    virtual_router_id 51
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        {{ kube_vip }}
    }
    track_script {
       chk_apiserver
    }
{% elif ansible_fqdn == 'kube-master3' %}
    state BACKUP
    interface {{ net_interface_name1 }}
    mcast_src_ip {{ kube_master3 }}
    virtual_router_id 51
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        {{ kube_vip }}
    }
    track_script {
       chk_apiserver
    }
{% endif %}
}

2.21 kubeadm init config template

[root@ansible-host ansible]# cat roles/kubernetes/templates/new.yaml.j2 
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: 7t2weq.bjbawausm0jaxury
  ttl: 240h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: {{ kube_master1 }}
  bindPort: 6443
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: {{ master1_nodename }}
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  certSANs:
  - {{ kube_vip }}
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: {{ clustername }}
controlPlaneEndpoint: {{ kube_vip }}:16443
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v{{ k8s_version }}
networking:
  dnsDomain: {{ dnsdomain }}
  podSubnet: {{ podsubnet }}
  serviceSubnet: {{ servicesubnet }}
scheduler: {}
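
Once this template has been rendered to /root/new.yaml on kube-master1, the configuration can be sanity-checked before init without touching the cluster, e.g. by listing the images kubeadm would use:

kubeadm config images list --config /root/new.yaml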

2.22 apiserver health check script

[root@ansible-host ansible]# cat roles/kubernetes/files/check_apiserver.sh 
#!/bin/bash

err=0
for k in $(seq 1 3)
do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi

