Deploying the kubelet for Kubernetes v1.14.0


Preparing the kubelet


1. Server configuration


External IP    Container IP   CPU   Memory (GB)   Disk   hostname
192.168.4.1    172.172.2.1    8     16            200    k8s-vip-01
192.168.4.2    172.172.2.2    8     16            200    k8s-vip-02
192.168.4.3    172.172.2.3    64    256           4T     k8s-node-01
192.168.4.4    172.172.2.4    64    256           4T     k8s-node-02
192.168.4.5    172.172.2.5    64    256           4T     k8s-node-03
192.168.31.1   172.172.31.1   8     16            200    k8s-ingress-01
192.168.32.2   172.172.31.2   8     16            200    k8s-ingress-02

2. Preparing the kubelet binaries

cd /apps/work/k8s/node
wget https://storage.googleapis.com/kubernetes-release/release/v1.14.0/kubernetes-node-linux-amd64.tar.gz
tar -xvf kubernetes-node-linux-amd64.tar.gz 
cd kubernetes/node
mkdir conf kubelet-plugins log ssl

3. Generating a bootstrap token

cd /apps/work/k8s/node/kubernetes
# Generate the bootstrap token:
echo "$(head -c 6 /dev/urandom | md5sum | head -c 6)"."$(head -c 16 /dev/urandom | md5sum | head -c 16)"
8a7988.f77fde53170b9d91
### Create the bootstrap token Secret:
vi bootstrap.secret.yaml
apiVersion: v1
kind: Secret
metadata:
  # Name MUST be of form "bootstrap-token-"
  name: bootstrap-token-8a7988
  namespace: kube-system

# Type MUST be 'bootstrap.kubernetes.io/token'
type: bootstrap.kubernetes.io/token
stringData:
  # Human readable description. Optional.
  description: "The default bootstrap token generated for kubelet TLS bootstrapping."

  # Token ID and secret. Required.
  token-id: 8a7988
  token-secret: f77fde53170b9d91

  # Expiration. Optional.
  expiration: 2019-09-10T00:00:11Z

  # Allowed usages.
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"

  # Extra groups to authenticate the token as. Must start with "system:bootstrappers:"
  auth-extra-groups: system:bootstrappers:worker,system:bootstrappers:ingress

### Apply the Secret to the cluster:
kubectl create -f bootstrap.secret.yaml
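The Secret can be checked after creation (an optional verification, not part of the original steps):
kubectl -n kube-system get secret bootstrap-token-8a7988 -o yaml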
### Create bootstrap.clusterrole.yaml:
vi bootstrap.clusterrole.yaml
# A ClusterRole which instructs the CSR approver to approve a node requesting a
# serving cert matching its client cert.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeserver
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeserver"]
  verbs: ["create"]

kubectl create -f bootstrap.clusterrole.yaml
### Create apiserver-to-kubelet.yaml:
vi apiserver-to-kubelet.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kubernetes-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kubernetes
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kubernetes-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes
kubectl create -f apiserver-to-kubelet.yaml
### Verify the bootstrap token:
kubeadm token list
# Allow members of the system:bootstrappers group to create CSRs (node bootstrapping):
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers
# Auto-approve client-certificate CSRs from the system:bootstrappers group during TLS bootstrapping:
kubectl create clusterrolebinding node-client-auto-approve-csr --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient --group=system:bootstrappers
# Auto-approve renewal CSRs for the client certificates that kubelets (system:nodes group) use against the apiserver:
kubectl create clusterrolebinding node-client-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient --group=system:nodes

# Allow kubelets (system:nodes group) to create CSRs for their serving certificate (the port-10250 API):
kubectl create clusterrolebinding node-server-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeserver --group=system:nodes
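A quick way to confirm the four bindings exist (optional check):
kubectl get clusterrolebinding kubelet-bootstrap node-client-auto-approve-csr node-client-auto-renew-crt node-server-auto-renew-crt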

4. Creating bootstrap.kubeconfig

cd /apps/work/k8s/node/kubernetes/node/conf
# Set cluster parameters:
kubectl config set-cluster kubernetes \
  --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  --embed-certs=true \
  --server=https://api.k8s.niuke.local:6443 \
  --kubeconfig=bootstrap.kubeconfig
# Set client credentials (the bootstrap token):
kubectl config set-credentials system:bootstrap:8a7988 \
  --token=8a7988.f77fde53170b9d91 \
  --kubeconfig=bootstrap.kubeconfig
# Set the context:
kubectl config set-context default \
  --cluster=kubernetes \
  --user=system:bootstrap:8a7988 \
  --kubeconfig=bootstrap.kubeconfig
# Switch to the default context:
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
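To sanity-check the generated kubeconfig (optional; the embedded CA shows as DATA+OMITTED):
kubectl config view --kubeconfig=bootstrap.kubeconfig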

5. Notes on special kubelet parameters

--rotate-server-certificates=true: the kubelet requests its serving certificate through a CSR that is not auto-approved, so pending server CSRs must be approved manually, e.g.:
kubectl get csr | grep system:node | grep Pending | while read name number; do kubectl certificate approve $name; done
--node-labels=node-role.kubernetes.io/k8s-vip=true: labels the node at registration; the role then shows in the ROLES column of kubectl get node.
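As a quick check that the role label was applied once a node registers (optional):
kubectl get node k8s-vip-01 --show-labels | grep node-role.kubernetes.io/k8s-vip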

6. Creating a sample kubelet configuration (other nodes adapt this template)

cd /apps/work/k8s/node/kubernetes/node/conf
vi kubelet
KUBELET_OPTS="--bootstrap-kubeconfig=/apps/kubernetes/conf/bootstrap.kubeconfig \
              --fail-swap-on=false \
              --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/apps/cni/bin \
              --kubeconfig=/apps/kubernetes/conf/kubelet.kubeconfig \
              --address=172.172.2.1 \
              --node-ip=172.172.2.1 \
              --hostname-override=k8s-vip-01 \
              --cluster-dns=10.64.0.2 \
              --cluster-domain=niuke.local \
              --authorization-mode=Webhook \
              --authentication-token-webhook=true \
              --client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
              --rotate-certificates=true \
              --rotate-server-certificates=true \
              --cgroup-driver=cgroupfs \
              --allow-privileged=true \
              --healthz-port=10248 \
              --healthz-bind-address=172.172.2.1 \
              --cert-dir=/apps/kubernetes/ssl \
              --feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \
              --node-labels=node-role.kubernetes.io/k8s-vip=true \
              --serialize-image-pulls=false \
              --enforce-node-allocatable=pods,kube-reserved,system-reserved \
              --pod-manifest-path=/apps/work/kubernetes/manifests \
              --runtime-cgroups=/systemd/system.slice/kubelet.service \
              --kube-reserved-cgroup=/systemd/system.slice/kubelet.service \
              --system-reserved-cgroup=/systemd/system.slice \
              --root-dir=/apps/work/kubernetes/kubelet \
              --log-dir=/apps/kubernetes/log \
              --alsologtostderr=true \
              --logtostderr=false \
              --anonymous-auth=true \
              --image-gc-high-threshold=70 \
              --image-gc-low-threshold=50 \
              --kube-reserved=cpu=500m,memory=512Mi,ephemeral-storage=1Gi \
              --system-reserved=cpu=1000m,memory=1024Mi,ephemeral-storage=1Gi \
              --eviction-hard=memory.available<500Mi,nodefs.available<10% \
              --sync-frequency=30s \
              --resolv-conf=/etc/resolv.conf \
              --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 \
              --v=2 \
              --image-pull-progress-deadline=30s \
              --event-burst=30 \
              --event-qps=15 \
              --kube-api-burst=30 \
              --kube-api-qps=15 \
              --max-pods=200 \
              --pods-per-core=10 \
              --read-only-port=0 \
              --volume-plugin-dir=/apps/kubernetes/kubelet-plugins/volume"
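This file is written for k8s-vip-01; the per-node copies differ only in the IP, hostname, and role label. A minimal sketch (an assumed helper, not part of the original) for deriving another node's config from this template:
# Example: generate the config for k8s-node-01 (172.172.2.3, per the server table above)
sed -e 's/172.172.2.1/172.172.2.3/g' \
    -e 's/k8s-vip-01/k8s-node-01/g' \
    -e 's#k8s-vip=true#k8s-node=true#' \
    kubelet > kubelet.k8s-node-01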

7. Creating kubelet.service

cd /apps/work/k8s/node/kubernetes/
vi kubelet.service 
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/apps/work/kubernetes
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity

EnvironmentFile=-/apps/kubernetes/conf/kubelet
ExecStart=/apps/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure
RestartSec=5
KillMode=process
[Install]
WantedBy=multi-user.target
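The unit file can optionally be checked for obvious mistakes before distribution:
systemd-analyze verify kubelet.service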

8. Preparing the CNI plugins

mkdir /apps/work/k8s/cni
cd /apps/work/k8s/cni
wget https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz
tar -xvf cni-plugins-amd64-v0.7.1.tgz
rm -rf cni-plugins-amd64-v0.7.1.tgz
mkdir bin
mv * bin
### Create 10-kuberouter.conf:
mkdir -p cni/net.d
cd cni/net.d
vi 10-kuberouter.conf
{
  "name":"kubernetes",
  "type":"bridge",
  "bridge":"kube-bridge",
  "isDefaultGateway":true,
    "hairpinMode":true,
  "ipam": {
    "type":"host-local"
  }
}
##### If hostPort support is needed, use a .conflist with the portmap plugin instead:
vi 10-kuberouter.conflist
{
  "cniVersion":"0.3.0",
  "name":"mynet",
  "plugins":[
    {
      "name":"kubernetes",
      "type":"bridge",
      "bridge":"kube-bridge",
      "isDefaultGateway":true,
      "hairpinMode":true,
      "ipam":{
        "type":"host-local"
      }
    },
    {
      "type":"portmap",
      "capabilities":{
        "snat":true,
        "portMappings":true
      }
    }
  ]
}
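Both files must be valid JSON; a quick syntax check before distribution (optional):
python -m json.tool < 10-kuberouter.conf
python -m json.tool < 10-kuberouter.conflist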

9. lxcfs (already compiled in the installation preparation article)


Only distribution of the files is covered here.

10. Installing dependencies on the node hosts

yum install -y  epel-release
yum install -y   yum-utils  ipvsadm  telnet  wget  net-tools  conntrack  ipset  jq  iptables  curl  sysstat  libseccomp  socat  nfs-utils  fuse  fuse-devel  ceph-common
ansible -i host node ingress vip  -m shell -a "yum install -y  epel-release"
ansible -i host node ingress vip  -m shell -a "yum install -y   yum-utils  ipvsadm  telnet  wget  net-tools  conntrack  ipset  jq  iptables  curl  sysstat  libseccomp  socat  nfs-utils  fuse  fuse-devel  ceph-common"
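A spot check that the key packages landed on every host (optional; exact package names as installed above):
ansible -i host node ingress vip -m shell -a "rpm -q ipvsadm ipset socat"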

11. Copying k8s-ca.pem

cd /apps/work/k8s/node/kubernetes/node/ssl
mkdir k8s
cp -pdr /apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem ./k8s/

12. Distributing and starting lxcfs

cd /apps/work/k8s/binlxfs
ansible -i /apps/work/k8s/host node ingress vip -m copy -a "src=lxcfs  dest=/usr/local/bin/lxcfs owner=root group=root mode=755"
ansible -i /apps/work/k8s/host node ingress vip -m copy -a "src=lxcfs.service dest=/usr/lib/systemd/system/lxcfs.service" 
ansible -i /apps/work/k8s/host node ingress vip -m copy -a "src=lib dest=/usr/local/" 
ansible -i /apps/work/k8s/host node ingress vip -m shell -a "mkdir -p /var/lib/lxcfs/"
ansible -i /apps/work/k8s/host node ingress vip -m shell -a "systemctl daemon-reload && systemctl start lxcfs && systemctl enable lxcfs"
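Once running, lxcfs mounts its FUSE views under /var/lib/lxcfs; this can be verified fleet-wide (optional):
ansible -i /apps/work/k8s/host node ingress vip -m shell -a "mount | grep lxcfs"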

13. Installing Docker on the node hosts

### Configure the Docker CE yum repository (Aliyun mirror):
cat > /etc/yum.repos.d/docker-ce.repo << EOF
[docker-ce-stable]
name=Docker CE Stable - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-stable-debuginfo]
name=Docker CE Stable - Debuginfo \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-stable-source]
name=Docker CE Stable - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-edge]
name=Docker CE Edge - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/edge
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-edge-debuginfo]
name=Docker CE Edge - Debuginfo \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/edge
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-edge-source]
name=Docker CE Edge - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/edge
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test]
name=Docker CE Test - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test-debuginfo]
name=Docker CE Test - Debuginfo \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test-source]
name=Docker CE Test - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly]
name=Docker CE Nightly - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly-debuginfo]
name=Docker CE Nightly - Debuginfo \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly-source]
name=Docker CE Nightly - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
EOF

### Install Docker dependencies:
yum install -y python-pip python-devel yum-utils device-mapper-persistent-data lvm2
## Install Docker:
yum install -y docker-ce
### Edit /lib/systemd/system/docker.service and replace the ExecStart line with:
ExecStart=/usr/bin/dockerd -H fd:// --graph /apps/docker  -H unix:///var/run/docker.sock  --max-concurrent-downloads=20 --log-opt max-size=20M --log-opt max-file=10 --default-ulimit nofile=1024000 --default-ulimit nproc=1024000
systemctl daemon-reload
systemctl restart docker
systemctl enable docker
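To confirm the data root actually moved to /apps/docker (optional check):
docker info 2>/dev/null | grep -E 'Docker Root Dir|Storage Driver'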

14. Distributing CNI to the nodes

cd /apps/work/k8s/cni
# Create the cni directory
ansible -i /apps/work/k8s/host node ingress vip -m shell -a "mkdir -p /apps/cni"
# Distribute the cni bin directory
ansible -i /apps/work/k8s/host node ingress vip -m copy -a "src=bin dest=/apps/cni/ owner=root group=root mode=755"
# Distribute the CNI network configuration
ansible -i /apps/work/k8s/host node ingress vip -m copy -a "src=cni dest=/etc/"

15. Distributing the kubelet to the nodes

cd /apps/work/k8s/node/kubernetes
ansible -i /apps/work/k8s/host node ingress vip -m shell -a "mkdir -p /apps/kubernetes/kubelet-plugins/volume"
ansible -i /apps/work/k8s/host node ingress vip -m copy -a "src=node/ dest=/apps/kubernetes/"
ansible -i /apps/work/k8s/host node ingress vip -m shell -a "mkdir -p /apps/work/kubernetes/{kubelet,manifests}"
ansible -i /apps/work/k8s/host node ingress vip -m shell -a "chmod u+x /apps/kubernetes/bin/*"
ansible -i /apps/work/k8s/host node ingress vip -m copy -a "src=kubelet.service dest=/usr/lib/systemd/system/"
Note: on each node, edit /apps/kubernetes/conf/kubelet and set --address, --node-ip, --healthz-bind-address, --hostname-override, and --node-labels to that node's own IP, hostname, and role.

16. Starting the kubelet

ansible -i /apps/work/k8s/host node ingress vip -m shell -a "systemctl daemon-reload"
ansible -i /apps/work/k8s/host node ingress vip -m shell -a " systemctl enable kubelet"
ansible -i /apps/work/k8s/host node ingress vip -m shell -a " systemctl start kubelet"
ansible -i /apps/work/k8s/host node ingress vip -m shell -a " systemctl status kubelet"

17. Checking nodes and manually approving kubelet serving-certificate CSRs

kubectl get csr
[root@jenkins k8s]# kubectl get csr
NAME        AGE     REQUESTOR                 CONDITION
csr-l9tkn   119s    system:node:node04        Pending           # serving-certificate CSR, waiting for manual approval
csr-zjvrx   2m11s   system:bootstrap:8a7988   Approved,Issued   # client-certificate CSR, auto-approved at bootstrap
Because the kubelet runs with --rotate-server-certificates=true, approve the pending serving-certificate CSRs:
kubectl get csr | grep system:node | grep Pending | while read name number; do kubectl certificate approve $name; done
[root@jenkins k8s]# kubectl get csr |grep system:node | grep Pending| while read name number; do     kubectl  certificate approve  $name ; done
certificatesigningrequest.certificates.k8s.io/csr-l9tkn approved
[root@jenkins k8s]# kubectl get csr
NAME        AGE     REQUESTOR                 CONDITION
csr-l9tkn   4m3s    system:node:node04        Approved,Issued
csr-zjvrx   4m15s   system:bootstrap:8a7988   Approved,Issued
After approval, the issued certificates appear under the kubelet's --cert-dir on the node:
[root@node04 ssl]# ll
total 8
drwxr-xr-x 2 root root   24 Jun 24 16:25 k8s
-rw------- 1 root root 1273 Jun 24 16:38 kubelet-client-2019-06-24-16-38-43.pem
lrwxrwxrwx 1 root root   59 Jun 24 16:38 kubelet-client-current.pem -> /apps/kubernetes/ssl/kubelet-client-2019-06-24-16-38-43.pem
-rw------- 1 root root 1309 Jun 24 16:42 kubelet-server-2019-06-24-16-42-46.pem
lrwxrwxrwx 1 root root   59 Jun 24 16:42 kubelet-server-current.pem -> /apps/kubernetes/ssl/kubelet-server-2019-06-24-16-42-46.pem
[root@node04 ssl]#
Check that every node has registered and is Ready:
[root@jenkins tasks]# kubectl get node
NAME             STATUS   ROLES         AGE   VERSION
k8s-vip-01       Ready    k8s-vip       26d   v1.14.0
k8s-vip-02       Ready    k8s-vip       26d   v1.14.0
k8s-node-01      Ready    k8s-node      26d   v1.14.0
k8s-node-02      Ready    k8s-node      26d   v1.14.0
k8s-node-03      Ready    k8s-node      26d   v1.14.0
k8s-ingress-01   Ready    k8s-ingress   26d   v1.14.0
k8s-ingress-02   Ready    k8s-ingress   26d   v1.14.0
### Inspect the certificate directory on a node:
cd /apps/kubernetes/ssl
ll
[root@k8s-vip-01 ssl]# ll
total 12
drwxr-xr-x 2 k8s  root 4096 Apr 23 09:22 k8s
-rw------- 1 root root 1273 May  5 14:25 kubelet-client-2019-05-05-14-25-10.pem
lrwxrwxrwx 1 root root   59 May  5 14:25 kubelet-client-current.pem -> /apps/kubernetes/ssl/kubelet-client-2019-05-05-14-25-10.pem
-rw------- 1 root root 1309 May  5 15:06 kubelet-server-2019-05-05-15-06-57.pem
lrwxrwxrwx 1 root root   59 May  5 15:06 kubelet-server-current.pem -> /apps/kubernetes/ssl/kubelet-server-2019-05-05-15-06-57.pem
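The validity window of a rotated certificate can be inspected with openssl (optional check):
openssl x509 -noout -subject -dates -in /apps/kubernetes/ssl/kubelet-server-current.pem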

18. Node Ansible playbook

.
├── cni
│   ├── defaults
│   ├── files
│   │   ├── 10-kuberouter.conf
│   │   └── bin
│   │       ├── bridge
│   │       ├── dhcp
│   │       ├── flannel
│   │       ├── host-device
│   │       ├── host-local
│   │       ├── ipvlan
│   │       ├── loopback
│   │       ├── macvlan
│   │       ├── portmap
│   │       ├── ptp
│   │       ├── sample
│   │       ├── tuning
│   │       └── vlan
│   ├── handlers
│   ├── meta
│   ├── tasks
│   │   └── main.yml
│   ├── templates
│   └── vars
├── docker_client
│   ├── defaults
│   ├── files
│   │   ├── docker-compose
│   │   ├── docker-enter
│   │   └── docker-enter.old
│   ├── handlers
│   ├── meta
│   ├── tasks
│   │   └── main.yml
│   ├── templates
│   │   └── docker.repo
│   └── vars
├── kubelet
│   ├── defaults
│   ├── files
│   │   ├── bin
│   │   │   ├── kubeadm
│   │   │   ├── kubectl
│   │   │   ├── kubelet
│   │   │   └── kube-proxy
│   │   └── ssl
│   │       └── k8s
│   │           └── k8s-ca.pem
│   ├── handlers
│   ├── meta
│   ├── tasks
│   │   └── main.yml
│   ├── templates
│   │   ├── conf
│   │   │   ├── bootstrap.kubeconfig
│   │   │   └── kubelet
│   │   └── kubelet.service
│   └── vars
├── lxcfs
│   ├── defaults
│   ├── files
│   │   ├── lib
│   │   │   └── lxcfs
│   │   │       ├── liblxcfs.la
│   │   │       └── liblxcfs.so
│   │   ├── lxcfs
│   │   └── lxcfs.service
│   ├── handlers
│   ├── meta
│   ├── tasks
│   │   └── main.yml
│   ├── templates
│   └── vars
└── rpm
    ├── defaults
    ├── files
    ├── handlers
    ├── meta
    ├── tasks
    │   └── main.yml
    ├── templates
    └── vars

18.1 rpm

- name: Yum Install
  yum: name="{{ item }}" state=latest
  with_items:
      - yum-plugin-fastestmirror
      - epel-release
  become: yes
  become_method: su
- name: rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
  raw: rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
- name: rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
  raw: rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
- name: yum -y --enablerepo=elrepo-kernel install kernel-ml
  shell: yum -y --enablerepo=elrepo-kernel install kernel-ml
- name: grub2-set-default 0
  shell: grub2-set-default 0
- name: grub2-mkconfig -o /boot/grub2/grub.cfg
  shell: grub2-mkconfig -o /boot/grub2/grub.cfg
- name: Yum Install
  yum: name="{{ item }}" state=latest
  with_items:
      - yum-utils
      - ipvsadm
      - telnet
      - wget
      - net-tools
      - conntrack
      - ipset
      - jq
      - iptables
      - curl
      - sysstat
      - libseccomp
      - socat
      - nfs-utils
      - fuse
      - fuse-devel
      - ceph-common
  become: yes
  become_method: su

18.2 lxcfs

- name: copy /usr/local/lib/lxcfs
  copy: src=lib dest=/usr/local/
- name: up lxcfs
  copy: src=lxcfs dest=/usr/local/bin/lxcfs owner=root group=root mode=755
- name: up lxcfs.service
  copy: src=lxcfs.service dest=/usr/lib/systemd/system/lxcfs.service
- name: create /var/lib/lxcfs
  shell: mkdir -p /var/lib/lxcfs
- name: systemctl daemon-reload
  shell: systemctl daemon-reload
- name: systemctl enable lxcfs
  shell: systemctl enable lxcfs
- name: systemctl start lxcfs 
  shell: systemctl start lxcfs

18.3 cni

- name: create cni
  shell: mkdir -p {{ k8s_path }}/cni
- name: copy to cni
  copy: src=bin dest={{ k8s_path }}/cni/ owner=root group=root mode=755
- name: create /etc/cni/net.d
  shell: mkdir -p /etc/cni/net.d
- name: copy 10-kuberouter.conf
  copy: src=10-kuberouter.conf dest=/etc/cni/net.d 

18.4 docker

- name: yum epel-release
  yum: name=epel-release state=present
- name: yum python-pip
  yum: name={{ item }} state=present
  with_items: 
      - python-pip
      - python-devel
      - yum-utils
      - device-mapper-persistent-data
      - lvm2     
- pip: name={{ item }}
  with_items:
     - docker-py
- stat: path=/usr/bin/docker
  register: docker_path_register
- name: yum old docker
  yum: name=docker* state=removed
  when: docker_path_register.stat.exists == True
- name: cp  docker.repo client
  template: src=docker.repo dest=/etc/yum.repos.d/docker.repo
- name: yum install docker
  yum: name=docker-ce state=present
- lineinfile: 
    dest: /lib/systemd/system/docker.service
    regexp: '^ExecStart='
    line: 'ExecStart=/usr/bin/dockerd -H fd:// --graph {{ graph }} -H unix:///var/run/docker.sock  --max-concurrent-downloads=20 --log-opt max-size=20M --log-opt max-file=10 --default-ulimit nofile=1024000 --default-ulimit nproc=1024000'
- name: systemctl daemon-reload
  shell: systemctl daemon-reload
- name: enabled service docker.service
  service: name=docker.service enabled=yes
- name: start  service docker.service
  service: name=docker  state=started
- name: cp docker-compose
  copy: src=docker-compose  dest=/usr/bin/docker-compose owner=root group=root mode=755
- name: start  service docker.service
  service: name=docker  state=restarted

18.5 kubelet

- name: create {{ k8s_path }}/kubernetes/{log,kubelet-plugins,conf}
  shell: mkdir -p {{ k8s_path }}/kubernetes/{log,kubelet-plugins,conf} && mkdir -p {{ k8s_path }}/work/kubernetes/manifests 
- name: copy kubelet to {{ k8s_path }}/kubernetes
  copy: src=bin dest={{ k8s_path }}/kubernetes/ owner=root group=root mode=755
- name: copy kubelet ssl
  copy: src=ssl dest={{ k8s_path }}/kubernetes/
- name: copy to kubelet config
  template: src=conf/{{ item }} dest={{ k8s_path }}/kubernetes/conf
  with_items:
      - kubelet
      - bootstrap.kubeconfig
- name:  copy to kubelet service
  template: src={{ item }} dest=/usr/lib/systemd/system/
  with_items:
      - kubelet.service
- name: systemctl daemon-reload 
  shell: systemctl daemon-reload
- name: systemctl enable kubelet
  shell: systemctl enable kubelet &&  systemctl start kubelet

###### kubelet.service (templates/kubelet.service)
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
EnvironmentFile=-{{ k8s_path }}/kubernetes/conf/kubelet
ExecStart={{ k8s_path }}/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
####### kubelet (templates/conf/kubelet)
KUBELET_OPTS="--bootstrap-kubeconfig={{ k8s_path }}/kubernetes/conf/bootstrap.kubeconfig \
              --fail-swap-on=false \
              --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir={{ k8s_path }}/cni/bin \
              --kubeconfig={{ k8s_path }}/kubernetes/conf/kubelet.kubeconfig \
              --address={{ ansible_eth1.ipv4.address }} \
              --node-ip={{ ansible_eth1.ipv4.address }} \
              --hostname-override={{ ansible_hostname }} \
              --cluster-dns={{ k8s_dns }} \
              --cluster-domain={{ k8s_domain }} \
              --authorization-mode=Webhook \
              --authentication-token-webhook=true \
              --client-ca-file={{ k8s_path }}/kubernetes/ssl/k8s/k8s-ca.pem \
              --rotate-certificates=true \
              --rotate-server-certificates=true \
              --cgroup-driver=cgroupfs \
              --allow-privileged=true \
              --healthz-port=10248 \
              --healthz-bind-address={{ ansible_eth1.ipv4.address }} \
              --cert-dir={{ k8s_path }}/kubernetes/ssl \
              --feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \
              --node-labels=node-role.kubernetes.io/{{ k8s_node }}=true \
              --serialize-image-pulls=false \
              --enforce-node-allocatable=pods,kube-reserved,system-reserved \
              --pod-manifest-path={{ k8s_path }}/work/kubernetes/manifests \
              --runtime-cgroups=/systemd/system.slice/kubelet.service \
              --kube-reserved-cgroup=/systemd/system.slice/kubelet.service \
              --system-reserved-cgroup=/systemd/system.slice \
              --root-dir={{ k8s_path }}/work/kubernetes/kubelet \
              --log-dir={{ k8s_path }}/kubernetes/log \
              --alsologtostderr=true \
              --logtostderr=false \
              --anonymous-auth=true \
              --image-gc-high-threshold=70 \
              --image-gc-low-threshold=50 \
              --kube-reserved=cpu=500m,memory=512Mi,ephemeral-storage=1Gi \
              --system-reserved=cpu=1000m,memory=1024Mi,ephemeral-storage=1Gi \
              --eviction-hard=memory.available<500Mi,nodefs.available<10% \
              --sync-frequency=30s \
              --resolv-conf=/etc/resolv.conf \
              --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 \
              --image-pull-progress-deadline=30s \
              --v={{ level_log }} \
              --event-burst=30 \
              --event-qps=15 \
              --kube-api-burst=30 \
              --kube-api-qps=15 \
              --max-pods=200 \
              --pods-per-core=10 \
              --read-only-port=0 \
              --volume-plugin-dir={{ k8s_path }}/kubernetes/kubelet-plugins/volume"

18.6 site.yml

cd /apps/work/k8s

cat site.yml
- hosts: all
  user: root
  vars:
    k8s_path: /apps
    k8s_dns: 10.64.0.2
    k8s_domain: niuke.local
    cluster_cidr: 10.48.0.0/12
    level_log: 2
    graph: "/apps/docker"
  roles:
    - cni
    - lxcfs
    - docker_client
    - kubelet

18.7 host

[vip]
192.168.4.1
192.168.4.2
[node]
192.168.4.3
192.168.4.4
192.168.4.5
[ingress]
192.168.31.1
192.168.32.2
[vip:vars]
k8s_node=k8s-vip
[node:vars]
k8s_node=k8s-node
[ingress:vars]
k8s_node=k8s-ingress
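With site.yml and the host inventory in place, the full node provisioning can be run in one pass (usage sketch, assuming both files live in /apps/work/k8s):
cd /apps/work/k8s
ansible-playbook -i host site.yml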

19. Creating a PodPreset to set the time zone and mount lxcfs

cd /apps/work/k8s
vi allow-lxcfs-tz-env.yaml
apiVersion: settings.k8s.io/v1alpha1
kind: PodPreset
metadata:
  name: allow-lxcfs-tz-env
spec:
  selector:
    matchLabels:       # left empty: the preset applies to every pod in the namespace
  volumeMounts:
    - mountPath: /proc/cpuinfo
      name: proc-cpuinfo
    - mountPath: /proc/diskstats
      name: proc-diskstats
    - mountPath: /proc/meminfo
      name: proc-meminfo
    - mountPath: /proc/stat
      name: proc-stat
    - mountPath: /proc/swaps
      name: proc-swaps
    - mountPath: /proc/uptime
      name: proc-uptime
    - mountPath: /etc/localtime
      name: allow-tz-env

  volumes:
    - name: proc-cpuinfo
      hostPath:
        path: /var/lib/lxcfs/proc/cpuinfo
    - name: proc-diskstats
      hostPath:
        path: /var/lib/lxcfs/proc/diskstats
    - name: proc-meminfo
      hostPath:
        path: /var/lib/lxcfs/proc/meminfo
    - name: proc-stat
      hostPath:
        path: /var/lib/lxcfs/proc/stat
    - name: proc-swaps
      hostPath:
        path: /var/lib/lxcfs/proc/swaps
    - name: proc-uptime
      hostPath:
        path: /var/lib/lxcfs/proc/uptime
    - name: allow-tz-env
      hostPath:
        path: /usr/share/zoneinfo/Asia/Shanghai
### Apply allow-lxcfs-tz-env.yaml (PodPreset is namespaced, so apply it in every namespace that needs it):
kubectl apply -f allow-lxcfs-tz-env.yaml
#### e.g. for the kube-system namespace:
kubectl apply -f allow-lxcfs-tz-env.yaml -n kube-system
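Note that PodPreset is an alpha resource: it only takes effect if the kube-apiserver enables the settings.k8s.io/v1alpha1 API group and the PodPreset admission plugin (assumed here to have been configured during master deployment). With that in place the object can be verified per namespace:
# kube-apiserver flags required for PodPreset:
#   --runtime-config=settings.k8s.io/v1alpha1=true
#   --enable-admission-plugins=...,PodPreset
kubectl get podpreset allow-lxcfs-tz-env -n kube-system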
