Tutorial: installing k8s with yum on CentOS 7.5 on an ARM architecture server (Phytium platform)
1. Installation environment
[root@k8s-master ~]# uname -a
Linux k8s-master 4.14.0-49.12.ts7.aarch64 #1 SMP Tue Nov 12 19:06:54 CST 2019 aarch64 aarch64 aarch64 GNU/Linux
[root@k8s-master ~]# cat /etc/redhat-release
TongyuanOS release 7.5.1810
Host        IP             Role
k8s-master  192.168.0.239  Master
k8s-node1   192.168.0.244  Node
2. Modify the hosts file on the master and the node
[root@k8s-master ~]# vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.0.239 k8s-master
192.168.0.244 k8s-node1
3. Install ntp so that all servers stay time-synchronized
[root@k8s-master ~]# yum install ntp -y
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
* epel: mirrors.tuna.tsinghua.edu.cn
file:///mnt/repodata/repomd.xml: [Errno 14] curl#37 - "Couldn't open file /mnt/repodata/repomd.xml"
Trying other mirror.
Package ntp-4.2.6p5-28.ts7.aarch64 already installed and latest version
Nothing to do
[root@k8s-master ~]# vim /etc/ntp.conf
# For more information about this file, see the man pages
# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5).
driftfile /var/lib/ntp/drift
# Permit time synchronization with our time source, but do not
# permit the source to query or modify the service on this system.
restrict default nomodify notrap nopeer noquery
# Permit all access over the loopback interface. This could
# be tightened as well, but to do so would effect some of
# the administrative functions.
restrict 127.0.0.1
restrict ::1
# Hosts on local network are less restricted.
#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
server 192.168.0.244 iburst
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
#broadcast 192.168.1.255 autokey # broadcast server
#broadcastclient # broadcast client
#broadcast 224.0.1.1 autokey # multicast server
#multicastclient 224.0.1.1 # multicast client
#manycastserver 239.255.254.254 # manycast server
#manycastclient 239.255.254.254 autokey # manycast client
# Enable public key cryptography.
#crypto
includefile /etc/ntp/crypto/pw
# Key file containing the keys and key identifiers used when operating
# with symmetric key cryptography.
keys /etc/ntp/keys
# Specify the key identifiers which are trusted.
#trustedkey 4 8 42
# Specify the key identifier to use with the ntpdc utility.
#requestkey 8
# Specify the key identifier to use with the ntpq utility.
#controlkey 8
# Enable writing of statistics records.
#statistics clockstats cryptostats loopstats peerstats
# Disable the monitoring facility to prevent amplification attacks using ntpdc
# monlist command when default restrict does not include the noquery flag. See
# CVE-2013-5211 for more details.
# Note: Monitoring will not be disabled with the limited restriction flag.
disable monitor
[root@k8s-master ~]#
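With the configuration in place, ntpd can be enabled and the time source checked; this is a minimal follow-up sketch, assuming the stock ntpd unit shipped with the ntp package:
systemctl enable ntpd
systemctl restart ntpd
ntpq -p    # the configured server should appear in the peer list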
4. Disable the firewall and SELinux on the master and the node
[root@k8s-master ~]# systemctl stop firewalld
[root@k8s-master ~]# systemctl disable firewalld
[root@k8s-master ~]# vim /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=disabled
# SELINUXTYPE= can take one of three values:
# targeted - Targeted processes are protected,
# minimum - Modification of targeted policy. Only selected processes are protected.
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
[root@k8s-master ~]# reboot
The key setting is SELINUX=disabled; if it is set to SELINUX=enforcing, change it to disabled and reboot.
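If you want the change to take effect without rebooting right away, SELinux can also be switched to permissive mode for the current session; this is a quick sketch, and the permanent change above is still needed:
setenforce 0
getenforce    # should now report Permissive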
Disable swap
swapoff -a
# To disable swap permanently, comment out the swap line in /etc/fstab:
vim /etc/fstab
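As an alternative to editing the file by hand, the swap entry can be commented out with sed and the result checked; this is a sketch, so adjust the pattern if your fstab formats the swap line differently:
sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab
free -h    # the Swap line should show 0B after swapoff -a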
5. Install Docker on the master and the node
Install the dependency packages
yum install -y yum-utils device-mapper-persistent-data lvm2
Add the yum repository for the Docker packages
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
Disable the edge and test channels so that only the stable versions are listed:
yum-config-manager --disable docker-ce-edge
yum-config-manager --disable docker-ce-test
Update the yum package index
yum makecache fast
Install Docker
Either install Docker CE directly:
yum install docker-ce
Or install a specific version of Docker CE:
yum list docker-ce --showduplicates | sort -r    # list the available versions
yum install docker-ce-18.06.0.ce -y              # install the specified Docker version
systemctl start docker && systemctl enable docker
Error
Transaction check error:
file /usr/bin/docker from install of docker-ce-18.06.0.ce-3.el7.centos.aarch64 conflicts with file from package docker-ce-cli-1:18.09.7-3.el7.aarch64
file /usr/share/bash-completion/completions/docker from install of docker-ce-18.06.0.ce-3.el7.centos.aarch64 conflicts with file from package docker-ce-cli-1:18.09.7-3.el7.aarch64
file /usr/share/man/man1/docker-attach.1.gz from install of docker-ce-18.06.0.ce-3.el7.centos.aarch64 conflicts with file from package docker-ce-cli-1:18.09.7-3.el7.aarch64
Uninstall the conflicting docker-ce-cli package
yum erase docker-ce-cli-1:18.09.7-3.el7.aarch64
Reinstall Docker
After Docker is installed, check the version with "docker version". The following error may appear:
Cannot connect to the Docker daemon at tcp://0.0.0.0:2375. Is the docker daemon running?
Solution: configure DOCKER_HOST
vim /etc/profile.d/docker.sh
# add the following line:
export DOCKER_HOST=tcp://localhost:2375
# apply the profile change:
source /etc/profile
# edit the Docker service unit:
vim /lib/systemd/system/docker.service
# find the line:
ExecStart=/usr/bin/dockerd
# and change it to:
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock -H tcp://0.0.0.0:7654
# 2375 is the conventional Docker remote API port; 7654 is an additional custom port
# reload systemd and restart Docker:
systemctl daemon-reload
systemctl restart docker.service
Run "docker version" again; the Docker client and daemon information should now be displayed.
Install k8s on the master and the node
Change the yum repository to the Aliyun mirror
vim /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-aarch64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kube*
Install k8s with yum
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
# or install a specific version:
yum install -y kubelet-1.18.1 kubeadm-1.18.1 kubectl-1.18.1 --disableexcludes=kubernetes
Enable and start the kubelet service
systemctl enable kubelet && systemctl start kubelet
Check the version number
kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"11", GitVersion:"v1.11.2", GitCommit:"bb9ffb1654d4a729bb4cec18ff088eacc153c239", GitTreeState:"clean", BuildDate:"2018-08-07T23:14:39Z", GoVersion:"go1.10.3", Compiler:"gc", Platform:"linux/arm64"}
Configure the iptables bridge settings
vim /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
# apply the settings:
sysctl --system
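These bridge sysctls only exist once the br_netfilter kernel module is loaded, so it may need to be loaded first; this is a sketch, assuming the module ships with the stock CentOS 7 kernel:
modprobe br_netfilter
lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables    # should print 1 after sysctl --system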
Install etcd and flannel (install etcd and flannel on the master; install only flannel on the node)
yum -y install etcd
systemctl start etcd; systemctl enable etcd
yum -y install flannel
Initialize the cluster on the master
[root@k8s-master ~]#kubeadm init --kubernetes-version=v1.18.1 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.1.0.0/16 --apiserver-advertise-address=192.168.0.239
W0602 17:15:06.331923 14191 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.18.1
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.0.239]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.0.239 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.0.239 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W0602 17:15:20.993249 14191 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
W0602 17:15:20.995775 14191 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 21.003198 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.18" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: vyd29d.an9fg2qjn1mld1oy
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.0.239:6443 --token vyd29d.an9fg2qjn1mld1oy \
--discovery-token-ca-cert-hash sha256:5c09859a5a681d8ddf362590effb580b433f7173787e6819482a165386aa216b
[root@k8s-master ~]#
If the required images cannot be pulled during initialization, pull the arm64 versions manually and re-tag them:
docker pull quay-mirror.qiniu.com/coreos/flannel:v0.12.0-arm64
docker tag quay-mirror.qiniu.com/coreos/flannel:v0.12.0-arm64 quay.io/coreos/flannel:v0.12.0-arm64
docker rmi quay-mirror.qiniu.com/coreos/flannel:v0.12.0-arm64
docker pull mirrorgcrio/kube-apiserver-arm64:v1.18.1
docker tag mirrorgcrio/kube-apiserver-arm64:v1.18.1 k8s.gcr.io/kube-apiserver:v1.18.1
docker rmi mirrorgcrio/kube-apiserver-arm64:v1.18.1
docker pull mirrorgcrio/kube-controller-manager-arm64:v1.18.1
docker tag mirrorgcrio/kube-controller-manager-arm64:v1.18.1 k8s.gcr.io/kube-controller-manager:v1.18.1
docker rmi mirrorgcrio/kube-controller-manager-arm64:v1.18.1
docker pull mirrorgcrio/kube-scheduler-arm64:v1.18.1
docker tag mirrorgcrio/kube-scheduler-arm64:v1.18.1 k8s.gcr.io/kube-scheduler:v1.18.1
docker rmi mirrorgcrio/kube-scheduler-arm64:v1.18.1
docker pull mirrorgcrio/kube-proxy-arm64:v1.18.1
docker tag mirrorgcrio/kube-proxy-arm64:v1.18.1 k8s.gcr.io/kube-proxy:v1.18.1
docker rmi mirrorgcrio/kube-proxy-arm64:v1.18.1
docker pull mirrorgcrio/etcd-arm64:3.4.3-0
docker tag mirrorgcrio/etcd-arm64:3.4.3-0 k8s.gcr.io/etcd:3.4.3-0
docker rmi mirrorgcrio/etcd-arm64:3.4.3-0
docker pull mirrorgcrio/pause-arm64:3.2
docker tag mirrorgcrio/pause-arm64:3.2 k8s.gcr.io/pause:3.2
docker rmi mirrorgcrio/pause-arm64:3.2
docker pull coredns/coredns:coredns-arm64
docker tag coredns/coredns:coredns-arm64 k8s.gcr.io/coredns:coredns-arm64
docker rmi coredns/coredns:coredns-arm64
docker pull mirrorgcrio/metrics-server-arm64:v0.3.6
docker tag mirrorgcrio/metrics-server-arm64:v0.3.6 k8s.gcr.io/metrics-server-arm64:v0.3.6
docker rmi mirrorgcrio/metrics-server-arm64:v0.3.6
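The pull/tag/rmi sequence for the core k8s.gcr.io images can also be scripted as a loop; this is only a convenience sketch using the same mirrorgcrio names and versions listed above (the flannel, coredns, and metrics-server images follow different naming and are handled separately):
for img in kube-apiserver:v1.18.1 kube-controller-manager:v1.18.1 kube-scheduler:v1.18.1 kube-proxy:v1.18.1 etcd:3.4.3-0 pause:3.2; do
  name=${img%%:*}; tag=${img##*:}
  docker pull mirrorgcrio/${name}-arm64:${tag}
  docker tag mirrorgcrio/${name}-arm64:${tag} k8s.gcr.io/${name}:${tag}
  docker rmi mirrorgcrio/${name}-arm64:${tag}
done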
Run the commands shown in the init output:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
The cluster master node has been installed successfully. Save the join command below so that each node can be added to the cluster. You can now join any number of machines by running the following on each node as root:
kubeadm join 192.168.0.239:6443 --token r0y84o.kcmv4dumghku67cj \
--discovery-token-ca-cert-hash sha256:e09ecb1421e7370c473b8ac56d1f6a993afafdb9c27729e4889422781d3c51d3
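After the join command finishes on the node, it should appear on the master (a quick check; the node may stay NotReady until the flannel network below is deployed):
kubectl get nodes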
Configure the kubectl credentials
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source ~/.bash_profile
export KUBECONFIG=/etc/kubernetes/admin.conf
source ~/.bash_profile
Check the cluster status and confirm that the components are in the Healthy state:
kubectl get cs
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-0 Healthy {"health":"true"}
Configure the flannel network
mkdir -p ~/k8s/
cd ~/k8s
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml
If https://raw.githubusercontent.com cannot be reached, look up an IP address for raw.githubusercontent.com (for example at https://site.ip138.com/raw.Githubusercontent.com/) and add it to the hosts file. On Ubuntu, CentOS, and macOS this can be done directly from a terminal:
sudo vi /etc/hosts
151.101.76.133 raw.githubusercontent.com
The output of the command is as follows:
kubectl apply -f kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
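Once the flannel manifests are applied, the DaemonSet pods and the node status can be verified (a verification sketch):
kubectl get pods -n kube-system -o wide | grep flannel
kubectl get nodes    # nodes should move to Ready once flannel is running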
Deploying the Dashboard add-on
Download the Dashboard add-on configuration file
# pull the images on the node(s) in advance:
docker pull kubernetesui/dashboard:v2.0.0
docker pull kubernetesui/metrics-scraper:v1.0.4
# download the configuration file:
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml
Edit the downloaded recommended.yaml file and add type: NodePort to the kubernetes-dashboard Service to expose the Dashboard:
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
Run the command:
[root@k8s-master k8s]# kubectl create -f recommended.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
Create a ServiceAccount and bind it to the default cluster-admin cluster role:
kubectl create serviceaccount dashboard-admin -n kubernetes-dashboard
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:dashboard-admin
To log in to kubernetes-dashboard, retrieve the login token:
kubectl get secret -n kubernetes-dashboard
kubectl describe secret dashboard-admin-token-bwdjj -n kubernetes-dashboard
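As an alternative to reading the token out of the describe output, it can be extracted in one line (a sketch, assuming the dashboard-admin ServiceAccount created above):
kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa dashboard-admin -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 -d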
Note: to view the kubernetes-dashboard Service (and its exposed NodePort):
kubectl --namespace=kubernetes-dashboard get service kubernetes-dashboard
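The second number in the PORT(S) column is the NodePort; the Dashboard is then reachable in a browser at https://<node-IP>:<NodePort>, for example https://192.168.0.239:30001 if 30001 happened to be the assigned port (the actual port is assigned when the Service is created).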
Fixing Chrome being unable to open the kubernetes dashboard
Run the commands:
mkdir key && cd key
Generate a certificate
openssl genrsa -out dashboard.key 2048
openssl req -new -out dashboard.csr -key dashboard.key -subj '/CN=192.168.0.239'
openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
Delete the existing certificate secret
kubectl delete secret kubernetes-dashboard-certs -n kubernetes-dashboard
Create a new certificate secret
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kubernetes-dashboard
List the Dashboard pods
kubectl get pod -n kubernetes-dashboard
Restart the pod (deleting it causes it to be recreated with the new certificate)
kubectl delete pod kubernetes-dashboard-7b544877d5-d76kd -n kubernetes-dashboard
Deploying metrics-server
Download the configuration file
wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.3.6/components.yaml
Modify the file contents
[root@k8s-master k8s]# vi components.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      hostNetwork: true                # run metrics-server on the host network
      containers:
      - name: metrics-server
        image: k8s.gcr.io/metrics-server-arm64:v0.3.6
        imagePullPolicy: IfNotPresent
        command:                       # startup arguments
        - /metrics-server
        - --metric-resolution=30s      # how often metrics are collected
        - --requestheader-allowed-names=aggregator
        - --kubelet-insecure-tls       # do not verify the kubelet serving certificate
        - --kubelet-preferred-address-types=InternalDNS,InternalIP,ExternalDNS,ExternalIP,Hostname   # address types tried when connecting to kubelets
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
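The modified manifest still needs to be applied; once the pod is running, node and pod metrics should become available (a sketch; the first readings can take a minute or two to appear):
kubectl apply -f components.yaml
kubectl get pods -n kube-system | grep metrics-server
kubectl top nodes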
Bonus notes
arm64 package repositories
Domestic (Chinese) operating systems are generally based on the relatively mature Ubuntu or CentOS, being arm64 derivatives of those distributions, so day-to-day operation differs little from Ubuntu or CentOS on x86; the only real difference may be the package repositories.
Commonly used arm64 repository addresses:
CentOS arm64 yum repository: https://mirrors.aliyun.com/centos-altarch/
Ubuntu arm64 apt repository: https://mirrors.aliyun.com/ubuntu-ports/
EPEL arm64 yum repository: https://mirrors.aliyun.com/epel
For how to configure yum or apt repositories, refer to the documentation available online.
In fact, many commonly used packages already ship arm64 repositories; see https://opsx.alibaba.com/mirror. If a repository contains directories such as aarch64, it supports the arm64 hardware architecture.
k8s supports the arm64 architecture
Supporting arm64 in k8s is actually fairly straightforward: because cross-compiling Go across platforms is easy, the core k8s binaries and Docker images are all available for arm64. Replacing the binaries of a normally deployed k8s cluster with their arm64 builds lets k8s run normally on arm64 as well. For example:
etcd: https://github.com/etcd-io/etcd/releases (binaries with aarch64 in the file name are the arm64 builds)
kubernetes: https://kubernetes.io/docs/setup/release/notes/#client-binaries, https://kubernetes.io/docs/setup/release/notes/#server-binaries, https://kubernetes.io/docs/setup/release/notes/#node-binaries (binaries with arm64 in the file name are the arm64 builds)
docker: https://mirrors.aliyun.com/docker-ce/linux/ (CentOS and Ubuntu have corresponding Docker arm64 repositories)
default cni plugin (flannel): https://github.com/containernetworking/cni/releases, https://github.com/coreos/flannel/releases (binaries with arm64 in the file name are the arm64 builds)
calico: https://github.com/projectcalico/cni-plugin/releases
pause image: gcr.io/google_containers/pause-arm64
metrics-server image: gcr.io/google_containers/metrics-server-arm64
coredns image: coredns/coredns:coredns-arm64
kubernetes-dashboard image: gcr.io/google_containers/kubernetes-dashboard-arm64
flannel image: gcr.io/google_containers/flannel-arm64
kube-state-metrics image: gcr.io/google_containers/kube-state-metrics-arm64
Other arm64 images can be found at https://hub.docker.com/u/googlecontainer/ and https://hub.docker.com/r/arm64v8/