k8s v-1.20版本部署详细过程[实测可用无坑]1.部署环境准备1.1 各软件版本
系统 | Docker | k8s |
---|---|---|
Linux master 3.10.0-1160.el7.x86_64 | Docker version 20.10.17 | 1.20.0-0 |
1.2.部署规划[单master]
主机名 | IP地址 | 角色 |
---|---|---|
k8s-master | 192.168.56.202 | master节点 |
k8s-node1 | 192.168.56.203 | node节点 |
k8s-node2 | 192.168.56.204 | node节点 |
2.系统环境的初始化操作2.1 关闭防火墙
[root@K8sMaster ~]# systemctl stop firewalld [root@K8sMaster ~]# systemctl disable firewalld
2.2 关闭selinux
#临时关闭
[root@K8sMaster ~]# setenforce 0
#永久关闭（将SELINUX设置为disabled禁用）
[root@K8sMaster ~]# sed -i 's/enforcing/disabled/' /etc/selinux/config
2.3 关闭swap交换分区
#此处一定要关闭交换分区，否则在kubeadm初始化时会报错
#临时关闭
[root@K8sMaster ~]# swapoff -a
#永久关闭
[root@K8sMaster ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab
2.4 设置主机名
#临时处理
[root@K8sMaster ~]# hostnamectl set-hostname k8s-master
#永久处理（主机名与部署规划保持一致，使用 k8s-master）
[root@k8s-master ~]# cat > /etc/hostname << EOF
k8s-master
EOF
2.5 设置主机名通信
[root@K8sMaster ~]# cat >> /etc/hosts << EOF
192.168.56.202 k8s-master
192.168.56.203 k8s-node1
192.168.56.204 k8s-node2
EOF
2.6 桥接的IPv4流量传递到iptables链
[root@K8sMaster sysctl.d]# cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# 配置生效
[root@K8sMaster ~]# sysctl --system
[root@K8sMaster ~]# sysctl -a
2.7 设置时钟同步
[root@K8sMaster ~]# yum -y install ntpdate# 从阿里云同步时钟[root@K8sMaster ~]# ntpdate time1.aliyun.com
3.安装docker3.1 设置docker仓库
[root@K8sMaster ~]# yum install -y yum-utils [root@K8sMaster ~]# yum-config-manager \ --add-repo \ https://download.docker.com/linux/centos/docker-ce.repo [root@K8sMaster ~]# yum-config-manager \ --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
3.2 安装docker
# 安装 [root@K8sMaster ~]# yum install docker-ce # 启用 [root@K8sMaster ~]# systemctl start docker # 设置开机启用 此处要设置开机启动否则后面初始化时也会出错 [root@K8sMaster ~]# systemctl enable docker
3.3 设置docker镜像加速
[root@K8sMaster ~]# mkdir -p /etc/docker
[root@K8sMaster ~]# cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://49qrnx21.mirror.aliyuncs.com"]
}
EOF
[root@K8sMaster ~]# systemctl daemon-reload && systemctl restart docker
4. 使用kubeadm方式安装k8s4.1 设置K8S的仓库
[root@K8sMaster ~]# cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
EOF
4.2 安装kubeadm kubectl kubelet
[root@K8sMaster ~]# yum install -y kubelet-1.20.0 kubeadm-1.20.0 kubectl-1.20.0[root@K8sMaster ~]# systemctl enable kubelet
4.3 初始化kubeadm
【master节点,这一步只在master节点运行】
[root@K8sMaster ~]# kubeadm init \ --apiserver-advertise-address=192.168.56.202 \ --image-repository registry.aliyuncs.com/google_containers \ --kubernetes-version 1.20.0 \ --service-cidr=10.96.0.0/12 \ --pod-network-cidr=10.244.0.0/16# 初始化完成之后会出现 如下字样Your Kubernetes control-plane has initialized successfully.......# 根据上述指示创建文件 mkdir -p $HOME/.kubesudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/configsudo chown $(id -u):$(id -g) $HOME/.kube/config# 并且出现 kubeadm join的提示 【此处的命令为node节点加入master的命令】
4.4 将node节点加入到 cluster集群
# 第一种情况在 master 节点 初始化完成之后会出现 加入集群的命令直接使用# 第二种情况在部署node节点时候 token过期或者忘记 并且没有--discovery-token-ca-cert-hash值# 则需要在master主机重新生成 token 和 --discovery-token-ca-cert-hash
4.4.1 查看token状态
[root@K8sMaster ~]# kubeadm token list TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPShkqc00.c0sn7s5mpauw11fx 3h 2022-08-22T21:25:46+08:00 authentication,signing The default bootstrap token generated by 'kubeadm init'. system:bootstrappers:kubeadm:default-node-tokenqlhylz.emm864nueu1ypv6p 23h 2022-08-23T17:44:21+08:00 authentication,signing system:bootstrappers:kubeadm:default-node-tokenxnwbbo.1ylplt1jrz10ms1u 23h 2022-08-23T17:44:27+08:00 authentication,signing system:bootstrappers:kubeadm:default-node-token
4.4.2 生成新的token
[root@K8sMaster ~]# kubeadm token create
4.4.3 获取–discovery-token-ca-cert-hash值
[root@K8sMaster ~]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \openssl dgst -sha256 -hex | sed 's/^.* //'
4.5 node节点加入master 集群
# 注意：此命令在 node 节点上执行，join 的地址必须是 master 节点的 apiserver 地址（192.168.56.202:6443），而不是 node 自身的 IP
[root@K8sNode1 ~]# kubeadm join 192.168.56.202:6443 --token qlhylz.emm864nueu1ypv6p \
--discovery-token-ca-cert-hash sha256:cd778ad01bdbc656eaff7d3b1273691f0070ebbadd2f1b8a3189a6dc1e88f39f
# 这里的 token 和 --discovery-token-ca-cert-hash 两个值使用刚才生成的值
4.6 查看节点状态
[root@K8sMaster ~]# kubectl get nodes NAME STATUS ROLES AGE VERSIONK8sMaster NotReady control-plane,master 20h v1.20.0K8sNode1 NotReady 13m v1.20.0[root@K8sMaster ~]#
5.部署容器网络插件[CNI]5.1 flannel网络插件的部署
kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
5.2 查看是否部署完成
[root@master ~]# kubectl get pods --all-namespacesNAMESPACE NAME READY STATUS RESTARTS AGEkube-flannel kube-flannel-ds-c66ds 1/1 Running 0 14mkube-flannel kube-flannel-ds-pxhzc 1/1 Running 0 14mkube-system coredns-7f89b7bc75-gpgl9 1/1 Running 0 27hkube-system coredns-7f89b7bc75-sr5mx 1/1 Running 0 27hkube-system etcd-master 1/1 Running 2 27hkube-system kube-apiserver-master 1/1 Running 2 27hkube-system kube-controller-manager-master 1/1 Running 2 27hkube-system kube-proxy-hkmd9 1/1 Running 0 7h33mkube-system kube-proxy-mrxwf 1/1 Running 3 27hkube-system kube-scheduler-master 1/1 Running 3 27h
5.3 查看node状态
都是Ready状态
[root@master ~]# kubectl get nodeNAME STATUS ROLES AGE VERSIONmaster Ready control-plane,master 27h v1.20.0node1 Ready 7h31m v1.20.0
5.4 测试集群
# 创建nginx应用[root@master opt]# kubectl create deployment nginx --image=nginx# 查看是否下载完成[root@master opt]# kubectl get podsNAME READY STATUS RESTARTS AGEnginx-6799fc88d8-lx9nc 1/1 Running 0 3m44s# 暴露端口[root@master opt]# kubectl expose deployment nginx --port=80 --type=NodePort# 查看端口情况[root@master opt]# kubectl get pod,svcNAME READY STATUS RESTARTS AGEpod/nginx-6799fc88d8-lx9nc 1/1 Running 0 4m27sNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGEservice/kubernetes ClusterIP 10.96.0.1 443/TCP 28hservice/nginx NodePort 10.100.68.38 80:30572/TCP 3s
5.5 访问测试
访问任意一个node节点的 http://ip:30572 查看访问结果，能访问到nginx初始界面即为成功
6. 安装DashBoard面板6.1下载Dashboard配置文件
[root@master opt]# wget https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml
由于用的是国外的源所以 原文大致如下 可直接复制使用
# Copyright 2017 The Kubernetes Authors.## Licensed under the Apache License, Version 2.0 (the "License");# you may not use this file except in compliance with the License.# You may obtain a copy of the License at## http://www.apache.org/licenses/LICENSE-2.0## Unless required by applicable law or agreed to in writing, software# distributed under the License is distributed on an "AS IS" BASIS,# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.# See the License for the specific language governing permissions and# limitations under the License.# Configuration to deploy release version of the Dashboard UI compatible with# Kubernetes 1.8.## Example usage: kubectl create -f # ------------------- Dashboard Secret ------------------- #apiVersion: v1kind: Secretmetadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard-certs namespace: kube-systemtype: Opaque---# ------------------- Dashboard Service Account ------------------- #apiVersion: v1kind: ServiceAccountmetadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kube-system---# ------------------- Dashboard Role & Role Binding ------------------- #kind: RoleapiVersion: rbac.authorization.k8s.io/v1metadata: name: kubernetes-dashboard-minimal namespace: kube-systemrules: # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.- apiGroups: [""] resources: ["secrets"] verbs: ["create"] # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.- apiGroups: [""] resources: ["configmaps"] verbs: ["create"] # Allow Dashboard to get, update and delete Dashboard exclusive secrets.- apiGroups: [""] resources: ["secrets"] resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"] verbs: ["get", "update", "delete"] # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.- apiGroups: [""] resources: ["configmaps"] resourceNames: ["kubernetes-dashboard-settings"] verbs: ["get", "update"] # Allow 
Dashboard to get metrics from heapster.- apiGroups: [""] resources: ["services"] resourceNames: ["heapster"] verbs: ["proxy"]- apiGroups: [""] resources: ["services/proxy"] resourceNames: ["heapster", "http:heapster:", "https:heapster:"] verbs: ["get"]---apiVersion: rbac.authorization.k8s.io/v1kind: RoleBindingmetadata: name: kubernetes-dashboard-minimal namespace: kube-systemroleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: kubernetes-dashboard-minimalsubjects:- kind: ServiceAccount name: kubernetes-dashboard namespace: kube-system---# ------------------- Dashboard Deployment ------------------- #kind: DeploymentapiVersion: apps/v1metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kube-systemspec: replicas: 1 revisionHistoryLimit: 10 selector: matchLabels: k8s-app: kubernetes-dashboard template: metadata: labels: k8s-app: kubernetes-dashboard spec: containers: - name: kubernetes-dashboard# 此处为修改后的值 国内源 image: registry.cn-beijing.aliyuncs.com/minminmsn/kubernetes-dashboard:v1.10.1 ports: - containerPort: 8443 protocol: TCP args: - --auto-generate-certificates # Uncomment the following line to manually specify Kubernetes API server Host # If not specified, Dashboard will attempt to auto discover the API server and connect # to it. Uncomment only if the default does not work. 
# - --apiserver-host=http://my-address:port volumeMounts: - name: kubernetes-dashboard-certs mountPath: /certs # Create on-disk volume to store exec logs - mountPath: /tmp name: tmp-volume livenessProbe: httpGet: scheme: HTTPS path: / port: 8443 initialDelaySeconds: 30 timeoutSeconds: 30 volumes: - name: kubernetes-dashboard-certs secret: secretName: kubernetes-dashboard-certs - name: tmp-volume emptyDir: {} serviceAccountName: kubernetes-dashboard # Comment the following tolerations if Dashboard must not be deployed on master tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule---# ------------------- Dashboard Service ------------------- #kind: ServiceapiVersion: v1metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kube-systemspec:# --此处为新增配置 type: NodePort # -- ports: - port: 443 targetPort: 8443# --此处为新增配置 nodePort: 30001# -- selector: k8s-app: kubernetes-dashboard
6.1 创建dashboard
[root@master opt]# kubectl apply -f kubernetes-dashboard.yaml
6.2 查看新建状态
[root@master opt]# kubectl get pods --namespace=kube-systemNAME READY STATUS RESTARTS AGEcoredns-7f89b7bc75-gpgl9 1/1 Running 0 33hcoredns-7f89b7bc75-sr5mx 1/1 Running 0 33hetcd-master 1/1 Running 2 33hkube-apiserver-master 1/1 Running 2 33hkube-controller-manager-master 1/1 Running 2 33hkube-proxy-hkmd9 1/1 Running 0 13hkube-proxy-mrxwf 1/1 Running 3 33hkube-scheduler-master 1/1 Running 3 33hkubernetes-dashboard-97c4799d9-4jp7k 1/1 Running 0 13m
6.3查看映射端口
[root@master opt]# kubectl get services kubernetes-dashboard -n kube-systemNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGEkubernetes-dashboard NodePort 10.108.118.239 443:30001/TCP 4h39m
6.4 查看被分配的node
[root@master opt]# kubectl get pods -o wide --namespace=kube-systemNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATEScoredns-7f89b7bc75-gpgl9 1/1 Running 0 33h 10.244.1.3 node1 coredns-7f89b7bc75-sr5mx 1/1 Running 0 33h 10.244.1.2 node1 etcd-master 1/1 Running 2 33h 192.168.56.202 master kube-apiserver-master 1/1 Running 2 33h 192.168.56.202 master kube-controller-manager-master 1/1 Running 2 33h 192.168.56.202 master kube-proxy-hkmd9 1/1 Running 0 13h 192.168.56.203 node1 kube-proxy-mrxwf 1/1 Running 3 33h 192.168.56.202 master kube-scheduler-master 1/1 Running 3 33h 192.168.56.202 master kubernetes-dashboard-97c4799d9-4jp7k 1/1 Running 0 16m 10.244.1.6 node1 [root@master opt]#
6.5查看集群权限对象
[root@master opt]# kubectl get clusterrolesNAME CREATED ATadmin 2022-08-21T13:25:44Zcluster-admin 2022-08-21T13:25:44Zedit 2022-08-21T13:25:44Zflannel 2022-08-22T17:06:43Zkubeadm:get-nodes 2022-08-21T13:25:46Zsystem:aggregate-to-admin 2022-08-21T13:25:44Zsystem:aggregate-to-edit 2022-08-21T13:25:45Zsystem:aggregate-to-view 2022-08-21T13:25:45Zsystem:auth-delegator 2022-08-21T13:25:45Zsystem:basic-user 2022-08-21T13:25:44Z
6.6 查看cluster-admin权限
[root@master opt]# kubectl describe clusterroles cluster-adminName: cluster-adminLabels: kubernetes.io/bootstrapping=rbac-defaultsAnnotations: rbac.authorization.kubernetes.io/autoupdate: truePolicyRule: Resources Non-Resource URLs Resource Names Verbs --------- ----------------- -------------- ----- *.* [] [] [*] [*] [] [*]
6.7 将服务账户 kubernetes-dashboard 跟 cluster-admin 这个集群管理员权限对象绑定
[root@master opt]# cat > kubernetes-dashboard-ClusterRoleBinding.yaml << EOFapiVersion: rbac.authorization.k8s.io/v1kind: ClusterRoleBindingmetadata: name: kubernetes-dashboard labels: k8s-app: kubernetes-dashboardroleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cluster-adminsubjects:- kind: ServiceAccount name: kubernetes-dashboard namespace: kube-systemEOF[root@master opt]# kubectl create -f kubernetes-dashboard-ClusterRoleBinding.yaml
6.8获取登录Token
[root@master opt]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin | awk '{print $1}')Name: admin-token-t4sxgNamespace: kube-systemLabels: Annotations: kubernetes.io/service-account.name: admin kubernetes.io/service-account.uid: 9f6ec4cf-925f-4d9f-a104-8e7367d08c98Type: kubernetes.io/service-account-tokenData====ca.crt: 1066 bytesnamespace: 11 bytestoken: eyJhbGciOiJSUzI1NiIsImtpZCI6ImdyX0lGZm9xNlNrbTRxQnJoeU1CREZPUFdTR0F0WmNxekEzZ29ITkRFY0EifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi10b2tlbi10NHN4ZyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJhZG1pbiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjlmNmVjNGNmLTkyNWYtNGQ5Zi1hMTA0LThlNzM2N2QwOGM5OCIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTphZG1pbiJ9.mv7wGLXAFcThtrS5_zuIKG-MaYJkQ2pud72G5vNfUrQsqSXNn3Kqf0OjFhagvZ1By5fHbyahTGWSWtYOoZUSKNXk42kEbh1LnhkYyjLKtFI1hUvk5Fw4Scc1PgGUpIyE1KPd4V3_jH3U7P6Kz5GfjkGhcTNDenoqdXtJN7c8UeF6QaG9hYxLyKCEH2M7SJDHq8q8LBuLT12yfxrFl6tGq5U_LwyYmJNUdpTuFOuvBYJ-8hi_ptxpxRDvUJskuZBW4HIxNi5dnNsQUt7euhSWePqfsSYAfTdPmTH6UvpKTAv87i9CA3rVeb46Jek7TMC6so5QNWnqBFb5RXkFqja8PA
6.9访问dashboard
特别注释:由于这里使用了非443端口上的自签证书，所以google和ie浏览器打不开dashboard界面,可使用火狐打开连接地址:https://所分配的node节点的ip:30001 打开后输入以上token
6.10 解决无法用google浏览器无法打开的问题
[root@master opt]# mkdir kubernetes-dashboard-key && cd kubernetes-dashboard-key
# 生成证书请求的key
[root@master kubernetes-dashboard-key]# openssl genrsa -out dashboard.key 2048
# 生成csr（CN使用master节点IP，原文的 192.168.56.202.113 含多余的 ".113"，不是合法IP）
[root@master kubernetes-dashboard-key]# openssl req -new -out dashboard.csr -key dashboard.key -subj '/CN=192.168.56.202'
# 生成自签证书
[root@master kubernetes-dashboard-key]# openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
# 删除原有证书
[root@master kubernetes-dashboard-key]# kubectl delete secret kubernetes-dashboard-certs -n kube-system
# 创建新证书的secret
[root@master kubernetes-dashboard-key]# kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kube-system
# 查看pod名称后删除pod使其重启，重新加载证书
[root@master kubernetes-dashboard-key]# kubectl get pod -n kube-system
[root@master kubernetes-dashboard-key]# kubectl delete pod kubernetes-dashboard-7d6c598b5f-fvcg8 -n kube-system
6.11如果出现大量的404页面
# 由于使用的创建dashboard的yaml文件是 旧版本的文件 要使用新的yaml文件 可以解决文件内容可以参考如下:# Copyright 2017 The Kubernetes Authors.## Licensed under the Apache License, Version 2.0 (the "License");# you may not use this file except in compliance with the License.# You may obtain a copy of the License at## http://www.apache.org/licenses/LICENSE-2.0## Unless required by applicable law or agreed to in writing, software# distributed under the License is distributed on an "AS IS" BASIS,# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.# See the License for the specific language governing permissions and# limitations under the License.apiVersion: v1kind: Namespacemetadata: name: kubernetes-dashboard---apiVersion: v1kind: ServiceAccountmetadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kubernetes-dashboard---kind: ServiceapiVersion: v1metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kubernetes-dashboardspec: ports: - port: 443 targetPort: 8443 nodePort: 30000 selector: k8s-app: kubernetes-dashboard type: NodePort---apiVersion: v1kind: Secretmetadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard-certs namespace: kubernetes-dashboardtype: Opaque---apiVersion: v1kind: Secretmetadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard-csrf namespace: kubernetes-dashboardtype: Opaquedata: csrf: ""---apiVersion: v1kind: Secretmetadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard-key-holder namespace: kubernetes-dashboardtype: Opaque---kind: ConfigMapapiVersion: v1metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard-settings namespace: kubernetes-dashboard---kind: RoleapiVersion: rbac.authorization.k8s.io/v1metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kubernetes-dashboardrules: # Allow Dashboard to get, update and delete Dashboard exclusive secrets. 
- apiGroups: [""] resources: ["secrets"] resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] verbs: ["get", "update", "delete"] # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. - apiGroups: [""] resources: ["configmaps"] resourceNames: ["kubernetes-dashboard-settings"] verbs: ["get", "update"] # Allow Dashboard to get metrics. - apiGroups: [""] resources: ["services"] resourceNames: ["heapster", "dashboard-metrics-scraper"] verbs: ["proxy"] - apiGroups: [""] resources: ["services/proxy"] resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] verbs: ["get"]---kind: ClusterRoleapiVersion: rbac.authorization.k8s.io/v1metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboardrules: # Allow Metrics Scraper to get metrics from the Metrics server - apiGroups: ["metrics.k8s.io"] resources: ["pods", "nodes"] verbs: ["get", "list", "watch"]---apiVersion: rbac.authorization.k8s.io/v1kind: RoleBindingmetadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kubernetes-dashboardroleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: kubernetes-dashboardsubjects: - kind: ServiceAccount name: kubernetes-dashboard namespace: kubernetes-dashboard---apiVersion: rbac.authorization.k8s.io/v1kind: ClusterRoleBindingmetadata: name: kubernetes-dashboard namespace: kubernetes-dashboardroleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: kubernetes-dashboardsubjects: - kind: ServiceAccount name: kubernetes-dashboard namespace: kubernetes-dashboard---kind: DeploymentapiVersion: apps/v1metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kubernetes-dashboardspec: replicas: 1 revisionHistoryLimit: 10 selector: matchLabels: k8s-app: kubernetes-dashboard template: metadata: labels: k8s-app: kubernetes-dashboard spec: containers: - name: 
kubernetes-dashboard image: kubernetesui/dashboard:v2.0.0-beta4 imagePullPolicy: Always ports: - containerPort: 8443 protocol: TCP args: - --auto-generate-certificates - --namespace=kubernetes-dashboard # Uncomment the following line to manually specify Kubernetes API server Host # If not specified, Dashboard will attempt to auto discover the API server and connect # to it. Uncomment only if the default does not work. # - --apiserver-host=http://my-address:port volumeMounts: - name: kubernetes-dashboard-certs mountPath: /certs # Create on-disk volume to store exec logs - mountPath: /tmp name: tmp-volume livenessProbe: httpGet: scheme: HTTPS path: / port: 8443 initialDelaySeconds: 30 timeoutSeconds: 30 volumes: - name: kubernetes-dashboard-certs secret: secretName: kubernetes-dashboard-certs - name: tmp-volume emptyDir: {} serviceAccountName: kubernetes-dashboard # Comment the following tolerations if Dashboard must not be deployed on master tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule---kind: ServiceapiVersion: v1metadata: labels: k8s-app: dashboard-metrics-scraper name: dashboard-metrics-scraper namespace: kubernetes-dashboardspec: ports: - port: 8000 targetPort: 8000 selector: k8s-app: dashboard-metrics-scraper---kind: DeploymentapiVersion: apps/v1metadata: labels: k8s-app: dashboard-metrics-scraper name: dashboard-metrics-scraper namespace: kubernetes-dashboardspec: replicas: 1 revisionHistoryLimit: 10 selector: matchLabels: k8s-app: dashboard-metrics-scraper template: metadata: labels: k8s-app: dashboard-metrics-scraper spec: containers: - name: dashboard-metrics-scraper image: kubernetesui/metrics-scraper:v1.0.1 ports: - containerPort: 8000 protocol: TCP livenessProbe: httpGet: scheme: HTTP path: / port: 8000 initialDelaySeconds: 30 timeoutSeconds: 30 volumeMounts: - mountPath: /tmp name: tmp-volume serviceAccountName: kubernetes-dashboard # Comment the following tolerations if Dashboard must not be deployed on master tolerations: - 
key: node-role.kubernetes.io/master effect: NoSchedule volumes: - name: tmp-volume emptyDir: {}
重启dashboard服务
[root@master opt]# kubectl delete -f kubernetes-dashboard.yaml[root@master opt]# kubectl create -f kubernetes-dashboard.yaml