One-Click Install Script for Kubernetes v1.9.1 (Single Node)

I deployed and tested this myself; the full script appears below.
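Note: the script assumes the etcd, etcdctl, flanneld, mk-docker-opts.sh, and kube* binaries already sit in the directory it is run from. A minimal sketch of fetching them first (the exact versions and URLs below are my assumptions, not part of the original script; any release compatible with v1.9.1 should work):

# Kubernetes v1.9.1 server binaries
wget https://dl.k8s.io/v1.9.1/kubernetes-server-linux-amd64.tar.gz
tar xzf kubernetes-server-linux-amd64.tar.gz
cp kubernetes/server/bin/{kubectl,kube-apiserver,kube-scheduler,kube-controller-manager,kubelet,kube-proxy} .

# etcd (v3.2.18 assumed here)
wget https://github.com/coreos/etcd/releases/download/v3.2.18/etcd-v3.2.18-linux-amd64.tar.gz
tar xzf etcd-v3.2.18-linux-amd64.tar.gz
cp etcd-v3.2.18-linux-amd64/{etcd,etcdctl} .

# flannel (v0.9.1 assumed here; the tarball ships flanneld and mk-docker-opts.sh)
wget https://github.com/coreos/flannel/releases/download/v0.9.1/flannel-v0.9.1-linux-amd64.tar.gz
tar xzf flannel-v0.9.1-linux-amd64.tar.gz flanneld mk-docker-opts.sh

Then save the script below as, say, k8s-install.sh and run it as root: bash k8s-install.sh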

#!/bin/bash
# ----------------------------------------
# One-click single-node deployment script for Kubernetes v1.9.1
# Intended for lab environments
# Tested OK on CentOS 7.2.1511
# Powered by Jerry Wong
# 2018-03-15 hzde0128@live.cn
# ----------------------------------------

# Detect the local IP address (take the first match on multi-NIC hosts)
function get_local_ip() {
    IP_ADDR=$(ip addr | grep inet | grep -Ev '127|inet6' | awk '{print $2}' | awk -F'/' '{print $1}' | head -n1)
    export NODE_IP=${IP_ADDR}
}

# Disable SELinux and firewalld (acceptable for a lab environment)
function basic_settings() {
    if ! getenforce | grep -q Disabled; then
        sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
        setenforce 0
    fi
    systemctl stop firewalld
    systemctl disable firewalld
}

function install_docker() {
    yum -y install yum-utils device-mapper-persistent-data lvm2
    yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    yum -y install docker-ce

    systemctl start docker
    systemctl status docker
    systemctl enable docker

    # Use a China-local registry mirror (Tencent)
    sed -i 's#ExecStart=/usr/bin/dockerd#ExecStart=/usr/bin/dockerd --registry-mirror=https://mirror.ccs.tencentyun.com#' /usr/lib/systemd/system/docker.service
    systemctl daemon-reload
    systemctl restart docker
}

# Install etcd (binaries are expected in the script's working directory)
function install_etcd() {
    chmod +x etcd etcdctl
    mv etcd etcdctl /usr/bin/
}

# Install the Kubernetes binaries
function install_kubernetes() {
    chmod +x kube*
    mv kube{ctl,-apiserver,-scheduler,-controller-manager,let,-proxy} /usr/bin/

    # Show version info
    kube-apiserver --version
}

# Install flanneld
function install_flanneld() {
    chmod +x flanneld mk-docker-opts.sh
    mv flanneld /usr/bin/
    mkdir -p /usr/libexec/flannel/
    mv mk-docker-opts.sh /usr/libexec/flannel/

    # Show version info
    flanneld --version
}

# Configure and start etcd
function config_etcd() {
cat > /usr/lib/systemd/system/etcd.service <<EOF
[Unit]
Description=etcd
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos/etcd
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd
EnvironmentFile=-/etc/etcd/etcd.conf
ExecStart=/usr/bin/etcd --config-file /etc/etcd/etcd.conf
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

mkdir -p /var/lib/etcd/
mkdir -p /etc/etcd/
export ETCD_NAME=etcd
cat > /etc/etcd/etcd.conf <<EOF
name: '${ETCD_NAME}'
data-dir: "/var/lib/etcd/"
listen-peer-urls: http://${NODE_IP}:2380
listen-client-urls: http://${NODE_IP}:2379,http://127.0.0.1:2379
initial-advertise-peer-urls: http://${NODE_IP}:2380
advertise-client-urls: http://${NODE_IP}:2379
initial-cluster: "etcd=http://${NODE_IP}:2380"
initial-cluster-token: 'etcd-cluster'
initial-cluster-state: 'new'
EOF

systemctl start etcd
[ $? -eq 0 ] || exit
systemctl status etcd
systemctl enable etcd

# Verify the installation
etcdctl member list
[ $? -eq 0 ] || exit
# Check cluster health
etcdctl cluster-health
[ $? -eq 0 ] || exit
}

# Configure and start flanneld
function config_flanneld() {
cat > /etc/systemd/system/flanneld.service <<EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service
[Service]
Type=notify
EnvironmentFile=/etc/sysconfig/flanneld
EnvironmentFile=-/etc/sysconfig/docker-network
ExecStart=/usr/bin/flanneld-start \$FLANNEL_OPTIONS
ExecStartPost=/usr/libexec/flannel/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=on-failure
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF

cat > /usr/bin/flanneld-start <<EOF
#!/bin/sh
exec /usr/bin/flanneld \\
        -etcd-endpoints=\${FLANNEL_ETCD_ENDPOINTS:-\${FLANNEL_ETCD}} \\
        -etcd-prefix=\${FLANNEL_ETCD_PREFIX:-\${FLANNEL_ETCD_KEY}} \\
        "\$@"
EOF

chmod 755 /usr/bin/flanneld-start

# Write the pod network configuration into etcd (v2 API)
etcdctl mkdir /kube/network
etcdctl set /kube/network/config '{ "Network": "10.254.0.0/16" }'

cat > /etc/sysconfig/flanneld <<EOF
FLANNEL_ETCD_ENDPOINTS="http://${NODE_IP}:2379"
FLANNEL_ETCD_PREFIX="/kube/network"
EOF

systemctl start flanneld
[ $? -eq 0 ] || exit
systemctl status flanneld
systemctl enable flanneld

# Switch docker onto the subnet that flannel allocated
source /var/run/flannel/subnet.env

cat > /etc/docker/daemon.json <<EOF
{
  "bip" : "$FLANNEL_SUBNET"
}
EOF

# Restart docker
systemctl daemon-reload
systemctl restart docker
[ $? -eq 0 ] || exit
}
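
# (Optional) To check by hand that docker really picked up the flannel subnet,
# compare FLANNEL_SUBNET in the env file flannel writes with the docker0 address:
#   cat /run/flannel/subnet.env
#   ip addr show docker0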
# Configure kube-apiserver
function config_apiserver() {
mkdir -p /etc/kubernetes/
# Shared configuration for all Kubernetes components
cat > /etc/kubernetes/config <<EOF
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://${NODE_IP}:8080"
EOF

# Configure the kube-apiserver systemd unit
cat > /etc/systemd/system/kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
After=etcd.service
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
ExecStart=/usr/bin/kube-apiserver \\
            \$KUBE_LOGTOSTDERR \\
            \$KUBE_LOG_LEVEL \\
            \$KUBE_ETCD_SERVERS \\
            \$KUBE_API_ADDRESS \\
            \$KUBE_API_PORT \\
            \$KUBELET_PORT \\
            \$KUBE_ALLOW_PRIV \\
            \$KUBE_SERVICE_ADDRESSES \\
            \$KUBE_ADMISSION_CONTROL \\
            \$KUBE_API_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

# Write the apiserver configuration file
cat > /etc/kubernetes/apiserver <<EOF
KUBE_API_ADDRESS="--advertise-address=${NODE_IP} --bind-address=${NODE_IP} --insecure-bind-address=0.0.0.0"
KUBE_ETCD_SERVERS="--etcd-servers=http://${NODE_IP}:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"
KUBE_API_ARGS="--enable-swagger-ui=true --apiserver-count=3 --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/var/log/apiserver.log"
EOF

# Start kube-apiserver
systemctl start kube-apiserver
[ $? -eq 0 ] || exit
systemctl status kube-apiserver
systemctl enable kube-apiserver
}
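
# (Optional) A quick manual health check against the insecure port; the
# /healthz endpoint is part of the standard apiserver API and should print "ok":
#   curl http://127.0.0.1:8080/healthz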
# Configure kube-controller-manager
function config_controller-manager() {
cat > /etc/systemd/system/kube-controller-manager.service <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/controller-manager
ExecStart=/usr/bin/kube-controller-manager \\
            \$KUBE_LOGTOSTDERR \\
            \$KUBE_LOG_LEVEL \\
            \$KUBE_MASTER \\
            \$KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

cat > /etc/kubernetes/controller-manager <<EOF
KUBE_CONTROLLER_MANAGER_ARGS="--address=127.0.0.1 --service-cluster-ip-range=10.254.0.0/16 --cluster-name=kubernetes"
EOF

# Start kube-controller-manager
systemctl start kube-controller-manager
[ $? -eq 0 ] || exit
systemctl status kube-controller-manager
systemctl enable kube-controller-manager
}

# Configure kube-scheduler
function config_scheduler() {
cat > /usr/lib/systemd/system/kube-scheduler.service <<EOF
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/scheduler
ExecStart=/usr/bin/kube-scheduler \\
            \$KUBE_LOGTOSTDERR \\
            \$KUBE_LOG_LEVEL \\
            \$KUBE_MASTER \\
            \$KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

# Write the kube-scheduler configuration file
cat > /etc/kubernetes/scheduler <<EOF
KUBE_SCHEDULER_ARGS="--address=127.0.0.1"
EOF

# Start kube-scheduler
systemctl start kube-scheduler
[ $? -eq 0 ] || exit
systemctl status kube-scheduler
systemctl enable kube-scheduler

# Verify the master components
kubectl get cs
}

# ---- Configure and start the Kubernetes node components ----

# Configure kubelet
function config_kubelet() {
cat > /usr/lib/systemd/system/kubelet.service <<EOF
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/bin/kubelet \\
            \$KUBE_LOGTOSTDERR \\
            \$KUBE_LOG_LEVEL \\
            \$KUBELET_ADDRESS \\
            \$KUBELET_PORT \\
            \$KUBELET_HOSTNAME \\
            \$KUBE_ALLOW_PRIV \\
            \$KUBELET_POD_INFRA_CONTAINER \\
            \$KUBELET_ARGS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF

mkdir -p /var/lib/kubelet
export KUBECONFIG_DIR=/etc/kubernetes

cat > "${KUBECONFIG_DIR}/kubelet.kubeconfig" <<EOF
apiVersion: v1
kind: Config
clusters:
  - cluster:
      server: http://${NODE_IP}:8080/
    name: local
contexts:
  - context:
      cluster: local
    name: local
current-context: local
EOF

cat > /etc/kubernetes/kubelet <<EOF
KUBELET_ADDRESS="--address=${NODE_IP}"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=master"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=hub.c.163.com/k8s163/pause-amd64:3.0"
KUBELET_ARGS="--kubeconfig=/etc/kubernetes/kubelet.kubeconfig --fail-swap-on=false --cluster-dns=10.254.0.2 --cluster-domain=cluster.local. --serialize-image-pulls=false"
EOF

# Start kubelet
systemctl start kubelet
[ $? -eq 0 ] || exit
systemctl status kubelet
systemctl enable kubelet
}

# Configure kube-proxy
function config_proxy() {
cat > /etc/systemd/system/kube-proxy.service <<EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/proxy
ExecStart=/usr/bin/kube-proxy \\
            \$KUBE_LOGTOSTDERR \\
            \$KUBE_LOG_LEVEL \\
            \$KUBE_MASTER \\
            \$KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

# Write the kube-proxy configuration file
cat > /etc/kubernetes/proxy <<EOF
KUBE_PROXY_ARGS="--bind-address=${NODE_IP} --hostname-override=${NODE_IP} --cluster-cidr=10.254.0.0/16"
EOF

# Start kube-proxy
systemctl start kube-proxy
[ $? -eq 0 ] || exit
systemctl status kube-proxy
systemctl enable kube-proxy
}

# Show node and cluster status
function view_status() {
kubectl get nodes -o wide
kubectl get nodes --show-labels
kubectl version --short
kubectl cluster-info
}

# Deploy the KubeDNS add-on
function deploy_kubedns() {
cat > kube-dns.yaml <<EOF
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.254.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      containers:
      - name: kubedns
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-kube-dns-amd64:1.14.8
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=cluster.local.
        - --kube-master-url=http://${NODE_IP}:8080
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.8
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --no-negcache
        - --log-facility=-
        - --server=/cluster.local/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-sidecar-amd64:1.14.8
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default  # Don't use cluster DNS.
      serviceAccountName: kube-dns
EOF

kubectl create -f kube-dns.yaml
sleep 30
kubectl get pod -n kube-system

kubectl get service -n kube-system | grep dns
}

# Deploy the Heapster add-on
function deploy_heapster() {
cat > heapster-rbac.yaml <<EOF
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: heapster
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:heapster
subjects:
- kind: ServiceAccount
  name: heapster
  namespace: kube-system
EOF

cat > grafana.yaml <<EOF
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: monitoring-grafana
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: grafana
    spec:
      containers:
      - name: grafana
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/heapster-grafana-amd64:v4.4.3
        ports:
        - containerPort: 3000
          protocol: TCP
        volumeMounts:
        - mountPath: /etc/ssl/certs
          name: ca-certificates
          readOnly: true
        - mountPath: /var
          name: grafana-storage
        env:
        - name: INFLUXDB_HOST
          value: monitoring-influxdb
        - name: GF_SERVER_HTTP_PORT
          value: "3000"
        - name: GF_AUTH_BASIC_ENABLED
          value: "false"
        - name: GF_AUTH_ANONYMOUS_ENABLED
          value: "true"
        - name: GF_AUTH_ANONYMOUS_ORG_ROLE
          value: Admin
        - name: GF_SERVER_ROOT_URL
          value: /
      volumes:
      - name: ca-certificates
        hostPath:
          path: /etc/ssl/certs
      - name: grafana-storage
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: monitoring-grafana
  name: monitoring-grafana
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 3000
  selector:
    k8s-app: grafana
EOF

cat > heapster.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: heapster
    spec:
      serviceAccountName: heapster
      containers:
      - name: heapster
        image: registry.cn-hangzhou.aliyuncs.com/google-containers/heapster-amd64:v1.4.0
        imagePullPolicy: IfNotPresent
        command:
        - /heapster
        - --source=kubernetes:http://${NODE_IP}:8080?inClusterConfig=false
        - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086
---
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: Heapster
  name: heapster
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082
  selector:
    k8s-app: heapster
EOF

cat > influxdb.yaml <<EOF
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: monitoring-influxdb
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: influxdb
    spec:
      containers:
      - name: influxdb
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/heapster-influxdb-amd64:v1.3.3
        volumeMounts:
        - mountPath: /data
          name: influxdb-storage
      volumes:
      - name: influxdb-storage
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: monitoring-influxdb
  name: monitoring-influxdb
  namespace: kube-system
spec:
  ports:
  - port: 8086
    targetPort: 8086
  selector:
    k8s-app: influxdb
EOF

kubectl create -f heapster-rbac.yaml -f grafana.yaml -f heapster.yaml -f influxdb.yaml

# Check the results
kubectl get deployments -n kube-system | grep -E 'heapster|monitoring'

# Check pods and services
kubectl get pods -n kube-system | grep -E 'heapster|monitoring'
kubectl get svc -n kube-system | grep -E 'heapster|monitoring'

# Show cluster info
kubectl cluster-info
}

# Deploy the Kubernetes Dashboard
function deploy_dashboard() {
cat > kubernetes-dashboard.yaml <<EOF
# ------------------- Dashboard Service Account ------------------- #

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Role & Role Binding ------------------- #

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Deployment ------------------- #

kind: Deployment
apiVersion: apps/v1beta2
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.8.3
        ports:
        - containerPort: 9090
          protocol: TCP
        args:
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
          - --apiserver-host=http://${NODE_IP}:8080
        volumeMounts:
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule

---
# ------------------- Dashboard Service ------------------- #

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 9090
  selector:
    k8s-app: kubernetes-dashboard
EOF

kubectl create -f kubernetes-dashboard.yaml

# Check the kubernetes-dashboard service
kubectl get pods -n kube-system | grep dashboard
}

function main() {
    get_local_ip
    basic_settings
    install_docker
    install_etcd
    install_kubernetes
    install_flanneld
    config_etcd
    config_flanneld
    config_apiserver
    config_controller-manager
    config_scheduler
    config_kubelet
    config_proxy
    view_status
    deploy_kubedns
    deploy_heapster
    deploy_dashboard
}

main

Access the kubernetes-dashboard (from the master node, or substitute the node IP for localhost):

http://localhost:8080/api/v1/namespaces/kube-system/services/kubernetes-dashboard/proxy/
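
As an optional smoke test (the pod and deployment names below are arbitrary examples, not part of the original setup), you can confirm that scheduling and KubeDNS both work:

# Resolve the kubernetes service through KubeDNS from a throwaway pod
kubectl run -i --tty --rm busybox --image=busybox --restart=Never -- nslookup kubernetes.default.svc.cluster.local

# Launch an nginx deployment, expose it, and check the results
kubectl run nginx --image=nginx --port=80
kubectl expose deployment nginx --port=80
kubectl get pods,svc -o wide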

Files referenced in this article:

Link: https://share.weiyun.com/229533b44fef0ba8506d8073a76cc426  Password: fmkczc