- 关闭防火墙
[root@iZ2zei14p015t8wkktdhn2Z ~]# systemctl stop firewalld [root@iZ2zei14p015t8wkktdhn2Z ~]# systemctl disable firewalld
- 关闭selinux
[root@iZ2zei14p015t8wkktdhn2Z ~]# sed -i 's/enforcing/disabled/' /etc/selinux/config [root@iZ2zei14p015t8wkktdhn2Z ~]# setenforce 0 setenforce: SELinux is disabled
- 关闭swap
[root@iZ2zei14p015t8wkktdhn2Z ~]# swapoff -a [root@iZ2zei14p015t8wkktdhn2Z ~]# vim /etc/fstab 注释或者删掉里面的swap
- 将桥接的IPv4流量传递到iptables的链
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 net.ipv4.ip_forward = 1 EOF sysctl --system modprobe br_netfilter
- 安装Docker
[root@iZ2zei14p015t8wkktdhn3Z ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
[root@iZ2zei14p015t8wkktdhn3Z ~]# yum install -y docker-ce-19.03.9-3.el7
[root@iZ2zei14p015t8wkktdhn3Z ~]# curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io
[root@iZ2zei14p015t8wkktdhn3Z ~]# systemctl enable docker && systemctl start docker
[root@iZ2zei14p015t8wkktdhn2Z ~]# docker --version
Docker version 19.03.8, build afacb8b
- 添加阿里云YUM软件源
cat > /etc/yum.repos.d/kubernetes.repo << EOF [kubernetes] name=Kubernetes baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64 enabled=1 gpgcheck=0 repo_gpgcheck=0 gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg EOF
安装kubeadm,kubelet和kubectl
- 安装
[root@iZ2ze9lpuyxnf8j4d15vkgZ ~]# yum install -y kubelet-1.18.3 kubeadm-1.18.3 kubectl-1.18.3
- docker的Cgroup Driver和kubelet的Cgroup Driver一致
[root@iZ2ze9lpuyxnf8j4d15vkgZ ~]# vim /usr/lib/systemd/system/docker.service ExecStart=/usr/bin/dockerd --exec-opt native.cgroupdriver=systemd -H fd:// --containerd=/run/containerd/containerd.sock [root@iZ2ze9lpuyxnf8j4d15vkgZ ~]# systemctl daemon-reload [root@iZ2ze9lpuyxnf8j4d15vkgZ ~]# systemctl restart docker [root@iZ2ze9lpuyxnf8j4d15vkgZ ~]# docker info | grep Cgroup Cgroup Driver: systemd [root@iZ2ze2jlupmjlwwfuyg30gZ ~]# systemctl enable kubelet.service Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
- 初始化
[root@iZ2ze2jlupmjlwwfuyg30gZ ~]# kubeadm init \ --apiserver-advertise-address=10.10.1.0 \ --image-repository registry.aliyuncs.com/google_containers \ --kubernetes-version v1.18.3 \ --service-cidr=10.1.0.0/16 \ --pod-network-cidr=10.244.0.0/16 W0701 13:48:17.371453 2194 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io] [init] Using Kubernetes version: v1.18.3 [preflight] Running pre-flight checks [preflight] Pulling images required for setting up a Kubernetes cluster [preflight] This might take a minute or two, depending on the speed of your internet connection [preflight] You can also perform this action in beforehand using 'kubeadm config images pull' [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" [kubelet-start] Starting the kubelet [certs] Using certificateDir folder "/etc/kubernetes/pki" [certs] Generating "ca" certificate and key [certs] Generating "apiserver" certificate and key [certs] apiserver serving cert is signed for DNS names [iz2ze2jlupmjlwwfuyg30gz kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.1.0.1 10.10.1.0] [certs] Generating "apiserver-kubelet-client" certificate and key [certs] Generating "front-proxy-ca" certificate and key [certs] Generating "front-proxy-client" certificate and key [certs] Generating "etcd/ca" certificate and key [certs] Generating "etcd/server" certificate and key [certs] etcd/server serving cert is signed for DNS names [iz2ze2jlupmjlwwfuyg30gz localhost] and IPs [10.10.1.0 127.0.0.1 ::1] [certs] Generating "etcd/peer" certificate and key [certs] etcd/peer serving cert is signed for DNS names [iz2ze2jlupmjlwwfuyg30gz localhost] and IPs [10.10.1.0 127.0.0.1 ::1] [certs] Generating "etcd/healthcheck-client" certificate and key [certs] Generating 
"apiserver-etcd-client" certificate and key [certs] Generating "sa" key and public key [kubeconfig] Using kubeconfig folder "/etc/kubernetes" [kubeconfig] Writing "admin.conf" kubeconfig file [kubeconfig] Writing "kubelet.conf" kubeconfig file [kubeconfig] Writing "controller-manager.conf" kubeconfig file [kubeconfig] Writing "scheduler.conf" kubeconfig file [control-plane] Using manifest folder "/etc/kubernetes/manifests" [control-plane] Creating static Pod manifest for "kube-apiserver" [control-plane] Creating static Pod manifest for "kube-controller-manager" W0701 13:48:49.219224 2194 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC" [control-plane] Creating static Pod manifest for "kube-scheduler" W0701 13:48:49.220708 2194 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC" [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s [apiclient] All control plane components are healthy after 21.501457 seconds [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace [kubelet] Creating a ConfigMap "kubelet-config-1.18" in namespace kube-system with the configuration for the kubelets in the cluster [upload-certs] Skipping phase. 
Please see --upload-certs [mark-control-plane] Marking the node iz2ze2jlupmjlwwfuyg30gz as control-plane by adding the label "node-role.kubernetes.io/master=''" [mark-control-plane] Marking the node iz2ze2jlupmjlwwfuyg30gz as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] [bootstrap-token] Using token: njjyan.y1c6mpmr2v5wz319 [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key [addons] Applied essential addon: CoreDNS [addons] Applied essential addon: kube-proxy Your Kubernetes control-plane has initialized successfully! To start using your cluster, you need to run the following as a regular user: mkdir -p $HOME/.kube sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config sudo chown $(id -u):$(id -g) $HOME/.kube/config You should now deploy a pod network to the cluster. 
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: https://kubernetes.io/docs/concepts/cluster-administration/addons/ Then you can join any number of worker nodes by running the following on each as root: kubeadm join 10.10.1.0:6443 --token njjyan.y1c6mpmr2v5wz319 \ --discovery-token-ca-cert-hash sha256:25cbaa4ad6e4f37f2d56868e6fe25120063530387d31b4b3d29de78e3b8bc198 [root@iZ2ze2jlupmjlwwfuyg30gZ ~]# mkdir -p $HOME/.kube [root@iZ2ze2jlupmjlwwfuyg30gZ ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config [root@iZ2ze2jlupmjlwwfuyg30gZ ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
- 单机版Kubernetes为了能在master节点上运行Pod,需要删除master节点上的Taint(污点),允许master调度并执行Pod。
[root@iZ2ze2jlupmjlwwfuyg30gZ ~]# kubectl taint nodes --all node-role.kubernetes.io/master- node/iz2ze2jlupmjlwwfuyg30gz untainted
- 至此,单机版的Kubernetes就搭建完成了。
验证master节点信息
[root@iZ2ze2jlupmjlwwfuyg30gZ ~]# kubectl get cs NAME STATUS MESSAGE ERROR controller-manager Healthy ok scheduler Healthy ok etcd-0 Healthy {"health":"true"} [root@iZ2ze2jlupmjlwwfuyg30gZ ~]# kubectl get node NAME STATUS ROLES AGE VERSION iz2ze2jlupmjlwwfuyg30gz NotReady master 2m44s v1.18.3
- 安装Pod网络插件
[root@iZ2ze2jlupmjlwwfuyg30gZ ~]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml [root@iZ2ze2jlupmjlwwfuyg30gZ ~]# kubectl apply -f kube-flannel.yml podsecuritypolicy.policy/psp.flannel.unprivileged created clusterrole.rbac.authorization.k8s.io/flannel created clusterrolebinding.rbac.authorization.k8s.io/flannel created serviceaccount/flannel created configmap/kube-flannel-cfg created daemonset.apps/kube-flannel-ds-amd64 created daemonset.apps/kube-flannel-ds-arm64 created daemonset.apps/kube-flannel-ds-arm created daemonset.apps/kube-flannel-ds-ppc64le created daemonset.apps/kube-flannel-ds-s390x created
安装完成后使用 kubectl get pods 命令可以查看到我们集群中的组件运行状态,如果都是 Running 状态的话,那么恭喜你,你的单节点安装成功了。
[root@iZ2ze2jlupmjlwwfuyg30gZ ~]# kubectl get pods --all-namespaces NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-7ff77c879f-2qc68 1/1 Running 0 12m kube-system coredns-7ff77c879f-khbjf 1/1 Running 0 12m kube-system etcd-iz2ze2jlupmjlwwfuyg30gz 1/1 Running 0 12m kube-system kube-apiserver-iz2ze2jlupmjlwwfuyg30gz 1/1 Running 0 12m kube-system kube-controller-manager-iz2ze2jlupmjlwwfuyg30gz 1/1 Running 0 12m kube-system kube-flannel-ds-amd64-dfmck 1/1 Running 0 2m15s kube-system kube-proxy-tbqrx 1/1 Running 0 12m kube-system kube-scheduler-iz2ze2jlupmjlwwfuyg30gz 1/1 Running 0 12m [root@iZ2ze2jlupmjlwwfuyg30gZ ~]# kubectl get pods -n kube-system NAME READY STATUS RESTARTS AGE coredns-7ff77c879f-2qc68 1/1 Running 0 12m coredns-7ff77c879f-khbjf 1/1 Running 0 12m etcd-iz2ze2jlupmjlwwfuyg30gz 1/1 Running 0 12m kube-apiserver-iz2ze2jlupmjlwwfuyg30gz 1/1 Running 0 12m kube-controller-manager-iz2ze2jlupmjlwwfuyg30gz 1/1 Running 0 12m kube-flannel-ds-amd64-dfmck 1/1 Running 0 2m25s kube-proxy-tbqrx 1/1 Running 0 12m kube-scheduler-iz2ze2jlupmjlwwfuyg30gz 1/1 Running 0 12m
- 在Kubernetes集群中创建一个pod,验证是否正常运行
[root@iZ2ze2jlupmjlwwfuyg30gZ ~]# kubectl create deployment nginx --image=nginx deployment.apps/nginx created [root@iZ2ze2jlupmjlwwfuyg30gZ ~]# kubectl expose deployment nginx --port=80 --type=NodePort service/nginx exposed [root@iZ2ze2jlupmjlwwfuyg30gZ ~]# kubectl get pod,svc NAME READY STATUS RESTARTS AGE pod/nginx 1/1 Running 0 47s pod/nginx-f89759699-c4dfv 1/1 Running 0 21s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/kubernetes ClusterIP 10.1.0.1 <none> 443/TCP 18m service/nginx NodePort 10.1.11.16 <none> 80:32693/TCP 11s
- 验证,输入节点IP加暴露的端口即可
[root@iZ2ze2jlupmjlwwfuyg30gZ ~]# curl 10.10.1.0:32693 <!DOCTYPE html> <html> <head> <title>Welcome to nginx!</title> <style> body { width: 35em; margin: 0 auto; font-family: Tahoma, Verdana, Arial, sans-serif; } </style> </head> <body> <h1>Welcome to nginx!</h1> <p>If you see this page, the nginx web server is successfully installed and working. Further configuration is required.</p> <p>For online documentation and support please refer to <a href="http://nginx.org/">nginx.org</a>.<br/> Commercial support is available at <a href="http://nginx.com/">nginx.com</a>.</p> <p><em>Thank you for using nginx.</em></p> </body> </html>
- 扩容副本
[root@iZ2ze2jlupmjlwwfuyg30gZ ~]# kubectl scale deployment nginx --replicas=3 deployment.apps/nginx scaled [root@iZ2ze2jlupmjlwwfuyg30gZ ~]# kubectl get pod,svc NAME READY STATUS RESTARTS AGE pod/nginx 1/1 Running 0 3m41s pod/nginx-f89759699-c4dfv 1/1 Running 0 3m15s pod/nginx-f89759699-sz889 1/1 Running 0 10s pod/nginx-f89759699-vhwd5 1/1 Running 0 10s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/kubernetes ClusterIP 10.1.0.1 <none> 443/TCP 21m service/nginx NodePort 10.1.11.16 <none> 80:32693/TCP 3m5s
- 查看pod的详细信息
[root@iZ2ze2jlupmjlwwfuyg30gZ ~]# kubectl get pod -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES nginx 1/1 Running 0 4m13s 10.244.0.6 iz2ze2jlupmjlwwfuyg30gz <none> <none> nginx-f89759699-c4dfv 1/1 Running 0 3m47s 10.244.0.7 iz2ze2jlupmjlwwfuyg30gz <none> <none> nginx-f89759699-sz889 1/1 Running 0 42s 10.244.0.9 iz2ze2jlupmjlwwfuyg30gz <none> <none> nginx-f89759699-vhwd5 1/1 Running 0 42s 10.244.0.8 iz2ze2jlupmjlwwfuyg30gz <none> <none>
继续阅读
- 我的QQ
- QQ扫一扫
-
- 我的头条
- 头条扫一扫
-
评论