
Installing a Kubernetes cluster with ansible

Hardware configuration

4 CPU cores, 8 GB RAM, 50 GB disk

Four servers

Machine plan:

ip              hostname   role
192.168.1.210   master     deploy, master1, lb1, etcd
192.168.1.211   node1      etcd, node
192.168.1.212   node2      etcd, node
192.168.1.213   master2    master2, lb2
192.168.1.215   vip        (floating VIP for the load balancers, not a separate machine)

Software

Set the hostnames

[root@elasticsearch1 ~]# hostnamectl set-hostname master

[root@localhost ~]# hostnamectl set-hostname node1

[root@localhost ~]# hostnamectl set-hostname node2

[root@bogon ~]# hostnamectl set-hostname master2
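
Optionally, so the machines can reach one another by the names above, you can append the plan to /etc/hosts on every machine. This is a convenience sketch only; the playbooks below address the nodes by IP:

# Map the planned hostnames (run on all four machines; IPs from the plan above)
cat >> /etc/hosts <<'EOF'
192.168.1.210 master
192.168.1.211 node1
192.168.1.212 node2
192.168.1.213 master2
EOF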

Sync the clocks

[root@master ~]# yum install ntp -y 
[root@master ~]# ntpdate ntp1.aliyun.com
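
A one-shot ntpdate will drift again over time. One option (an extra step, not part of the original procedure) is to leave the ntpd daemon from the ntp package running:

# Keep the clocks continuously synced instead of relying on a single ntpdate run
systemctl enable ntpd
systemctl start ntpd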

Run the time sync on all four machines.

Install python and ansible


[root@elasticsearch ~]# yum install epel-release -y
[root@bogon ~]# yum install python -y
[root@bogon ~]# cat /etc/redhat-release 
CentOS Linux release 7.6.1810 (Core)
[root@elasticsearch ~]# yum install -y python-pip git
[root@elasticsearch ~]# pip install pip --upgrade -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com
[root@elasticsearch ~]# pip install --no-cache-dir ansible -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com
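
A quick sanity check that the pip-installed ansible landed on the PATH; the exact version printed depends on what pip resolved:

# Prints the ansible version and the python interpreter it runs under
ansible --version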

Set up passwordless login from the deploy node

[root@elasticsearch ~]# ssh-keygen 
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:KEB2qQlmGxa019u9HwFa/j/uvrzUXbu9+pOgBcZ4TeI root@elasticsearch.gr-data.uat
The key's randomart image is:
+---[RSA 2048]----+
|.o+ ..          |
|.B..o . .       |
|+o++ . o+ +      |
| .+. o.=..E .    |
|    ...oSoo..   .|
|    . o .o.     +|
|         . oo..oo|
|          ..=. oo|
|           .+X*++|
+----[SHA256]-----+

Copy the key to the other nodes

[root@elasticsearch ~]# for ip in 211 212 213; do ssh-copy-id 192.168.1.$ip; done
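
To confirm the keys really work, a small verification loop mirroring the one above; BatchMode makes ssh fail loudly instead of falling back to a password prompt:

# Each line should print the remote hostname without any password prompt
for ip in 211 212 213; do ssh -o BatchMode=yes 192.168.1.$ip hostname; done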

Set up the k8s playbooks on the deploy node

[root@elasticsearch ~]# cd /usr/local/src/
[root@elasticsearch src]# git clone https://github.com/gjmzj/kubeasz.git
Cloning into 'kubeasz'...
remote: Enumerating objects: 42, done.
remote: Counting objects: 100% (42/42), done.
remote: Compressing objects: 100% (32/32), done.
remote: Total 5545 (delta 16), reused 25 (delta 6), pack-reused 5503
Receiving objects: 100% (5545/5545), 2.38 MiB | 213.00 KiB/s, done.
Resolving deltas: 100% (3060/3060), done.
[root@elasticsearch src]# mkdir -p /etc/ansible
[root@elasticsearch src]# mv kubeasz/* /etc/ansible/

Configure the ansible roles

[root@elasticsearch src]# cd /etc/ansible/
[root@elasticsearch ansible]# ll
total 84
-rw-r--r--.  1 root root   499 Mar  7 16:08 01.prepare.yml
-rw-r--r--.  1 root root    58 Mar  7 16:08 02.etcd.yml
-rw-r--r--.  1 root root    87 Mar  7 16:08 03.docker.yml
-rw-r--r--.  1 root root   532 Mar  7 16:08 04.kube-master.yml
-rw-r--r--.  1 root root    72 Mar  7 16:08 05.kube-node.yml
-rw-r--r--.  1 root root   346 Mar  7 16:08 06.network.yml
-rw-r--r--.  1 root root    77 Mar  7 16:08 07.cluster-addon.yml
-rw-r--r--.  1 root root  1521 Mar  7 16:08 11.harbor.yml
-rw-r--r--.  1 root root   411 Mar  7 16:08 22.upgrade.yml
-rw-r--r--.  1 root root  1394 Mar  7 16:08 23.backup.yml
-rw-r--r--.  1 root root  1391 Mar  7 16:08 24.restore.yml
-rw-r--r--.  1 root root  1723 Mar  7 16:08 90.setup.yml
-rw-r--r--.  1 root root  5917 Mar  7 16:08 99.clean.yml
-rw-r--r--.  1 root root 10283 Mar  7 16:08 ansible.cfg
drwxr-xr-x.  2 root root    23 Mar  7 16:08 bin
drwxr-xr-x.  4 root root    36 Mar  7 16:08 dockerfiles
drwxr-xr-x.  8 root root    92 Mar  7 16:08 docs
drwxr-xr-x.  2 root root    47 Mar  7 16:08 down
drwxr-xr-x.  2 root root   254 Mar  7 16:08 example
drwxr-xr-x. 14 root root   218 Mar  7 16:08 manifests
drwxr-xr-x.  2 root root   245 Mar  7 16:08 pics
-rw-r--r--.  1 root root  5020 Mar  7 16:08 README.md
drwxr-xr-x. 22 root root  4096 Mar  7 16:08 roles
drwxr-xr-x.  2 root root  4096 Mar  7 16:08 tools
[root@elasticsearch ansible]# cp example/hosts.m-masters.example hosts

Fill in the hosts file

[root@elasticsearch ansible]# vim hosts

# Cluster deploy node: usually the node that runs the ansible playbooks
# The variable NTP_ENABLED (=yes/no) controls whether chrony time sync is installed across the cluster
[deploy]
192.168.1.210 NTP_ENABLED=no

# For the etcd cluster, provide NODE_NAMEs as below; note etcd must have an odd number of nodes: 1, 3, 5, 7...
[etcd]
192.168.1.210 NODE_NAME=etcd1
192.168.1.211 NODE_NAME=etcd2
192.168.1.212 NODE_NAME=etcd3

[kube-master]
192.168.1.210
192.168.1.213

[kube-node]
192.168.1.211
192.168.1.212

# Parameter NEW_INSTALL: yes means deploy a new harbor, no means use an existing harbor server
# If you do not use a domain name, set HARBOR_DOMAIN=""
[harbor]
#192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no

# Load balancers (more than 2 nodes are now supported, though 2 are usually enough); installs haproxy+keepalived
[lb]
192.168.1.210 LB_ROLE=backup
192.168.1.213 LB_ROLE=master

# [Optional] external load balancers, e.g. for forwarding traffic to services exposed via NodePort in self-hosted environments
[ex-lb]
#192.168.1.6 LB_ROLE=backup EX_VIP=192.168.1.250
#192.168.1.7 LB_ROLE=master EX_VIP=192.168.1.250

[all:vars]
# --------- main cluster parameters ---------------
# Deploy mode: allinone, single-master, multi-master
DEPLOY_MODE=multi-master

# Cluster MASTER IP, i.e. the VIP on the LB nodes; the VIP listens on service port 8443 to distinguish it from the default apiserver port
# On public clouds, use the cloud load balancer's internal address and listening port instead
MASTER_IP="192.168.1.215"
KUBE_APISERVER="https://{{ MASTER_IP }}:8443"

# Cluster network plugin: calico, flannel, kube-router and cilium are currently supported
CLUSTER_NETWORK="flannel"

# Service CIDR; make sure it does not overlap any existing internal network
SERVICE_CIDR="10.68.0.0/16"

# Pod CIDR (Cluster CIDR); make sure it does not overlap any existing internal network
CLUSTER_CIDR="172.20.0.0/16"

# NodePort range
NODE_PORT_RANGE="20000-40000"

# kubernetes service IP (pre-allocated, usually the first IP in SERVICE_CIDR)
CLUSTER_KUBERNETES_SVC_IP="10.68.0.1"

# Cluster DNS service IP (pre-allocated from SERVICE_CIDR)
CLUSTER_DNS_SVC_IP="10.68.0.2"

# Cluster DNS domain
CLUSTER_DNS_DOMAIN="cluster.local."

# Username and password for the cluster's basic auth (a random password is generated at run time)
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="test1234"


# --------- additional parameters --------------------
# Default binary directory
bin_dir="/opt/kube/bin"

# Certificate directory
ca_dir="/etc/kubernetes/ssl"

# Deploy directory, i.e. the ansible working directory; changing it is not recommended
base_dir="/etc/ansible"
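
Before running any playbooks you can confirm the inventory parses and contains the hosts you expect:

# Lists every host ansible resolves from /etc/ansible/hosts
ansible all --list-hosts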

Test

[root@master ~]# ansible all -m ping
192.168.1.213 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
192.168.1.211 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
192.168.1.212 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
192.168.1.210 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}

Binary download link

https://pan.baidu.com/s/1c4RFaA#list/path=%2FK8S

Unpack

[root@master ~]# tar zxf k8s.1-13-3.tar.gz
[root@master ~]# mv bin/* /etc/ansible/bin/
mv: overwrite '/etc/ansible/bin/readme.md'? yes
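
A quick check that the unpacked binaries are the release you expect (assuming the bundle ships kube-apiserver in bin/, as the kubeasz layout suggests):

# Report the version of the apiserver binary the playbooks will distribute
/etc/ansible/bin/kube-apiserver --version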

Step-by-step installation

Create certificates and prepare for installation

[root@master ansible]# ansible-playbook 01.prepare.yml

Install the etcd cluster

[root@master ansible]# ansible-playbook 02.etcd.yml
[root@master ansible]# bash

Check the health of the etcd nodes (the bash command above opens a fresh shell so that the newly installed etcdctl is on the PATH):

# Query each etcd member over TLS, using the CA and client certs the playbook generated
export NODE_IPS="192.168.1.210 192.168.1.211 192.168.1.212"
for ip in ${NODE_IPS}; do
  ETCDCTL_API=3 etcdctl \
  --endpoints=https://${ip}:2379 \
  --cacert=/etc/kubernetes/ssl/ca.pem \
  --cert=/etc/etcd/ssl/etcd.pem \
  --key=/etc/etcd/ssl/etcd-key.pem \
  endpoint health; done

Output like the following means the cluster is healthy:

https://192.168.1.210:2379 is healthy: successfully committed proposal: took = 1.112025ms
https://192.168.1.211:2379 is healthy: successfully committed proposal: took = 1.574933ms
https://192.168.1.212:2379 is healthy: successfully committed proposal: took = 1.526637ms

Install docker

[root@master ansible]# ansible-playbook 03.docker.yml

Install the master nodes

[root@master ansible]# ansible-playbook 04.kube-master.yml

Check the cluster status

[root@master ansible]# kubectl get componentstatus
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-1               Healthy   {"health": "true"}
etcd-2               Healthy   {"health": "true"}
etcd-0               Healthy   {"health": "true"}

Install the worker nodes

[root@master ansible]# ansible-playbook 05.kube-node.yml

Check the nodes

[root@master ansible]# kubectl get nodes
NAME            STATUS                     ROLES    AGE    VERSION
192.168.1.210   Ready,SchedulingDisabled   master   115s   v1.13.3
192.168.1.211   Ready                      node     19s    v1.13.3
192.168.1.212   Ready                      node     19s    v1.13.3
192.168.1.213   Ready,SchedulingDisabled   master   111s   v1.13.3
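
The masters show SchedulingDisabled because the playbooks cordon them so that ordinary workloads land on the worker nodes. If you do want pods on a master, it can be uncordoned; this is optional and shown only for illustration:

# Inspect the flag, then (only if desired) allow scheduling on a master
kubectl describe node 192.168.1.210 | grep -i unschedulable
# kubectl uncordon 192.168.1.210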

Deploy the cluster network

[root@master ansible]# ansible-playbook 06.network.yml

Check the pods in the kube-system namespace

[root@master ansible]# kubectl get pod -n kube-system
NAME                          READY   STATUS    RESTARTS   AGE
kube-flannel-ds-amd64-ll4f8   1/1     Running   0          60s
kube-flannel-ds-amd64-lnp8m   1/1     Running   0          60s
kube-flannel-ds-amd64-npxbx   1/1     Running   0          60s
kube-flannel-ds-amd64-tn6p4   1/1     Running   0          60s

Install the cluster add-ons (dns, dashboard)

[root@master ansible]# ansible-playbook 07.cluster-addon.yml

Check the services in the kube-system namespace

[root@master ansible]# kubectl get svc -n kube-system
NAME                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE
kube-dns               ClusterIP   10.68.0.2       <none>        53/UDP,53/TCP,9153/TCP   109s
kubernetes-dashboard   NodePort    10.68.78.255    <none>        443:38582/TCP            107s
metrics-server         ClusterIP   10.68.139.128   <none>        443/TCP                  112s
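
Note that the dashboard's NodePort (38582 here) is allocated from NODE_PORT_RANGE and will differ per cluster; one way to look it up:

# Print the NodePort assigned to the dashboard service
kubectl get svc kubernetes-dashboard -n kube-system -o jsonpath='{.spec.ports[0].nodePort}'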

If you would rather not install step by step as above, a single command runs the whole installation:

ansible-playbook 90.setup.yml
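
The listing earlier also shows a 99.clean.yml; by the same pattern it tears everything back down. It is destructive, so run it only on a cluster you are willing to rebuild:

# Removes everything the setup playbooks installed
ansible-playbook 99.clean.yml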

View cluster info

[root@master ansible]# kubectl cluster-info
Kubernetes master is running at https://192.168.1.215:8443
CoreDNS is running at https://192.168.1.215:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
kubernetes-dashboard is running at https://192.168.1.215:8443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

Check node/pod resource usage:

[root@master ansible]# kubectl top node
NAME            CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
192.168.1.210   79m          1%     2635Mi          35%
192.168.1.211   41m          1%     974Mi           12%
192.168.1.212   44m          1%     1010Mi          13%
192.168.1.213   44m          1%     1523Mi          20%
[root@master ansible]# kubectl top pod --all-namespaces
NAMESPACE     NAME                                    CPU(cores)   MEMORY(bytes)
kube-system   coredns-dc8bbbcf9-h5c4h                 2m           13Mi
kube-system   coredns-dc8bbbcf9-tnhkl                 2m           14Mi
kube-system   kube-flannel-ds-amd64-ll4f8             1m           12Mi
kube-system   kube-flannel-ds-amd64-lnp8m             2m           10Mi
kube-system   kube-flannel-ds-amd64-npxbx             1m           15Mi
kube-system   kube-flannel-ds-amd64-tn6p4             1m           14Mi
kube-system   kubernetes-dashboard-6685cb584f-jtz2s   1m           12Mi
kube-system   metrics-server-79558444c6-f2hrf         1m           15Mi

Test DNS

[root@master ansible]# kubectl run nginx --image=nginx --expose --port=80
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
service/nginx created
deployment.apps/nginx created
[root@master ansible]# kubectl get svc
NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.68.0.1      <none>        443/TCP   16m
nginx        ClusterIP   10.68.242.35   <none>        80/TCP    29s

Check that it came up

[root@master ansible]# kubectl get pod
NAME                     READY   STATUS    RESTARTS   AGE
nginx-57867cc648-bwzfq   1/1     Running   0          16m

Create a busybox test pod

[root@master ansible]# kubectl run busybox --rm -it --image=busybox /bin/sh
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
If you don't see a command prompt, try pressing enter.

Query the service from inside the pod

/ # nslookup nginx.default.svc.cluster.local
Server: 10.68.0.2
Address: 10.68.0.2:53

Name: nginx.default.svc.cluster.local
Address: 10.68.242.35

*** Can't find nginx.default.svc.cluster.local: No answer

The service name resolves to its ClusterIP (10.68.242.35), so DNS is working; the trailing "No answer" line comes from the additional, empty AAAA/IPv6 lookup that newer busybox images perform and can be ignored.
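
When you are done testing, clean up the nginx resources (the busybox pod was started with --rm, so it is removed automatically when you exit its shell):

# Delete the test deployment and its service
kubectl delete deployment nginx
kubectl delete svc nginx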

Adding a node

Set up passwordless login from the deploy node to the new node

ssh-copy-id <new-node-ip>

Edit /etc/ansible/hosts

[new-node]
172.7.15.117

Run the install playbook

ansible-playbook /etc/ansible/20.addnode.yml

Verify

kubectl get node
kubectl get pod -n kube-system -o wide

Follow-up: edit /etc/ansible/hosts and move all the IPs under new-node into the kube-node group, as sketched below.
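
A sketch of what the inventory looks like after the move (illustrative, using the example IP above):

[kube-node]
192.168.1.211
192.168.1.212
172.7.15.117

[new-node]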

Adding a master node (omitted)

https://github.com/gjmzj/kubeasz/blob/master/docs/op/AddMaster.md

