1. Environment
- M1 Mac
- VMware Fusion Tech Preview
- Termius as the terminal/SSH client
- Three CentOS 7 VMs, each with 4 GB of RAM, a 30 GB disk, and 4 vCPUs
  - master: 192.168.2.3
  - node-1: 192.168.2.4
  - node-2: 192.168.2.5
2. Set hostnames
Run the following on all three nodes:
```bash
cat >> /etc/hosts << EOF
192.168.2.3 master
192.168.2.4 node-1
192.168.2.5 node-2
EOF
```
```bash
# on master
hostnamectl set-hostname master
# on node-1
hostnamectl set-hostname node-1
# on node-2
hostnamectl set-hostname node-2
```
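A quick check that the hostnames and /etc/hosts entries line up the way kubeadm expects:
```bash
# the hostname should match what was just set
hostname
# and all three names should resolve through /etc/hosts
getent hosts master node-1 node-2
```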
3. Disable SELinux and swap
```bash
swapoff -a
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.d/k8s.conf
echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.d/k8s.conf
sysctl --system
```
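Note that `swapoff -a` only lasts until the next reboot. A minimal sketch for making it permanent, assuming the default CentOS 7 /etc/fstab with a single swap entry:
```bash
# comment out the swap line in /etc/fstab so swap stays off after reboot
sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab
# the Swap row should now report 0
free -h
```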
4. Install IPVS
```bash
modprobe br_netfilter
echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
echo 1 > /proc/sys/net/ipv4/ip_forward
lsmod | grep br_netfilter
```
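The echo into /proc is also lost on reboot; a sketch that persists IP forwarding in the same sysctl drop-in created in step 3 and verifies both settings:
```bash
# persist IP forwarding alongside the bridge settings from step 3
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.d/k8s.conf
sysctl --system
# both values should print as 1
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward
```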
```bash
yum -y install ipset ipvsadm
```
```bash
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
```
```bash
chmod a+x /etc/sysconfig/modules/ipvs.modules
```
```bash
# run the script
/etc/sysconfig/modules/ipvs.modules
# verify the ipvs modules are loaded
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
```
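Loading the modules by itself does not make kube-proxy use IPVS. Once the cluster is up (after step 8), the mode can be switched in the kube-proxy ConfigMap; this is a sketch assuming the default kubeadm-managed ConfigMap in kube-system:
```bash
# set `mode: "ipvs"` in the config.conf section of the kube-proxy ConfigMap
kubectl -n kube-system edit configmap kube-proxy
# recreate the kube-proxy pods so they pick up the new mode
kubectl -n kube-system delete pod -l k8s-app=kube-proxy
# IPVS virtual servers should now be listed
ipvsadm -Ln
```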
```bash
cat << EOF > /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
```
```bash
modprobe overlay
modprobe br_netfilter
```
5. Install Docker
```bash
yum install -y yum-utils
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
yum install docker-ce-19.03.5 docker-ce-cli-19.03.5
systemctl enable docker
systemctl start docker
```
Configure Docker
```bash
cat << EOF > /etc/docker/daemon.json
{
  "insecure-registries": ["47.99.140.12:8077"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
    "https://l10nt4hq.mirror.aliyuncs.com",
    "https://registry.docker-cn.com",
    "http://hub-mirror.c.163.com",
    "https://docker.mirrors.ustc.edu.cn"
  ]
}
EOF
# reload systemd unit files
systemctl daemon-reload
# restart Docker and check that it is running
systemctl restart docker
systemctl status docker.service
```
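To confirm the config was picked up, in particular the systemd cgroup driver (which must match the kubelet's):
```bash
# should print "Cgroup Driver: systemd"
docker info | grep -i "cgroup driver"
```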
6. Configure the Kubernetes yum repo
```bash
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-aarch64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum clean all
yum makecache
```
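Before pinning a version in the next step, it can be worth checking what the mirror actually provides:
```bash
# list the kubeadm builds available from this repo (newest first)
yum list kubeadm --showduplicates | sort -r | head
```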
7. Install kubeadm, kubectl, and kubelet
```bash
yum install -y kubeadm-1.23.3-0 kubectl-1.23.3-0 kubelet-1.23.3-0 --disableexcludes=kubernetes
# enable kubelet so it starts on boot (do this on every node)
systemctl enable kubelet
systemctl start kubelet
```
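A quick sanity check that all three tools landed at the pinned version:
```bash
kubeadm version -o short
kubelet --version
kubectl version --client --short
```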
8. Initialize the Kubernetes cluster on the master node
```bash
kubeadm init --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.23.3 --apiserver-advertise-address 192.168.2.2 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.1.0.0/16
```
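`--apiserver-advertise-address` should be the master node's own IP. When init succeeds, kubeadm prints follow-up instructions; the usual kubeconfig setup on the master (as printed by kubeadm itself) is:
```bash
# let kubectl on the master talk to the new cluster
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
```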
9. Deploy the Flannel network on the master node
```bash
tar zxvf cni-plugins-linux-arm-v0.8.6.tgz
cp flannel /opt/cni/bin
chmod 777 /opt/cni/bin/flannel
kubectl apply -f kube-flannel.yml
```
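Once the manifest is applied, the flannel DaemonSet pod should reach Running and the master should flip to Ready (the namespace varies between flannel releases, hence the -A):
```bash
# flannel runs as a DaemonSet; wait for its pod to be Running
kubectl get pods -A | grep flannel
# the master node should move from NotReady to Ready
kubectl get nodes
```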
10. Join the worker nodes to the cluster
```bash
kubeadm join 192.168.2.2:6443 --token uxohvw.84c30cwr4g6t8he8 \
    --discovery-token-ca-cert-hash sha256:cc7e139e28e61db426dc91a3ecccc0a72f26b09af6dfd12c6171d58f4359c0e5
```
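Use the token and hash printed by your own kubeadm init. After the join completes, the workers show up on the master (NotReady until they get the flannel config in step 11):
```bash
# run on the master
kubectl get nodes -o wide
```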
Regenerate the token if it has expired
```bash
# run on the master
kubeadm token create
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
```
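If you only need a fresh join command, kubeadm can generate the whole line in one go:
```bash
# creates a new token and prints a ready-to-run `kubeadm join` command
kubeadm token create --print-join-command
```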
11. Join the worker nodes to the Flannel network
```bash
# create the directory on the worker node
mkdir -p /etc/cni/net.d
# copy the file from the master node to the worker node
scp /etc/cni/net.d/10-flannel.conflist root@192.168.2.3:/etc/cni/net.d
systemctl daemon-reload
systemctl restart kubelet
```
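With the CNI config in place and kubelet restarted, all three nodes should go Ready; a small smoke test (nginx here is just an arbitrary test image) confirms pods get 10.244.x.x addresses on the workers:
```bash
# run on the master
kubectl get nodes
# hypothetical smoke test: the pods should land on the workers with pod-network IPs
kubectl create deployment nginx-test --image=nginx --replicas=2
kubectl get pods -o wide
```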