节点初始化
节点信息

| 主机名 | ip地址 | 角色 |
| ------ | ------------ | ------ |
| rke01 | 192.168.2.13 | master |
| rke02 | 192.168.2.11 | agent |
修改主机名并添加hosts解析
# Set the node hostname (use the name from the node table: rke01 / rke02),
# start a fresh shell so the prompt reflects it, then add static name
# resolution for both cluster nodes.
hostnamectl set-hostname "<hostname>"   # e.g. rke01 on the master, rke02 on the agent
bash                                    # re-exec shell to pick up the new hostname
cat >> /etc/hosts << EOF
192.168.2.13 rke01
192.168.2.11 rke02
EOF
配置NetworkManager忽略k8s网络接口
# Tell NetworkManager to leave the CNI-created interfaces (Calico/Canal,
# flannel) unmanaged so it does not interfere with pod networking.
cat > /etc/NetworkManager/conf.d/rke2-canal.conf << EOF
[keyfile]
unmanaged-devices=interface-name:cali*;interface-name:flannel*
EOF
systemctl reload NetworkManager
k8s部署 — Master部署 — keepalived:安装keepalived
# Install keepalived to provide the floating VIP for the RKE2 server endpoint.
yum -y install keepalived
编辑配置文件/etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id 100
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 100
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 2222
    }
    virtual_ipaddress {
        192.168.2.100/24 dev ens33
    }
}
启动服务
# Start keepalived now and enable it at boot (one command per line — the
# two must not be run as a single command).
systemctl start keepalived
systemctl enable keepalived
# Prepare the offline installation artifacts (准备离线安装包).
# -p makes the mkdir idempotent on re-runs.
mkdir -p /root/rke2-artifacts && cd /root/rke2-artifacts/
wget https://github.com/rancher/rke2/releases/download/v1.28.10%2Brke2r1/rke2.linux-amd64.tar.gz
wget https://github.com/rancher/rke2/releases/download/v1.28.10%2Brke2r1/rke2-images.linux-amd64.tar.gz
wget https://github.com/rancher/rke2/releases/download/v1.28.10%2Brke2r1/sha256sum-amd64.txt
curl -sfL https://get.rke2.io --output install.sh
部署RKE2 Server 创建server配置文件
# Create the RKE2 server configuration.
# Fixes vs. original: the mkdir and cat commands were fused together
# ("rke2cat"); use '>' instead of '>>' so re-running does not duplicate the
# file; create the etcd snapshot dir the config requires.
mkdir -p /etc/rancher/rke2
cat > /etc/rancher/rke2/config.yaml << EOF
#server: "https://192.168.2.100:9345" #keepalived vip
write-kubeconfig: "/root/.kube/config"
write-kubeconfig-mode: "0644"
cluster-domain: "rke2.local"
data-dir: "/data/rancher/rke2" #rancher以及containerd的数据目录
## 自定义一个 token 标识
token: "RKE2@Cluster"
## tls-san 填写LB的统一入口ip地址或域名
tls-san:
  - "192.168.2.100"
  - "192.168.2.13"
## 指定使用国内镜像
#system-default-registry: "registry.cn-hangzhou.aliyuncs.com"
## 为集群中每个节点设置不同 node-name 参数,与当前主机名保持一致
node-name: "rke01"
node-label:
  - role=control-plane
## 打上污点,不让用户工作负载调度到该节点上
node-taint:
  - "CriticalAddonsOnly=true:NoExecute"
#### 网络
## CNI 插件的名称,例如:calico、flannel 和 canal
cni: "calico"
cluster-cidr: "10.244.0.0/16"
service-cidr: "10.96.0.0/16"
service-node-port-range: "30000-60000"
#### 数据库
etcd-snapshot-schedule-cron: "0 */12 * * *"
## 快照文件个数,删除旧的保存新的
etcd-snapshot-retention: "6"
etcd-snapshot-dir: "/data/rancher/rke2/db/snapshots" # 目录需要手动创建
kube-proxy-arg: # 不指定的话,默认是 iptables 模式
  - "proxy-mode=iptables"
disable: # rke2 会默认安装一些 charts,可以取消安装
  - "rke2-ingress-nginx"
EOF
# The snapshot directory above must exist before rke2-server starts.
mkdir -p /data/rancher/rke2/db/snapshots
安装master
# Run the offline installer for the server role. The 'cd' and the env-var
# assignments were fused into one path in the original; they must be
# separate, with the variables prefixed to the install command.
cd /root/rke2-artifacts/
INSTALL_RKE2_TYPE="server" \
INSTALL_RKE2_MIRROR=cn \
INSTALL_RKE2_ARTIFACT_PATH=/root/rke2-artifacts \
INSTALL_RKE2_CHANNEL=v1.28.10+rke2r1 \
sh install.sh
检查rke2-server、rke2-common版本
# Check which rke2 packages/versions are installed.
rpm -qa | grep -E "rke2-server|rke2-common"
# Install a pinned version ('--show' in the original is a truncated
# '--showduplicates', which lists all available versions).
yum list rke2-server --showduplicates
yum install rke2-server-1.28.10~rke2r1-0.el7 rke2-common-1.28.10~rke2r1-0.el7
复制镜像到rke2镜像目录
# Pre-seed the airgap image archive so rke2-server imports it on startup
# (one command per line; they were fused in the original).
mkdir -p /data/rancher/rke2/agent/images
cp rke2-images.linux-amd64.tar.gz /data/rancher/rke2/agent/images/
设置开机自动启动,如果启动失败使用journalctl -u rke2-server.service -f命令查看日志
# Enable at boot and start. On failure inspect with:
#   journalctl -u rke2-server.service -f
systemctl enable rke2-server
systemctl start rke2-server
添加环境变量
# Put the rke2-bundled client binaries (kubectl, ctr, ...) on PATH.
# Fixes vs. original: the stray space in 'PATH=$RKE2_HOME :$PATH' set PATH
# to only the rke2 dir and then tried to execute ':$PATH' as a command;
# also export both variables so child processes inherit them.
cat > /etc/profile.d/rke2.sh << 'EOF'
export RKE2_HOME=/data/rancher/rke2/bin
export PATH=$RKE2_HOME:$PATH
EOF
source /etc/profile
查看k8s节点状态
$ kubectl get nodes
NAME    STATUS     ROLES                       AGE   VERSION
rke01   NotReady   control-plane,etcd,master   21m   v1.28.10+rke2r1
RKE2 部署的 containerd 默认 sock 位于 /run/k3s/containerd/containerd.sock,为 ctr 配置环境变量
# Point ctr at rke2's embedded containerd socket.
cat > /etc/profile.d/containerd.sh << 'EOF'
export CONTAINERD_ADDRESS=/run/k3s/containerd/containerd.sock
EOF
source /etc/profile
如果缺少镜像使用以下命令加载
# Import any *.tar image archives from the current directory into the
# k8s.io containerd namespace. Glob instead of parsing `ls` (which breaks
# on spaces and is an anti-pattern), quote the variable, and skip cleanly
# when no .tar files exist.
for img in *.tar; do
  [ -e "$img" ] || continue
  ctr -n k8s.io image import "$img"
done
Worker部署 同步安装包
# Copy the offline install artifacts to the worker node.
artifact_dir=/root/rke2-artifacts/
scp -r "$artifact_dir" rke02:/root/
创建配置文件
# Create the RKE2 agent configuration. The mkdir and cat commands were
# fused ("rke2cat") in the original; they must be separate commands.
mkdir -p /etc/rancher/rke2
cat > /etc/rancher/rke2/config.yaml << EOF
server: "https://192.168.2.100:9345"
write-kubeconfig: "/root/.kube/config"
write-kubeconfig-mode: "0644"
cluster-domain: "rke2.local"
data-dir: "/data/rancher/rke2"
## 自定义一个 token 标识
token: "RKE2@Cluster"
## 指定使用国内镜像
#system-default-registry: "registry.cn-hangzhou.aliyuncs.com"
## 为集群中每个节点设置不同 node-name 参数,与当前主机名保持一致
node-name: "rke02" # 对应节点主机名
node-label:
  - role=worker
kube-proxy-arg: # 不指定的话,默认是 iptables 模式
  - "proxy-mode=iptables"
EOF
安装组件
# Run the offline installer for the agent role. The 'cd' path and the
# first env var were fused in the original ("rke2-artifactsINSTALL...");
# they must be separate.
cd /root/rke2-artifacts
INSTALL_RKE2_TYPE="agent" \
INSTALL_RKE2_MIRROR=cn \
INSTALL_RKE2_ARTIFACT_PATH=/root/rke2-artifacts \
INSTALL_RKE2_AGENT_IMAGES_DIR=/root/rke2-artifacts \
INSTALL_RKE2_CHANNEL=v1.28.10+rke2r1 \
sh install.sh
安装指定版本rke2
# Check which rke2 packages/versions are installed. This is the agent
# node, so query rke2-agent — the original copy-pasted the rke2-server
# queries from the master section (it then installs rke2-agent anyway).
rpm -qa | grep -E "rke2-agent|rke2-common"
# Install a pinned version ('--show' was a truncated '--showduplicates').
yum list rke2-agent --showduplicates
yum install rke2-agent-1.28.10~rke2r1-0.el7 rke2-common-1.28.10~rke2r1-0.el7
复制镜像到rke2目录
# Pre-seed the airgap image archive so rke2-agent imports it on startup
# (one command per line; they were fused in the original).
mkdir -p /data/rancher/rke2/agent/images
cp rke2-images.linux-amd64.tar.gz /data/rancher/rke2/agent/images/
启动rke2-agent服务,如果启动失败使用 `journalctl -u rke2-agent.service -f` 命令查看日志
# Enable at boot and start the agent (one command per line; they were
# fused in the original).
systemctl enable rke2-agent.service
systemctl start rke2-agent.service
添加环境变量
# Put the rke2-bundled binaries on PATH. profile.d scripts are sourced,
# so the shebang in the original was dead weight; export both variables
# so child processes inherit them.
cat > /etc/profile.d/rke2.sh << 'EOF'
export RKE2_HOME=/data/rancher/rke2/bin
export PATH=$RKE2_HOME:$PATH
EOF
source /etc/profile
部署rke2默认containerd sock位于/run/k3s/containerd/containerd.sock,为ctr创建环境变量
# Point ctr at rke2's embedded containerd socket.
cat > /etc/profile.d/containerd.sh << 'EOF'
export CONTAINERD_ADDRESS=/run/k3s/containerd/containerd.sock
EOF
source /etc/profile
为worker节点打标签
# Label the worker so ROLES shows "worker" in `kubectl get nodes`.
worker_node=rke02
kubectl label node "$worker_node" node-role.kubernetes.io/worker=true