Harbor HA Backend Storage Based on Ceph RBD

Keepalived Configuration

1. Create the keepalived configuration file

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 252
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        192.168.103.134/24
    }
    notify_master "/etc/keepalived/notify.sh master"
    notify_backup "/etc/keepalived/notify.sh backup"
    notify_stop "/etc/keepalived/notify.sh stop"
}

2. Create the notify.sh script

#!/bin/bash
# keepalived notify script: maps/unmaps the Ceph RBD device and
# starts/stops Harbor when the VIP moves between nodes.
contact='root@localhost'

notify() {
    mailsubject="$(hostname) to be $1, vip floating"
    mailbody="$(date +'%F %T'): vrrp transition, $(hostname) changed to be $1"
    echo "$mailbody" | mail -s "$mailsubject" $contact
}

case $1 in
master)
    # make sure the mount point exists
    if [ -d "/data" ]; then
        echo "data dir already exists"
    else
        mkdir /data
    fi
    # map the RBD image only if it is not already mapped
    if ! lsblk | grep -q rbd0; then
        rbd map harbor/harbor
    else
        echo "rbd device is already mapped"
    fi
    mount /dev/rbd0 /data
    # start Harbor in the background so the notify script returns
    docker-compose -f /opt/harbor/docker-compose.yml up -d
    notify master
    ;;
backup)
    # tear Harbor down and release the shared volume
    for i in nginx harbor-jobservice harbor-ui harbor-adminserver registry harbor-log; do docker rm -f $i; done
    umount /data
    notify backup
    ;;
stop)
    for i in nginx harbor-jobservice harbor-ui harbor-adminserver registry harbor-log; do docker rm -f $i; done
    umount /data
    notify stop
    ;;
*)
    echo "Usage: $(basename $0) {master|backup|stop}"
    exit 1
    ;;
esac
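
Before wiring the script into keepalived, it is worth running a quick syntax check to catch copy/paste errors (this only parses the script, it does not execute it):

bash -n /etc/keepalived/notify.sh && echo "syntax OK"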

3. Make the notify.sh script executable

chmod 755 /etc/keepalived/notify.sh

4. Enable and start keepalived

systemctl enable keepalived
systemctl start keepalived
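
Once keepalived is running on both nodes, you can confirm which node currently holds the VIP (a quick check, assuming the eth0 interface from the configuration above):

systemctl status keepalived
ip addr show eth0 | grep 192.168.103.134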

HAProxy Configuration

1. Create the HAProxy directory

mkdir /opt/haproxy

2. Create the HAProxy configuration file

cat >/opt/haproxy/haproxy.cfg <<EOF
global
    log 127.0.0.1 local2
    pidfile /var/run/haproxy.pid
    maxconn 4000
    daemon

defaults
    mode http
    log global
    option httplog
    option redispatch
    retries 3
    timeout http-request 10s
    timeout queue 1m
    timeout connect 10s
    timeout client 1m
    timeout server 1m
    timeout http-keep-alive 10s
    timeout check 10s
    maxconn 3000

listen status
    bind 192.168.103.134:8888
    mode http
    stats uri /
    stats realm Global\ statistics

listen mariadb
    bind 192.168.103.134:3306
    mode tcp
    balance roundrobin
    option tcplog
    server harbor01 192.168.103.126:3306 check weight 1 maxconn 2000
    server harbor02 192.168.103.127:3306 check weight 1 maxconn 2000

listen redis
    bind 192.168.103.134:6379
    mode tcp
    balance roundrobin
    option tcplog
    server harbor01 192.168.103.126:6379 check weight 1 maxconn 2000
    server harbor02 192.168.103.127:6379 check weight 1 maxconn 2000
EOF

3. Start the HAProxy container on harbor01 and harbor02

docker run -d --net host \
-v /opt/haproxy:/usr/local/etc/haproxy \
--restart always \
--name haproxy \
haproxy
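
To confirm HAProxy is up and serving, the stats page bound to the VIP in the configuration above can be queried from the node that currently holds the VIP (assumes curl is installed):

curl -I http://192.168.103.134:8888/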

MySQL High Availability

Redis + Sentinel High Availability

Building the Redis Image with a Dockerfile

1. Create the directory for the Redis build

mkdir /tmp/redis

2. Create the Dockerfile

cat > /tmp/redis/Dockerfile <<EOF
FROM redis:5.0.1
COPY redis.conf /usr/local/etc/redis/redis.conf
CMD [ "redis-server", "/usr/local/etc/redis/redis.conf" ]
EOF

3. Download the Redis configuration file

wget http://download.redis.io/redis-stable/redis.conf -P /tmp/redis/

4. Edit the Redis configuration file to listen on all interfaces and require a password

bind 0.0.0.0
requirepass 123456
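
A minimal sketch of applying these two settings to the downloaded file with sed (this assumes the stock redis.conf, where requirepass is still commented out):

sed -i 's/^bind .*/bind 0.0.0.0/' /tmp/redis/redis.conf
sed -i 's/^# requirepass .*/requirepass 123456/' /tmp/redis/redis.conf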

5. Build the Redis image

docker build -t redis:build-1 /tmp/redis/

Building the Sentinel Image with a Dockerfile

1. Create the sentinel directory

mkdir /tmp/redis-sentinel

2. Create the Dockerfile

cat > /tmp/redis-sentinel/Dockerfile <<EOF
FROM redis:5.0.1
COPY redis-sentinel.conf /usr/local/etc/redis/redis-sentinel.conf
CMD [ "redis-sentinel", "/usr/local/etc/redis/redis-sentinel.conf" ]
EOF

3. Create the sentinel configuration file

cat > /tmp/redis-sentinel/redis-sentinel.conf  <<EOF 
daemonize no
port 26379
dir "/tmp"
sentinel monitor harbor01 192.168.100.11 6379 2
sentinel down-after-milliseconds harbor01 60000
sentinel auth-pass harbor01 123456
sentinel config-epoch harbor01 0
sentinel leader-epoch harbor01 0
EOF

4. Build the sentinel image

docker build -t redis-sentinel:build-1 /tmp/redis-sentinel/

Deployment on harbor01

1. Create the Redis directories

mkdir -p /opt/redis/conf /opt/redis/data

2. Create the Redis master and Sentinel configuration files

cat >/opt/redis/conf/redis-master.conf <<EOF
bind 192.168.103.126
port 6379
daemonize no
appendonly no
appendfsync always
requirepass 123456
masterauth 123456
dir /data
EOF
cat >/opt/redis/conf/sentinel.conf  <<EOF 
daemonize no
port 26379
dir "/tmp"
sentinel myid 649e8d8f77590b4e3b1f41ed64678925deafd5da
sentinel deny-scripts-reconfig yes
sentinel monitor harbor01 192.168.103.126 6379 2
sentinel down-after-milliseconds harbor01 60000
sentinel auth-pass harbor01 123456
# Generated by CONFIG REWRITE
sentinel config-epoch harbor01 0
sentinel leader-epoch harbor01 0
sentinel current-epoch 0
EOF

3. Start the Redis and Sentinel containers on harbor01 (with --net host the containers share the host network stack, so no -p port mappings are needed)

docker run -d \
--net host \
--name redis-harbor01 \
--restart=always \
-v /opt/redis/conf/redis-master.conf:/usr/local/etc/redis/redis.conf \
-v /data/:/data/ \
redis:build-1
docker run -d \
--net host \
--name redis-sentinel-harbor01 \
--restart=always \
-v /opt/redis/conf/sentinel.conf:/usr/local/etc/redis/redis-sentinel.conf \
redis-sentinel:build-1
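
With both containers running, a quick check that Redis accepts the configured password and that Sentinel is monitoring the harbor01 master (redis-cli is available inside the containers, and with --net host they listen directly on the host's ports):

docker exec redis-harbor01 redis-cli -h 192.168.103.126 -a 123456 ping
docker exec redis-sentinel-harbor01 redis-cli -p 26379 sentinel master harbor01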

Deployment on harbor02

1. Create the Redis directories

mkdir -p /opt/redis/conf /opt/redis/data

2. Create the Redis replica and Sentinel configuration files

cat >/opt/redis/conf/redis-master.conf <<EOF
bind 192.168.103.127
port 6379
daemonize no
dir /data
masterauth 123456
requirepass 123456
#master node IP and port
slaveof 192.168.103.126 6379
appendonly no
appendfsync always
EOF
cat >/opt/redis/conf/sentinel.conf  <<EOF 
daemonize no
port 26379
dir "/tmp"
# each Sentinel instance must have its own unique myid; omit the line and Sentinel generates one automatically
sentinel deny-scripts-reconfig yes
sentinel monitor harbor01 192.168.103.126 6379 2
sentinel down-after-milliseconds harbor01 60000
sentinel auth-pass harbor01 123456
# Generated by CONFIG REWRITE
sentinel config-epoch harbor01 0
sentinel leader-epoch harbor01 0
sentinel current-epoch 0
EOF

3. Start the Redis and Sentinel containers on harbor02 (again, no -p port mappings are needed with --net host)

docker run -d \
--net host \
--name redis-harbor02 \
--restart=always \
-v /opt/redis/conf/redis-master.conf:/usr/local/etc/redis/redis.conf \
-v /data/:/data/ \
redis:build-1
docker run -d \
--net host \
--name redis-sentinel-harbor02 \
--restart=always \
-v /opt/redis/conf/sentinel.conf:/usr/local/etc/redis/redis-sentinel.conf \
redis-sentinel:build-1
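
At this point harbor02 should be replicating from harbor01; this can be verified from either node (expect role:slave on harbor02 and connected_slaves:1 on harbor01):

docker exec redis-harbor01 redis-cli -h 192.168.103.126 -a 123456 info replication
docker exec redis-harbor02 redis-cli -h 192.168.103.127 -a 123456 info replication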

Ceph Configuration

1. Create the Ceph RBD pool

ceph osd pool create harbor 32

2. Create the RBD volume

rbd create harbor/harbor --size 50G

3. Use rbd map to map the harbor volume as device rbd0 (the block device must be mapped on both harbor01 and harbor02)

rbd map harbor/harbor

Because the kernel version is too old, the following error appears when running rbd map:

rbd: sysfs write failed
RBD image feature set mismatch. Try disabling features unsupported by the kernel with "rbd feature disable".
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (6) No such device or address

Workaround: disable the features that the kernel does not support:

rbd feature disable harbor/harbor exclusive-lock object-map fast-diff deep-flatten
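
Note that a freshly created RBD image is a raw block device with no filesystem on it, so before the first mount it has to be formatted once, from one node only (a one-time step; ext4 is just an example):

rbd showmapped
mkfs.ext4 /dev/rbd0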

Harbor Configuration

1. Stop the keepalived service on harbor02 so that the VIP stays on harbor01

systemctl stop keepalived

2. On harbor01, extract the Harbor installation package to /opt

tar xf harbor-offline-installer-v1.5.0.tgz -C /opt

3. Edit the harbor.cfg configuration file

# hostname is set to the keepalived VIP
hostname = 192.168.103.134
ssl_cert = /data/cert/server.crt
ssl_cert_key = /data/cert/server.key
secretkey_path = /data
db_host = 192.168.103.126
db_password = 123
db_port = 3306
db_user = root
redis_url = 192.168.103.126:6379,100,123456,1
registry_storage_provider_name = filesystem
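
harbor.cfg points at a certificate and key under /data/cert. If no CA-signed certificate is available, a self-signed pair can be generated there (a sketch using openssl, with the VIP as the common name; adjust to your own certificate handling):

mkdir -p /data/cert
openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
  -subj "/CN=192.168.103.134" \
  -keyout /data/cert/server.key \
  -out /data/cert/server.crt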

4. Deploy Harbor

./prepare
./install.sh --ha

5. Test access to Harbor through the VIP

http://192.168.103.134
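
Besides opening the URL in a browser, access through the VIP can be checked from the command line (note: if Harbor is served over plain HTTP, the Docker daemon on the client must list 192.168.103.134 under insecure-registries before docker login will succeed):

curl -I http://192.168.103.134
docker login 192.168.103.134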

6. Stop the keepalived service on harbor01 so that the VIP fails over to harbor02

systemctl stop keepalived

7. On harbor02, extract the Harbor installation package to /opt

tar xf harbor-offline-installer-v1.5.0.tgz -C /opt

8. Copy harbor.cfg from harbor01 to harbor02

scp /opt/harbor/harbor.cfg harbor02:/opt/harbor/harbor.cfg

9. Copy the keepalived configuration files to harbor02 and adjust them as shown below

scp /etc/keepalived/keepalived.conf harbor02:/etc/keepalived/
scp /etc/keepalived/notify.sh harbor02:/etc/keepalived/
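
The copied keepalived.conf still declares state MASTER and priority 100, so harbor02's copy should be adjusted to act as the standby; a sketch of the changes on harbor02 (the priority value is only an example, it just has to be lower than harbor01's):

sed -i 's/state MASTER/state BACKUP/' /etc/keepalived/keepalived.conf
sed -i 's/priority 100/priority 90/' /etc/keepalived/keepalived.conf
systemctl enable keepalived
systemctl start keepalived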

10. Deploy Harbor on harbor02

./prepare
./install.sh --ha

Harbor HA Testing

1. Stop keepalived on harbor02; once the Harbor services on harbor01 are back up, verify that Harbor can still be accessed.
2. Stop keepalived on harbor01; once the Harbor services on harbor02 are back up, verify that Harbor can still be accessed.
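
A concrete way to run the first check (swap the node names for the second one):

# on harbor02: release the VIP
systemctl stop keepalived
# on harbor01: wait for notify.sh to remount /data and bring Harbor back up, then verify
ip addr show eth0 | grep 192.168.103.134
docker ps --format '{{.Names}}' | grep harbor
curl -I http://192.168.103.134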

Author: 慕容峻才
Link: https://www.acaiblog.top/基于ceph-rbd实现harbor-HA后端存储/
Copyright: Unless otherwise stated, all articles on this blog are licensed under CC BY-NC-SA 4.0. Please credit 阿才的博客 when reposting.