Ceph
- Environment initialization
- Deploying Ceph
This lab runs on VMware 17.
Node | IP |
---|---|
storage01 | 192.168.200.161 |
storage02 | 192.168.200.162 |
storage03 | 192.168.200.163 |
Environment initialization
Initialize the base environment; run the following script on all three nodes.
#!/bin/bash
# Node definitions: "IP hostname user"
NODES=("192.168.200.161 storage01 root" "192.168.200.162 storage02 root" "192.168.200.163 storage03 root")
# Shared root password for every node in the cluster
HOST_PASS="000000"
# Node that acts as the time-sync source
TIME_SERVER=storage01
# Subnet allowed to sync against the time server
TIME_SERVER_IP=192.168.200.0/24

# Message of the day
cat > /etc/motd <<EOF
#################################
#    Welcome  to  openstack     #
#################################
EOF

# Set the hostname of the current node
for node in "${NODES[@]}"; do
    ip=$(echo "$node" | awk '{print $1}')
    hostname=$(echo "$node" | awk '{print $2}')
    # Hostname and IP of the node the script is running on
    current_ip=$(hostname -I | awk '{print $1}')
    current_hostname=$(hostname)
    # Only rename the node whose IP matches this entry
    if [[ "$current_ip" == "$ip" && "$current_hostname" != "$hostname" ]]; then
        echo "Updating hostname to $hostname on $current_ip..."
        hostnamectl set-hostname "$hostname"
        if [ $? -eq 0 ]; then
            echo "Hostname updated successfully."
        else
            echo "Failed to update hostname."
        fi
        break
    fi
done

# Add an /etc/hosts entry for every node
for node in "${NODES[@]}"; do
    ip=$(echo "$node" | awk '{print $1}')
    hostname=$(echo "$node" | awk '{print $2}')
    if grep -q "$ip $hostname" /etc/hosts; then
        echo "Host entry for $hostname already exists in /etc/hosts."
    else
        sudo sh -c "echo '$ip $hostname' >> /etc/hosts"
        echo "Added host entry for $hostname in /etc/hosts."
    fi
done

# Generate an SSH key pair if none exists yet
if [[ ! -s ~/.ssh/id_rsa.pub ]]; then
    ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa -q -b 2048
fi

# Install sshpass if it is missing
if ! which sshpass &> /dev/null; then
    echo "sshpass is not installed, installing it now..."
    sudo apt-get install -y sshpass
fi

# Distribute the public key to every node for passwordless SSH
for node in "${NODES[@]}"; do
    ip=$(echo "$node" | awk '{print $1}')
    hostname=$(echo "$node" | awk '{print $2}')
    user=$(echo "$node" | awk '{print $3}')
    # sshpass supplies the password; the host key is accepted automatically
    sshpass -p "$HOST_PASS" ssh-copy-id -o StrictHostKeyChecking=no -i /root/.ssh/id_rsa.pub "$user@$hostname"
done

# Time synchronization
apt install -y chrony
if [[ $(hostname) == "$TIME_SERVER" ]]; then
    # This node is the time source: comment out the default pool entries
    # and serve time to the cluster subnet
    sed -i '20,23s/^/#/g' /etc/chrony/chrony.conf
    echo "server $TIME_SERVER iburst maxsources 2" >> /etc/chrony/chrony.conf
    echo "allow $TIME_SERVER_IP" >> /etc/chrony/chrony.conf
    echo "local stratum 10" >> /etc/chrony/chrony.conf
else
    # All other nodes sync against the time source
    sed -i '20,23s/^/#/g' /etc/chrony/chrony.conf
    echo "pool $TIME_SERVER iburst maxsources 2" >> /etc/chrony/chrony.conf
fi

# Restart and enable chrony
systemctl restart chronyd
systemctl enable chronyd

echo "###############################################################"
echo "############## Cluster initialized successfully ###############"
echo "###############################################################"
Deploying Ceph
Configure the offline APT repository
tar zxvf ceph_quincy.tar.gz -C /opt/
cp /etc/apt/sources.list{,.bak}
cat > /etc/apt/sources.list << EOF
deb [trusted=yes] file:// /opt/ceph_quincy/debs/
EOF
apt-get clean all
apt-get update
Configure time synchronization
# Optionally enable NTP
timedatectl set-ntp true
# Set the time zone to Shanghai
timedatectl set-timezone Asia/Shanghai
# Sync the hardware clock to the system clock
hwclock --systohc
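Running `timedatectl` with no arguments confirms the result; the time zone and NTP status lines below are what an Ubuntu host typically reports:

timedatectl
# Expected: "Time zone: Asia/Shanghai" and "NTP service: active"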
Install Docker on all nodes
apt -y install docker-ce
Install cephadm and the Ceph CLI tools on node 01
apt install -y cephadm ceph-common
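A quick sanity check that both packages are usable (the exact version strings depend on the packages in the offline repo):

cephadm version
ceph -v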
Load the Ceph images on all nodes
docker load -i cephadm_images_v17.tar
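To confirm the load succeeded, list the local images; the Ceph image ID used for tagging later in this guide (0912465dcea5) should show up here:

docker images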
Set up the registry on node 01
# Load the registry image
docker load -i registry.tar
# Start the registry container
docker run -d --name registry -p 5000:5000 --restart always 3a0f7b0a13ef
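The registry exposes the standard Docker Registry v2 HTTP API, so an empty catalog response confirms it is up (it stays empty until the push step below):

curl http://192.168.200.161:5000/v2/_catalog
# Expected before the push: {"repositories":[]}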
Point all nodes at the registry
cat > /etc/docker/daemon.json << EOF
{
"insecure-registries":["192.168.200.161:5000"]
}
EOF
systemctl daemon-reload
systemctl restart docker
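After the restart, `docker info` should list the address under its insecure registries section:

docker info | grep -A 2 "Insecure Registries"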
Tag and push the Ceph image from node 01
docker tag 0912465dcea5 192.168.200.161:5000/ceph:v17
docker push 192.168.200.161:5000/ceph:v17
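If the push succeeded, the image is queryable through the registry API:

curl http://192.168.200.161:5000/v2/ceph/tags/list
# Expected: {"name":"ceph","tags":["v17"]}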
cd /etc/ceph
Bootstrap the cluster on node 01
cephadm --image 192.168.200.161:5000/ceph:v17 bootstrap --mon-ip 192.168.200.161 --initial-dashboard-user admin --initial-dashboard-password 000000 --skip-pull
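When bootstrap finishes it prints the dashboard URL and admin credentials. The daemons it started on storage01 can be listed with:

ceph orch ps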
Change the HTTPS port (this step through the dashboard restart below is optional)
ceph config set mgr mgr/dashboard/ssl_server_port 5050
Disable SSL for the dashboard
ceph config set mgr mgr/dashboard/ssl false
Set the dashboard listen address
ceph config set mgr mgr/dashboard/server_addr 0.0.0.0
Set the dashboard listen port
ceph config set mgr mgr/dashboard/server_port 5050
Restart the dashboard module to apply the changes (end of the optional steps)
ceph mgr module disable dashboard
ceph mgr module enable dashboard
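After the module restart, the active endpoint can be confirmed with:

ceph mgr services
# Reports the dashboard URL, e.g. on port 5050 if the optional steps above were applied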
Add the other nodes to the cluster
ssh-copy-id -f -i /etc/ceph/ceph.pub storage02
ssh-copy-id -f -i /etc/ceph/ceph.pub storage03
ceph orch host add storage02
ceph orch host add storage03
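All three hosts should now appear in the orchestrator's inventory:

ceph orch host ls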
Check the cluster
root@storage01:/etc/ceph# ceph -s
  cluster:
    id:     4569c748-fc81-11ee-872a-7f1819cf2453
    health: HEALTH_WARN
            1 stray daemon(s) not managed by cephadm
            OSD count 0 < osd_pool_default_size 3

  services:
    mon: 2 daemons, quorum storage02,storage03 (age 10m)
    mgr: storage03.lnyuay(active, since 68s)
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:

root@storage01:/etc/ceph#
The web UI is reachable at IP:8443.
If something goes wrong, the cluster can be torn down and rebuilt:
cephadm rm-cluster --fsid d92b85c0-3ecd-11ed-a617-3f7cf3e2d6d8 --force
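The fsid in the example above is a placeholder; substitute your own cluster's fsid, which is shown as the `id:` field in `ceph -s` or printed directly by:

ceph fsid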
List available disk devices
ceph orch device ls
root@storage01:/etc/ceph# ceph orch device ls
HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS
storage01 /dev/sdb hdd 107G Yes 19m ago
storage02 /dev/sdb hdd 107G Yes 12m ago
storage03 /dev/sdb hdd 107G Yes 12m ago
Create the OSDs
ceph orch daemon add osd storage01:/dev/sdb
ceph orch daemon add osd storage02:/dev/sdb
ceph orch daemon add osd storage03:/dev/sdb
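Adding devices one by one keeps things explicit; equivalently, the orchestrator can be told to consume every eligible device at once. This corresponds to the osd.all-available-devices service visible in the `ceph orch ls` output further down:

ceph orch apply osd --all-available-devices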
Verify
root@storage01:/etc/ceph# ceph -s
  cluster:
    id:     4569c748-fc81-11ee-872a-7f1819cf2453
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum storage03,storage01,storage02 (age 54s)
    mgr: storage01.gitwte(active, since 110s)
    osd: 3 osds: 3 up (since 5m), 3 in (since 5m)

  data:
    pools:   1 pools, 1 pgs
    objects: 2 objects, 449 KiB
    usage:   62 MiB used, 300 GiB / 300 GiB avail
    pgs:     1 active+clean

root@storage01:/etc/ceph# ceph df
--- RAW STORAGE ---
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 300 GiB 300 GiB 62 MiB 62 MiB 0.02
TOTAL 300 GiB 300 GiB 62 MiB 62 MiB 0.02

--- POOLS ---
POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
.mgr 1 1 449 KiB 2 1.3 MiB 0 95 GiB
root@storage01:/etc/ceph#
CephFS needs two pools, cephfs-data and cephfs-metadata, which store the file data and the file metadata respectively.
ceph osd pool create cephfs-metadata 16 16
ceph osd pool create cephfs-data 32 32
ceph fs new cephfs cephfs-metadata cephfs-data
ceph orch apply mds cephfs --placement="3 storage01 storage02 storage03"

root@storage01:/etc/ceph# ceph -s
  cluster:
    id:     4569c748-fc81-11ee-872a-7f1819cf2453
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum storage03,storage02,storage01 (age 10s)
    mgr: storage01.gitwte(active, since 10m)
    mds: 1/1 daemons up, 2 standby
    osd: 3 osds: 3 up (since 14m), 3 in (since 14m)
    rgw: 3 daemons active (3 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   7 pools, 177 pgs
    objects: 216 objects, 457 KiB
    usage:   104 MiB used, 300 GiB / 300 GiB avail
    pgs:     177 active+clean

root@storage01:/etc/ceph#
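With the filesystem up, a client can mount it. A minimal sketch using the kernel client and the admin key; the mount point /mnt/cephfs is an arbitrary example, and in production a dedicated CephX user should be created instead of reusing client.admin:

mkdir -p /mnt/cephfs
# Mount via the monitor on storage01; the secret is the admin keyring's key
mount -t ceph 192.168.200.161:6789:/ /mnt/cephfs -o name=admin,secret=$(ceph auth get-key client.admin)
df -h /mnt/cephfs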
Deploy object storage (RGW)
ceph orch apply rgw myorg cn-east-1 --placement="3 storage01 storage02 storage03"
root@storage01:/etc/ceph# ceph orch ls
NAME PORTS RUNNING REFRESHED AGE PLACEMENT
alertmanager ?:9093,9094 1/1 47s ago 36m count:1
crash 3/3 3m ago 36m *
grafana ?:3000 1/1 47s ago 36m count:1
mds.cephfs 3/3 3m ago 5m storage01;storage02;storage03;count:3
mgr 1/1 47s ago 11m storage01
mon 3/1 3m ago 55s storage01
node-exporter ?:9100 3/3 3m ago 36m *
osd.all-available-devices 3 3m ago 15m *
prometheus ?:9095 1/1 47s ago 36m count:1
rgw.myorg ?:80 3/3 3m ago 4m storage01;storage02;storage03;count:3
root@storage01:/etc/ceph#
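To actually use the gateway, an S3-style user is needed. A minimal sketch; the uid and display name are arbitrary examples, and the command prints the generated access and secret keys for S3 clients:

radosgw-admin user create --uid=testuser --display-name="Test User"
# The RGW endpoint answers on port 80 of any of the three nodes
curl http://storage01:80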