当前位置: 首页 > news >正文

ceph集群部署

系统版本

[root@test-ceph1 cephadm-ansible]# cat /etc/redhat-release 
Red Hat Enterprise Linux release 9.1 (Plow)

磁盘情况

[root@localhost ~]# lsblk
NAME          MAJ:MIN RM   SIZE RO TYPE MOUNTPOINTS
sr0            11:0    1 101.7M  0 rom  
sr1            11:1    1   8.4G  0 rom  
nvme0n1       259:0    0   200G  0 disk 
├─nvme0n1p1   259:1    0   600M  0 part /boot/efi
├─nvme0n1p2   259:2    0     1G  0 part /boot
└─nvme0n1p3   259:3    0 198.4G  0 part 
  ├─rhel-root 253:0    0    70G  0 lvm  /
  ├─rhel-swap 253:1    0   7.9G  0 lvm  [SWAP]
  └─rhel-home 253:2    0 120.6G  0 lvm  /home
nvme0n2       259:4    0   100G  0 disk 
nvme0n3       259:5    0   100G  0 disk 
nvme0n4       259:6    0   100G  0 disk 

主机配置

# 设置免密登录
ssh-keygen -t rsa -b 4096
for i in {1..6};do echo 192.168.222.13$i;ssh-copy-id root@192.168.222.13$i ;done
# 打开网卡
for i in {1..6};do echo 192.168.222.13$i;ssh root@192.168.222.13$i "nmcli connection up ens192" ;done
# 设置主机名称
hostnamectl set-hostname test-ceph1
hostnamectl set-hostname test-ceph2
hostnamectl set-hostname test-ceph3
hostnamectl set-hostname test-ceph4
hostnamectl set-hostname test-ceph5
hostnamectl set-hostname test-ceph6
# 或者
for i in {1..6};do echo 192.168.222.13$i;ssh 192.168.222.13$i "hostnamectl set-hostname test-ceph$i";done
# 设置dns解析
vim /etc/hosts
192.168.222.131 test-ceph1
192.168.222.132 test-ceph2
192.168.222.133 test-ceph3
192.168.222.134 test-ceph4
192.168.222.135 test-ceph5
192.168.222.136 test-ceph6

# 分发 hosts 文件到所有节点
for i in {1..6};do echo 192.168.222.13$i;scp /etc/hosts 192.168.222.13$i:/etc/hosts;done

下载项目

git clone https://github.com/ceph/cephadm-ansible.git

yum 源

vim redhat91-huaiwei.repo

[BaseOS-redhat9]
baseurl = https://mirrors.huaweicloud.com/centos-stream/9-stream/BaseOS/x86_64/os/
enabled = 1
gpgcheck = 0
name = BaseOS-redhat9

[AppStream-redhat9]
baseurl = https://mirrors.huaweicloud.com/centos-stream/9-stream/AppStream/x86_64/os/
enabled = 1
gpgcheck = 0
name = AppStream-redhat9

[epel-redhat9]
baseurl = https://mirrors.huaweicloud.com/epel/9/Everything/x86_64
# baseurl = https://mirrors.cloud.tencent.com/epel/9/Everything/x86_64
enabled = 1
gpgcheck = 0
name = epel-redhat9

[docker-redhat9]
baseurl = https://mirrors.huaweicloud.com/docker-ce/linux/centos/9/x86_64/stable
enabled = 1
gpgcheck = 0
name = dockerStable

[kube-redhat9]
# baseurl = https://mirrors.tuna.tsinghua.edu.cn/kubernetes/core:/stable:/v1.33/rpm/
baseurl = https://mirrors.cloud.tencent.com/kubernetes_new/core:/stable:/v1.33/rpm/
# baseurl = https://mirrors.cloud.tencent.com/kubernetes_new/core:/stable:/v1.30/rpm/
enabled = 1
gpgcheck = 0
name = kube-redhat9

# 分发 repo 文件到所有节点
for i in {1..6};do echo 192.168.222.13$i;scp redhat91-huaiwei.repo 192.168.222.13$i:/etc/yum.repos.d/;done
# 安装ssh
for i in {1..6};do echo 192.168.222.13$i;ssh 192.168.222.13$i "yum -y install openssl openshell openssh";done

安装依赖

yum -y install ansible python3-pip
pip install -r requirements.txt

创建主机清单文件

cd cephadm-ansible
vim hosts

# 监控节点
192.168.222.131
192.168.222.132
192.168.222.133
# OSD 节点
192.168.222.131
192.168.222.132
192.168.222.133
192.168.222.134
192.168.222.135
192.168.222.136
# 客户端节点
192.168.222.133
192.168.222.134

[ceph_cluster]
192.168.222.131 labels="['_admin', 'mons', 'mgrs', 'monitoring','osds']"
192.168.222.132 labels="['_admin','mons', 'mgrs','osds']"
192.168.222.133 labels="['_admin','client','mons', 'mgrs','osds']"
192.168.222.134 labels="['client','osds']"
192.168.222.135 labels="['osds']"
192.168.222.136 labels="['osds']"

# 管理节点组 (必须)
[admin]
192.168.222.131
192.168.222.132
192.168.222.133

# 客户端组 (必须)
[clients]
192.168.222.133
192.168.222.134

# 监控组
[mons]
192.168.222.131
192.168.222.132
192.168.222.133

# 管理节点
[mgrs]
192.168.222.131
192.168.222.132
192.168.222.133

# OSD 组
[osds]
192.168.222.131
192.168.222.132
192.168.222.133
192.168.222.134
192.168.222.135
192.168.222.136

创建变量文件

mkdir group_vars
vim group_vars/all.yml

# 集群基础配置
ceph_origin: community          # 可选: community, rhcs, custom, shaman
ceph_release: quincy            # Ceph 版本
ceph_container_registry: quay.io/ceph/ceph

# 容器引擎选择
docker: false                   # true 使用 Docker, false 使用 Podman

# 网络配置
monitor_address: "192.168.222.131"
public_network: "192.168.222.0/24"
cluster_network: "192.168.222.0/24"

# 存储配置
osd_devices:
  - nvme0n2
  - nvme0n3
  - nvme0n4

# 监控配置
dashboard: true
monitoring: true

运行完整预检检查

# 先清理各节点上的临时 repo 文件, 再运行预检
for i in {1..6};do echo 192.168.222.13$i;ssh 192.168.222.13$i "rm -rf /etc/yum.repos.d/redhat91-huaiwei.repo";done
ansible-playbook -i hosts cephadm-preflight.yml

集群部署

ansible-playbook -i hosts tests/functional/deploy-cluster.yml \
  --extra-vars "monitor_address=192.168.222.131"

集群情况

[root@test-ceph1 cephadm-ansible]# ceph -s
  cluster:
    id:     672db5d2-8543-11f0-9378-000c29adccdd
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum test-ceph1,test-ceph3,test-ceph2 (age 20m)
    mgr: test-ceph1.tdhqqf(active, since 23m), standbys: test-ceph3.xjbstp, test-ceph2.jwprvu
    osd: 15 osds: 15 up (since 18m), 15 in (since 18m)

  data:
    pools:   1 pools, 1 pgs
    objects: 2 objects, 449 KiB
    usage:   4.3 GiB used, 1.5 TiB / 1.5 TiB avail
    pgs:     1 active+clean

查看主机节点标签

[root@test-ceph1 cephadm-ansible]# ceph orch host ls
HOST        ADDR             LABELS                            STATUS  
test-ceph1  192.168.222.131  _admin,mgrs,osds,monitoring,mons          
test-ceph2  192.168.222.132  _admin,mons,mgrs,osds                     
test-ceph3  192.168.222.133  _admin,client,mons,mgrs,osds              
test-ceph4  192.168.222.134  clinet,osds                               
test-ceph6  192.168.222.136  osds  

查看磁盘设备

[root@test-ceph1 cephadm-ansible]# ceph orch device ls
HOST        PATH          TYPE  DEVICE ID                                             SIZE  AVAILABLE  REFRESHED  REJECT REASONS                                                           
test-ceph1  /dev/nvme0n2  ssd   VMware_Virtual_NVMe_Disk_VMware_NVME_0000             100G  No         22m ago    Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected  
test-ceph1  /dev/nvme0n3  ssd   VMware_Virtual_NVMe_Disk_VMware_NVME_0000             100G  No         22m ago    Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected  
test-ceph1  /dev/nvme0n4  ssd   VMware_Virtual_NVMe_Disk_VMware_NVME_0000             100G  No         22m ago    Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected  
test-ceph1  /dev/sr0      hdd   VMware_Virtual_SATA_CDRW_Drive_00000000000000000001   101M  No         22m ago    Has a FileSystem, Insufficient space (<5GB)                              
test-ceph1  /dev/sr1      hdd   VMware_Virtual_SATA_CDRW_Drive_01000000000000000001  8641M  No         22m ago    Has a FileSystem                                                         
test-ceph2  /dev/nvme0n2  ssd   VMware_Virtual_NVMe_Disk_VMware_NVME_0000             100G  No         22m ago    Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected  
test-ceph2  /dev/nvme0n3  ssd   VMware_Virtual_NVMe_Disk_VMware_NVME_0000             100G  No         22m ago    Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected  
test-ceph2  /dev/nvme0n4  ssd   VMware_Virtual_NVMe_Disk_VMware_NVME_0000             100G  No         22m ago    Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected  
test-ceph2  /dev/sr0      hdd   VMware_Virtual_SATA_CDRW_Drive_00000000000000000001   101M  No         22m ago    Has a FileSystem, Insufficient space (<5GB)                              
test-ceph2  /dev/sr1      hdd   VMware_Virtual_SATA_CDRW_Drive_01000000000000000001  8641M  No         22m ago    Has a FileSystem                                                         
test-ceph3  /dev/nvme0n2  ssd   VMware_Virtual_NVMe_Disk_VMware_NVME_0000             100G  No         22m ago    Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected  
test-ceph3  /dev/nvme0n3  ssd   VMware_Virtual_NVMe_Disk_VMware_NVME_0000             100G  No         22m ago    Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected  
test-ceph3  /dev/nvme0n4  ssd   VMware_Virtual_NVMe_Disk_VMware_NVME_0000             100G  No         22m ago    Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected  
test-ceph3  /dev/sr0      hdd   VMware_Virtual_SATA_CDRW_Drive_00000000000000000001   101M  No         22m ago    Has a FileSystem, Insufficient space (<5GB)                              
test-ceph3  /dev/sr1      hdd   VMware_Virtual_SATA_CDRW_Drive_01000000000000000001  8641M  No         22m ago    Has a FileSystem                                                         
test-ceph4  /dev/nvme0n2  ssd   VMware_Virtual_NVMe_Disk_VMware_NVME_0000             100G  No         22m ago    Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected  
test-ceph4  /dev/nvme0n3  ssd   VMware_Virtual_NVMe_Disk_VMware_NVME_0000             100G  No         22m ago    Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected  
test-ceph4  /dev/nvme0n4  ssd   VMware_Virtual_NVMe_Disk_VMware_NVME_0000             100G  No         22m ago    Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected  
test-ceph4  /dev/sr0      hdd   VMware_Virtual_SATA_CDRW_Drive_00000000000000000001   101M  No         22m ago    Has a FileSystem, Insufficient space (<5GB)                              
test-ceph4  /dev/sr1      hdd   VMware_Virtual_SATA_CDRW_Drive_01000000000000000001  8641M  No         22m ago    Has a FileSystem                                                         
test-ceph6  /dev/nvme0n2  ssd   VMware_Virtual_NVMe_Disk_VMware_NVME_0000             100G  No         22m ago    Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected  
test-ceph6  /dev/nvme0n3  ssd   VMware_Virtual_NVMe_Disk_VMware_NVME_0000             100G  No         22m ago    Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected  
test-ceph6  /dev/nvme0n4  ssd   VMware_Virtual_NVMe_Disk_VMware_NVME_0000             100G  No         22m ago    Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected  
test-ceph6  /dev/sr0      hdd   VMware_Virtual_SATA_CDRW_Drive_00000000000000000001   101M  No         22m ago    Has a FileSystem, Insufficient space (<5GB)                              
test-ceph6  /dev/sr1      hdd   VMware_Virtual_SATA_CDRW_Drive_01000000000000000001  8641M  No         22m ago    Has a FileSystem      

查看集群所有的服务

[root@test-ceph1 cephadm-ansible]# ceph orch ls
NAME           PORTS        RUNNING  REFRESHED  AGE  PLACEMENT         
alertmanager   ?:9093,9094      1/1  113s ago   28m  label:monitoring  
crash                           5/5  3m ago     28m  *                 
grafana        ?:3000           1/1  113s ago   28m  label:monitoring  
mgr                             3/3  115s ago   28m  label:mgrs        
mon                             3/3  115s ago   33m  label:mons        
node-exporter  ?:9100           5/5  3m ago     28m  *                 
osd.osd                          15  3m ago     28m  label:osds        
prometheus     ?:9095           1/1  113s ago   28m  label:monitoring  

ceph版本

[root@test-ceph1 cephadm-ansible]# ceph -v
ceph version 17.2.9 (69bf48f20731a4b0d742613f6c6335ccb54dd217) quincy (stable)

镜像版本

[root@test-ceph1 cephadm-ansible]# podman images
REPOSITORY                        TAG         IMAGE ID      CREATED        SIZE
quay.io/ceph/ceph                 v17         259b35566514  9 months ago   1.25 GB
quay.io/ceph/ceph-grafana         9.4.7       954c08fa6188  21 months ago  647 MB
quay.io/prometheus/prometheus     v2.43.0     a07b618ecd1d  2 years ago    235 MB
quay.io/prometheus/alertmanager   v0.25.0     c8568f914cd2  2 years ago    66.5 MB
quay.io/prometheus/node-exporter  v1.5.0      0da6a335fe13  2 years ago    23.9 MB

重置ceph web管理页密码

# web 地址
# https://192.168.222.131:8443/#/dashboard
[root@test-ceph1 ~]# vim pass
[root@test-ceph1 ~]# cat pass 
redhat123456*
[root@test-ceph1 ~]# ceph dashboard ac-user-set-password admin -i pass
{"username": "admin", "password": "$2b$12$vL9oIc6otLvhKDPJYsCdLOerlSDSasc6zeVxHIj3iLP.x3JRSpvuO", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1756527044, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": false}
http://www.xdnf.cn/news/1400473.html

相关文章:

  • 接雨水,leetCode热题100,C++实现
  • 嵌入式linux相机(2)
  • PostgreSQL数据类型一览(数值类型)
  • opencv实现轮廓绘制和选择
  • 生成式 AI 重构内容生产:效率提升背后的创作版权边界争议
  • day43-Ansible-PlayBook
  • 如何使用快照将 AWS OpenSearch 服务中的数据从开发环境复制到生产环境
  • 知料觅得-新一代AI搜索引擎
  • Linux网络服务发现在VPS云服务器自动化配置的关键技术与实践
  • 给某个conda环境安装CUDA 12.4版本 全局CUDA不变
  • C++的迭代器和指针的区别
  • 【小白笔记】基本的Linux命令来查看服务器的CPU、内存、磁盘和系统信息
  • Java SpringAI应用开发面试全流程解析:RAG、流式推理与企业落地
  • 物联网(IoT)中常用的通信协议
  • GD32VW553-IOT 基于 vscode 的 bootloader 移植(基于Cmake)
  • 微论-突触的作用赋能思考(可能是下一代人工智能架构的启发式理论)
  • 响应式编程框架Reactor【5】
  • Spring代理的特点
  • AI-调查研究-65-机器人 机械臂控制技术的前世今生:从PLC到MPC
  • 【MCP系列教程】 Python 实现 FastMCP StreamableHTTP MCP:在通义灵码 IDE 开发并部署至阿里云百炼
  • JsMind 常用配置项
  • 【计算机网络】HTTP是什么?
  • 基于Docker部署的Teable应用
  • Linux驱动开发重要操作汇总
  • “人工智能+”政策驱动下的技术重构、商业变革与实践路径研究 ——基于国务院《关于深入实施“人工智能+”行动的意见》的深度解读
  • wpf之依赖属性
  • 桌面GIS软件FlatGeobuf转Shapefile代码分享
  • 学习游戏制作记录(视觉上的优化)
  • 第三弹、AI、LLM大模型是什么?
  • Visual Studio(vs)免费版下载安装C/C++运行环境配置