ceph cluster configuration
Location of the ceph configuration file template
[root@test-ceph1 ~]# cat /usr/share/doc/ceph/sample.ceph.conf
ceph configuration precedence
From lowest to highest priority (see the example after this list):
Compiled-in defaults
Centralized configuration database   # recommended approach
Local configuration file
Environment variables
Command-line arguments
Runtime overrides
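To confirm which of these layers a running daemon's value actually comes from, ceph config show prints a SOURCE column (default, file, mon, cmdline, override); a minimal check against the cluster used in these notes:
# Effective value and its source for osd.0
ceph config show osd.0 | grep osd_memory_target
# Value recorded in the central database only
ceph config get osd.0 osd_memory_target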
List all configuration options
[root@test-ceph1 ~]# ceph config ls
Show help for a specific configuration option
[root@test-ceph1 ~]# ceph config help osd_pool_default_size
osd_pool_default_size - the number of copies of an object for new replicated pools
  (uint, advanced)
  Default: 3
  Minimum: 0
  Maximum: 10
  Can update at runtime: true
  Services: [mon]
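ceph config ls prints every known option, so it is usually piped through a filter; a small sketch (the osd_pool_default prefix is just an example):
# Find options related to default pool replication
ceph config ls | grep osd_pool_default
# Then look up the one of interest
ceph config help osd_pool_default_min_size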
Dump the cluster configuration database
[root@test-ceph1 ~]# ceph config dump
WHO MASK LEVEL OPTION VALUE RO
global basic container_image quay.io/ceph/ceph@sha256:a0f373aaaf5a5ca5c4379c09da24c771b8266a09dc9e2181f90eacf423d7326f *
mon advanced auth_allow_insecure_global_id_reclaim false
mon advanced public_network 192.168.222.0/24 *
mgr advanced mgr/cephadm/container_init True *
mgr advanced mgr/cephadm/migration_current 6 *
mgr advanced mgr/dashboard/ALERTMANAGER_API_HOST http://test-ceph1:9093 *
mgr advanced mgr/dashboard/GRAFANA_API_SSL_VERIFY false *
mgr advanced mgr/dashboard/GRAFANA_API_URL https://test-ceph1:3000 *
mgr advanced mgr/dashboard/PROMETHEUS_API_HOST http://test-ceph1:9095 *
mgr advanced mgr/dashboard/ssl_server_port 8443 *
mgr advanced mgr/orchestrator/orchestrator cephadm
osd host:test-ceph4 basic osd_memory_target 1532527138
osd host:test-ceph6 basic osd_memory_target 1532527138
osd advanced osd_memory_target_autotune true
osd.0 basic osd_mclock_max_capacity_iops_ssd 5392.182966
osd.1 basic osd_mclock_max_capacity_iops_ssd 1492.521681
osd.10 basic osd_mclock_max_capacity_iops_ssd 1324.061469
osd.11 basic osd_mclock_max_capacity_iops_ssd 3227.997224
osd.12 basic osd_mclock_max_capacity_iops_ssd 2034.782394
osd.13 basic osd_mclock_max_capacity_iops_ssd 3684.510859
osd.14 basic osd_mclock_max_capacity_iops_ssd 9492.307118
osd.2 basic osd_mclock_max_capacity_iops_ssd 9926.606247
osd.3 basic osd_mclock_max_capacity_iops_ssd 12794.165806
osd.4 basic osd_mclock_max_capacity_iops_ssd 1969.199850
osd.5 basic osd_mclock_max_capacity_iops_ssd 4431.331962
osd.6 basic osd_mclock_max_capacity_iops_ssd 4615.562726
osd.7 basic osd_mclock_max_capacity_iops_ssd 3730.672106
osd.8 basic osd_mclock_max_capacity_iops_ssd 10064.632014
osd.9 basic osd_mclock_max_capacity_iops_ssd 3597.473051
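The osd host:test-ceph4 rows above show that central-database entries can carry a mask (host:<hostname> or class:<device-class>) restricting where they apply. A hedged sketch of setting masked options; the values are examples only:
# Apply only to OSDs on host test-ceph4 (value in bytes, ~2 GiB)
ceph config set osd/host:test-ceph4 osd_memory_target 2147483648
# Apply only to OSDs whose device class is ssd
ceph config set osd/class:ssd osd_recovery_max_active 10
ceph config dump | grep osd_memory_target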
Show the runtime configuration of a specific daemon
[root@test-ceph1 ~]# ceph config show osd.0
NAME VALUE SOURCE OVERRIDES IGNORES
container_image quay.io/ceph/ceph@sha256:a0f373aaaf5a5ca5c4379c09da24c771b8266a09dc9e2181f90eacf423d7326f mon
daemonize false override
keyring $osd_data/keyring default
leveldb_log default
log_to_file false default
log_to_journald true default
log_to_stderr false default
mon_host [v2:192.168.222.131:3300/0,v1:192.168.222.131:6789/0] [v2:192.168.222.132:3300/0,v1:192.168.222.132:6789/0] [v2:192.168.222.133:3300/0,v1:192.168.222.133:6789/0] file
no_config_file false override
osd_delete_sleep 0.000000 override
osd_delete_sleep_hdd 0.000000 override
osd_delete_sleep_hybrid 0.000000 override
osd_delete_sleep_ssd 0.000000 override
osd_max_backfills 1 default
osd_mclock_max_capacity_iops_ssd 5392.182966 mon
osd_mclock_scheduler_background_best_effort_lim 0.900000 default
osd_mclock_scheduler_background_best_effort_res 0.000000 default
osd_mclock_scheduler_background_best_effort_wgt 1 default
osd_mclock_scheduler_background_recovery_lim 0.000000 default
osd_mclock_scheduler_background_recovery_res 0.500000 default
osd_mclock_scheduler_background_recovery_wgt 1 default
osd_mclock_scheduler_client_lim 0.000000 default
osd_mclock_scheduler_client_res 0.500000 default
osd_mclock_scheduler_client_wgt 1 default
osd_memory_target 1532527138 mon
osd_memory_target_autotune true mon
osd_recovery_max_active 0 default
osd_recovery_max_active_hdd 3 default
osd_recovery_max_active_ssd 10 default
osd_recovery_sleep 0.000000 override
osd_recovery_sleep_hdd 0.000000 override
osd_recovery_sleep_hybrid 0.000000 override
osd_recovery_sleep_ssd 0.000000 override
osd_scrub_sleep 0.000000 override
osd_snap_trim_sleep 0.000000 override
osd_snap_trim_sleep_hdd 0.000000 override
osd_snap_trim_sleep_hybrid 0.000000 override
osd_snap_trim_sleep_ssd 0.000000 override
rbd_default_features 61 default
rbd_qos_exclude_ops 0 default
setgroup ceph cmdline
setuser ceph cmdline
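The ceph config show output above is a condensed view; ceph config show-with-defaults prints the complete option set for a daemon, defaults included. A small sketch:
ceph config show-with-defaults osd.0 | less
# A single option's effective value
ceph config show osd.0 osd_max_backfills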
Show all parameters of a specific ceph daemon
# Run inside the daemon's container:
ceph daemon osd.1 config show

# Find the daemon's container
[root@test-ceph1 ~]# podman ps -a|grep 'osd.1'
045b80e7f869 quay.io/ceph/ceph@sha256:a0f373aaaf5a5ca5c4379c09da24c771b8266a09dc9e2181f90eacf423d7326f -n osd.1 -f --set... 2 hours ago Up 2 hours ago ceph-672db5d2-8543-11f0-9378-000c29adccdd-osd-1
84bf27c3385d quay.io/ceph/ceph@sha256:a0f373aaaf5a5ca5c4379c09da24c771b8266a09dc9e2181f90eacf423d7326f -n osd.12 -f --se... 2 hours ago Up 2 hours ago ceph-672db5d2-8543-11f0-9378-000c29adccdd-osd-12

# Enter the container
[root@test-ceph1 ~]# podman exec -it 045b80e7f869 /bin/bash
[root@test-ceph1 /]#
[root@test-ceph1 /]# # Run the command
[root@test-ceph1 /]# ceph daemon osd.1 config show
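On a cephadm-managed cluster, an alternative to looking up the container ID is cephadm shell, which opens a container bound to a given daemon; the admin-socket commands can then be run there. A sketch, assuming cephadm is installed on the host:
cephadm shell --name osd.1
# Inside the shell:
ceph daemon osd.1 config show | less
ceph daemon osd.1 config get debug_ms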
Reading and modifying configuration files
# Read a configuration file and apply its settings to the central configuration database
[root@test-ceph1 ~]# ceph config assimilate-conf -i /etc/ceph/ceph.conf

# Get a daemon's current runtime configuration
# ceph tell $type.$id config get

# Set a daemon's runtime configuration (lost after a restart)
# ceph tell $type.$id config set

# Set a daemon's configuration persistently in the central database
# ceph config set $type.$id

# View a daemon's configuration
# ceph config get $type.$id

# Dump the cluster configuration database
ceph config dump
# View the configuration of osd.1
ceph config show osd.1
[root@test-ceph1 ~]# ceph config show osd.1 debug_ms
0/0
[root@test-ceph1 ~]# ceph config get osd.1 debug_ms
0/0

# Set a configuration value for osd.1; ceph config set makes it persistent
[root@test-ceph1 ~]# ceph config set osd.1 debug_ms 10
[root@test-ceph1 ~]# ceph config show osd.1 debug_ms
10/10
[root@test-ceph1 ~]# ceph config get osd.1 debug_ms
10/10

# Restart osd.1 and check the value again
[root@test-ceph1 ~]# ceph orch daemon restart osd.1
Scheduled to restart osd.1 on host 'test-ceph1'
[root@test-ceph1 ~]# ceph config show osd.1 debug_ms
10/10
[root@test-ceph1 ~]# ceph config get osd.1 debug_ms
10/10

# Set a parameter temporarily (runtime override)
[root@test-ceph1 ~]# ceph tell osd.1 config get debug_ms
{"debug_ms": "10/10"
}
[root@test-ceph1 ~]# ceph tell osd.1 config set debug_ms 5
{"success": "osd_delete_sleep = '' osd_delete_sleep_hdd = '' osd_delete_sleep_hybrid = '' osd_delete_sleep_ssd = '' osd_max_backfills = '' osd_recovery_max_active = '' osd_recovery_max_active_hdd = '' osd_recovery_max_active_ssd = '' osd_recovery_sleep = '' osd_recovery_sleep_hdd = '' osd_recovery_sleep_hybrid = '' osd_recovery_sleep_ssd = '' osd_scrub_sleep = '' osd_snap_trim_sleep = '' osd_snap_trim_sleep_hdd = '' osd_snap_trim_sleep_hybrid = '' osd_snap_trim_sleep_ssd = '' "
}
[root@test-ceph1 ~]# ceph tell osd.1 config get debug_ms
{"debug_ms": "5/5"
}
[root@test-ceph1 ~]# ceph orch daemon restart osd.1
Scheduled to restart osd.1 on host 'test-ceph1'
[root@test-ceph1 ~]# ceph tell osd.1 config get debug_ms
{"debug_ms": "10/10"
}# ceph 管理页面也可以进行参数的设置
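To undo a persistent setting and fall back to the lower-priority sources, the entry can be removed from the central database; a minimal sketch:
ceph config rm osd.1 debug_ms
ceph config get osd.1 debug_ms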
ceph maps
# The mon map
[root@test-ceph1 ~]# ceph mon stat
e3: 3 mons at {test-ceph1=[v2:192.168.222.131:3300/0,v1:192.168.222.131:6789/0],test-ceph2=[v2:192.168.222.132:3300/0,v1:192.168.222.132:6789/0],test-ceph3=[v2:192.168.222.133:3300/0,v1:192.168.222.133:6789/0]} removed_ranks: {} disallowed_leaders: {}, election epoch 14, leader 0 test-ceph1, quorum 0,1,2 test-ceph1,test-ceph3,test-ceph2
[root@test-ceph1 ~]# ceph -s -f json-pretty
{
    "fsid": "672db5d2-8543-11f0-9378-000c29adccdd",
    "health": {
        "status": "HEALTH_OK",
        "checks": {},
        "mutes": []
    },
    "election_epoch": 14,
    "quorum": [0, 1, 2],
    "quorum_names": ["test-ceph1", "test-ceph3", "test-ceph2"],
    "quorum_age": 21005,
    "monmap": {
        "epoch": 3,
        "min_mon_release_name": "quincy",
        "num_mons": 3
    },
    "osdmap": {
        "epoch": 40,
        "num_osds": 15,
        "num_up_osds": 15,
        "osd_up_since": 1756526274,
        "num_in_osds": 15,
        "osd_in_since": 1756518979,
        "num_remapped_pgs": 0
    },
    "pgmap": {
        "pgs_by_state": [
            {
                "state_name": "active+clean",
                "count": 1
            }
        ],
        "num_pgs": 1,
        "num_pools": 1,
        "num_objects": 2,
        "data_bytes": 459280,
        "bytes_used": 4584620032,
        "bytes_avail": 1605965201408,
        "bytes_total": 1610549821440
    },
    "fsmap": {
        "epoch": 1,
        "by_rank": [],
        "up:standby": 0
    },
    "mgrmap": {
        "available": true,
        "num_standbys": 2,
        "modules": [
            "cephadm",
            "dashboard",
            "iostat",
            "nfs",
            "prometheus",
            "restful"
        ],
        "services": {
            "dashboard": "https://192.168.222.131:8443/",
            "prometheus": "http://192.168.222.131:9283/"
        }
    },
    "servicemap": {
        "epoch": 202,
        "modified": "2025-08-30T07:43:01.733669+0000",
        "services": {}
    },
    "progress_events": {}
}

[root@test-ceph1 ~]# ceph mon dump
epoch 3
fsid 672db5d2-8543-11f0-9378-000c29adccdd
last_changed 2025-08-30T01:54:54.827495+0000
created 2025-08-30T01:48:21.223003+0000
min_mon_release 17 (quincy)
election_strategy: 1
0: [v2:192.168.222.131:3300/0,v1:192.168.222.131:6789/0] mon.test-ceph1
1: [v2:192.168.222.133:3300/0,v1:192.168.222.133:6789/0] mon.test-ceph3
2: [v2:192.168.222.132:3300/0,v1:192.168.222.132:6789/0] mon.test-ceph2
dumped monmap epoch 3
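The other cluster maps can be dumped the same way, for example:
ceph osd dump    # OSD map: OSDs, pools, flags
ceph osd tree    # OSD map arranged by the CRUSH hierarchy
ceph fs dump     # file system (MDS) map
ceph mgr dump    # manager map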
The configuration database maintained on mon hosts
# Database location
# /var/lib/ceph/$fsid/mon.test-ceph1/store.db
[root@test-ceph1 ~]# ls /var/lib/ceph/672db5d2-8543-11f0-9378-000c29adccdd/mon.test-ceph1/store.db/

# When the database grows large, it can be compacted manually
[root@test-ceph1 ~]# ceph tell mon.test-ceph1 compact
compacted rocksdb in 0 seconds

# Or change the configuration so the database is compacted at every daemon start
[root@test-ceph1 ~]# ceph config get mon mon_compact_on_start
false
[root@test-ceph1 ~]# ceph config set mon mon_compact_on_start true
[root@test-ceph1 ~]# ceph config get mon mon_compact_on_start
true
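The database size itself can be checked directly on the mon host (path as above); a small sketch:
du -sh /var/lib/ceph/672db5d2-8543-11f0-9378-000c29adccdd/mon.test-ceph1/store.db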
Other database-related configuration parameters (see the example after the table)
Setting | Default | Description |
---|---|---|
mon_data_size_warn | 15GB | Warn when the MON database exceeds this size |
mon_data_avail_warn | 30% | Warn when the filesystem holding the database falls below 30% free space |
mon_data_avail_crit | 5% | Raise a health error when the filesystem holding the database falls below 5% free space |
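These thresholds are ordinary options in the central database and can be inspected or adjusted like any other; a hedged sketch, the 20 GB figure being only an example:
ceph config get mon mon_data_size_warn
# Raise the warning threshold to roughly 20 GB (value in bytes)
ceph config set mon mon_data_size_warn 21474836480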
cephx, the ceph cluster authentication protocol
# Service authentication protocol
[root@test-ceph1 ~]# ceph config get mon auth_service_required
cephx
# Cluster (inter-daemon) authentication protocol
[root@test-ceph1 ~]# ceph config get mon auth_cluster_required
cephx
# Client authentication protocol
[root@test-ceph1 ~]# ceph config get mon auth_client_required
cephx, none

# Locations of the keyring files
# ls /etc/ceph/ceph.client.admin.keyring
# ls /var/lib/ceph/$fsid/mon.$host/keyring
# ls /var/lib/ceph/$fsid/osd.$id/keyring

[root@test-ceph1 ~]# ls /etc/ceph/ceph.client.admin.keyring
/etc/ceph/ceph.client.admin.keyring
[root@test-ceph1 ~]# ls /var/lib/ceph/672db5d2-8543-11f0-9378-000c29adccdd/mon.test-ceph1/keyring
/var/lib/ceph/672db5d2-8543-11f0-9378-000c29adccdd/mon.test-ceph1/keyring
[root@test-ceph1 ~]# ls /var/lib/ceph/672db5d2-8543-11f0-9378-000c29adccdd/osd.1/keyring
/var/lib/ceph/672db5d2-8543-11f0-9378-000c29adccdd/osd.1/keyring
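A client selects which key to use via its user name and keyring; both can be passed explicitly on the command line. A sketch using the admin keyring listed above (this is what the plain ceph command does by default):
ceph -s --name client.admin --keyring /etc/ceph/ceph.client.admin.keyring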
ceph user creation and authorization
# Create a keyring whose key is granted all capabilities on the monitors
[root@test-ceph1 ~]# ceph-authtool --create-keyring /tmp/ceph.mon.keyring \
--gen-key -n mon.testuser1 --cap mon 'allow *'
creating /tmp/ceph.mon.keyring
[root@test-ceph1 ~]# ls /tmp/ceph.mon.keyring
/tmp/ceph.mon.keyring

# Create the client.testuser2 user with read access to the monitors and read/write access to all pools
[root@test-ceph1 ~]# ceph auth get-or-create client.testuser2 mon 'allow r' osd 'allow rw'
[client.testuser2]
        key = AQDosrJoiGdsHRAA1FnBSpH707DQtqT/W+y1QA==

# List users and their capabilities
[root@test-ceph1 ~]# ceph auth list
osd.0
        key: AQA7WrJoTepeDhAA/fyXMHM2zO1/IuD1IBk+3A==
        caps: [mgr] allow profile osd
        caps: [mon] allow profile osd
        caps: [osd] allow *
.......

# Check the cluster status
[root@test-ceph1 ~]# ceph status
# View the mon map
[root@test-ceph1 ~]# ceph mon dump
# View the mon_host value used by a mon
[root@test-ceph1 ~]# ceph config show mon.test-ceph1 mon_host
[v2:192.168.222.131:3300/0,v1:192.168.222.131:6789/0]
# View mon status
[root@test-ceph1 ~]# ceph mon stat

# View the cluster authentication entries
[root@test-ceph1 ~]# ceph auth ls

# Export the client.admin credentials
[root@test-ceph1 ~]# ceph auth get client.admin -o /tmp/adminkey
[root@test-ceph1 ~]# cat /tmp/adminkey
[client.admin]
        key = AQBkWLJo9xK1ExAAny0++X3nanZ/iQpshqjryA==
        caps mds = "allow *"
        caps mgr = "allow *"
        caps mon = "allow *"
        caps osd = "allow *"

# Create user client.app1 with r on mon and rw on osd; the key is printed to the screen
[root@test-ceph1 ~]# ceph auth get-or-create client.app1 \
mon 'allow r' osd 'allow rw'
[client.app1]
        key = AQArtrJoDLcGMxAAGJeAJuOsKBqBdxNgVF1C9w==

# Create a user with r on mon and rw on osd, saving the keyring to a file
# (note: the command below reuses client.app1, so its existing key is written to the client.app2 keyring file)
[root@test-ceph1 ~]# ceph auth get-or-create client.app1 \
mon 'allow r' osd 'allow rw' -o /etc/ceph/ceph.client.app2.keyring
[root@test-ceph1 ~]# cat /etc/ceph/ceph.client.app2.keyring
[client.app1]
        key = AQArtrJoDLcGMxAAGJeAJuOsKBqBdxNgVF1C9w==

# Review the permissions of the created users
[root@test-ceph1 ~]# ceph auth ls
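Capabilities of an existing user can be changed later, and users can be removed; a hedged sketch (the pool name is only an example):
# Replace client.app1's capabilities, limiting OSD access to one pool
ceph auth caps client.app1 mon 'allow r' osd 'allow rw pool=rbd'
# Print only the key, e.g. for use by another tool
ceph auth print-key client.app1
# Delete a user that is no longer needed
ceph auth del client.app1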
ceph networking
# Hosts need multiple NICs, each configured with an appropriate IP address
# Query the network settings of each daemon type
ceph config get osd public_network
ceph config get osd cluster_network
ceph config get mon public_network

# Set the cluster network by assimilating a prepared configuration file
ceph config assimilate-conf -i /mnt/osd-cluster-network.conf
ceph config get osd cluster_network

# Set the mon public_network to 172.25.250.0/24
ceph config get mon public_network
ceph config set mon public_network 172.25.250.0/24
ceph config get mon public_network
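The file passed to ceph config assimilate-conf is an ordinary INI-style ceph.conf fragment; a hypothetical example of what /mnt/osd-cluster-network.conf might contain (the subnet is a placeholder):
[osd]
cluster_network = 192.168.100.0/24
After assimilation the value shows up in ceph config dump; running OSDs typically need a restart to rebind to the new network.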
ceph configuration template
[root@test-ceph1 ~]# cat /usr/share/doc/ceph/sample.ceph.conf
##
# Sample ceph ceph.conf file.
##
# This file defines cluster membership, the various locations
# that Ceph stores data, and any other runtime options.

# If a 'host' is defined for a daemon, the init.d start/stop script will
# verify that it matches the hostname (or else ignore it). If it is
# not defined, it is assumed that the daemon is intended to start on
# the current host (e.g., in a setup with a startup.conf on each
# node).

## Metavariables
# $cluster ; Expands to the Ceph Storage Cluster name. Useful
# ; when running multiple Ceph Storage Clusters
# ; on the same hardware.
# ; Example: /etc/ceph/$cluster.keyring
# ; (Default: ceph)
#
# $type ; Expands to one of mds, osd, or mon, depending on
# ; the type of the instant daemon.
# ; Example: /var/lib/ceph/$type
#
# $id ; Expands to the daemon identifier. For osd.0, this
# ; would be 0; for mds.a, it would be a.
# ; Example: /var/lib/ceph/$type/$cluster-$id
#
# $host ; Expands to the host name of the instant daemon.
#
# $name ; Expands to $type.$id.
# ; Example: /var/run/ceph/$cluster-$name.asok

[global]
### http://docs.ceph.com/en/latest/rados/configuration/general-config-ref/

;fsid = {UUID}    # use `uuidgen` to generate your own UUID
;public network = 192.168.0.0/24
;cluster network = 192.168.0.0/24

# Each running Ceph daemon has a running process identifier (PID) file.
# The PID file is generated upon start-up.
# Type: String (optional)
# (Default: N/A). The default path is /var/run/$cluster/$name.pid.
pid file = /var/run/ceph/$name.pid

# If set, when the Ceph Storage Cluster starts, Ceph sets the max open fds
# at the OS level (i.e., the max # of file descriptors).
# It helps prevents Ceph OSD Daemons from running out of file descriptors.
# Type: 64-bit Integer (optional)
# (Default: 0)
;max open files = 131072

### http://docs.ceph.com/en/latest/rados/operations/
### http://docs.ceph.com/en/latest/rados/configuration/auth-config-ref/

# If enabled, the Ceph Storage Cluster daemons (i.e., ceph-mon, ceph-osd,
# and ceph-mds) must authenticate with each other.
# Type: String (optional); Valid settings are "cephx" or "none".
# (Default: cephx)
auth cluster required = cephx

# If enabled, the Ceph Storage Cluster daemons require Ceph Clients to
# authenticate with the Ceph Storage Cluster in order to access Ceph
# services.
# Type: String (optional); Valid settings are "cephx" or "none".
# (Default: cephx)
auth service required = cephx

# If enabled, the Ceph Client requires the Ceph Storage Cluster to
# authenticate with the Ceph Client.
# Type: String (optional); Valid settings are "cephx" or "none".
# (Default: cephx)
auth client required = cephx

# If set to true, Ceph requires signatures on all message traffic between
# the Ceph Client and the Ceph Storage Cluster, and between daemons
# comprising the Ceph Storage Cluster.
# Type: Boolean (optional)
# (Default: false)
;cephx require signatures = true

# kernel RBD client do not support authentication yet:
cephx cluster require signatures = true
cephx service require signatures = false

# The path to the keyring file.
# Type: String (optional)
# Default: /etc/ceph/$cluster.$name.keyring,/etc/ceph/$cluster.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin
;keyring = /etc/ceph/$cluster.$name.keyring

### http://docs.ceph.com/en/latest/rados/configuration/pool-pg-config-ref/

## Replication level, number of data copies.
# Type: 32-bit Integer
# (Default: 3)
;osd pool default size = 3

## Replication level in degraded state, less than 'osd pool default size' value.
# Sets the minimum number of written replicas for objects in the
# pool in order to acknowledge a write operation to the client. If
# minimum is not met, Ceph will not acknowledge the write to the
# client. This setting ensures a minimum number of replicas when
# operating in degraded mode.
# Type: 32-bit Integer
# (Default: 0), which means no particular minimum. If 0, minimum is size - (size / 2).
;osd pool default min size = 2

## Ensure you have a realistic number of placement groups. We recommend
## approximately 100 per OSD. E.g., total number of OSDs multiplied by 100
## divided by the number of replicas (i.e., osd pool default size). So for
## 10 OSDs and osd pool default size = 3, we'd recommend approximately
## (100 * 10) / 3 = 333
## always round to the nearest power of 2

# Description: The default number of placement groups for a pool. The
# default value is the same as pg_num with mkpool.
# Type: 32-bit Integer
# (Default: 8)
;osd pool default pg num = 128

# Description: The default number of placement groups for placement for a
# pool. The default value is the same as pgp_num with mkpool.
# PG and PGP should be equal (for now).
# Type: 32-bit Integer
# (Default: 8)
;osd pool default pgp num = 128

# The default CRUSH rule to use when creating a pool
# Type: 32-bit Integer
# (Default: 0)
;osd pool default crush rule = 0

# The bucket type to use for chooseleaf in a CRUSH rule.
# Uses ordinal rank rather than name.
# Type: 32-bit Integer
# (Default: 1) Typically a host containing one or more Ceph OSD Daemons.
;osd crush chooseleaf type = 1

### http://docs.ceph.com/en/latest/rados/troubleshooting/log-and-debug/

# The location of the logging file for your cluster.
# Type: String
# Required: No
# Default: /var/log/ceph/$cluster-$name.log
;log file = /var/log/ceph/$cluster-$name.log

# Determines if logging messages should appear in syslog.
# Type: Boolean
# Required: No
# (Default: false)
;log to syslog = true

### http://docs.ceph.com/en/latest/rados/configuration/ms-ref/

# Enable if you want your daemons to bind to IPv6 address instead of
# IPv4 ones. (Not required if you specify a daemon or cluster IP.)
# Type: Boolean
# (Default: false)
;ms bind ipv6 = true

##################
## Monitors
## You need at least one. You need at least three if you want to
## tolerate any node failures. Always create an odd number.
[mon]
### http://docs.ceph.com/en/latest/rados/configuration/mon-config-ref/
### http://docs.ceph.com/en/latest/rados/configuration/mon-osd-interaction/

# The IDs of initial monitors in a cluster during startup.
# If specified, Ceph requires an odd number of monitors to form an
# initial quorum (e.g., 3).
# Type: String
# (Default: None)
;mon initial members = mycephhost

;mon host = cephhost01,cephhost02
;mon addr = 192.168.0.101,192.168.0.102

# The monitor's data location
# Default: /var/lib/ceph/mon/$cluster-$id
;mon data = /var/lib/ceph/mon/$name

# The clock drift in seconds allowed between monitors.
# Type: Float
# (Default: .050)
;mon clock drift allowed = .15

# Exponential backoff for clock drift warnings
# Type: Float
# (Default: 5)
;mon clock drift warn backoff = 30    # Tell the monitor to backoff from this warning for 30 seconds

# The percentage of disk space used before an OSD is considered full.
# Type: Float
# (Default: .95)
;mon osd full ratio = .95

# The percentage of disk space used before an OSD is considered nearfull.
# Type: Float
# (Default: .85)
;mon osd nearfull ratio = .85

# The number of seconds Ceph waits before marking a Ceph OSD
# Daemon "down" and "out" if it doesn't respond.
# Type: 32-bit Integer
# (Default: 600)
;mon osd down out interval = 600

# The grace period in seconds before declaring unresponsive Ceph OSD
# Daemons "down".
# Type: 32-bit Integer
# (Default: 900)
;mon osd report timeout = 300

### http://docs.ceph.com/en/latest/rados/troubleshooting/log-and-debug/

# logging, for debugging monitor crashes, in order of
# their likelihood of being helpful :)
;debug ms = 1
;debug mon = 20
;debug paxos = 20
;debug auth = 20

;[mon.alpha]
; host = alpha
; mon addr = 192.168.0.10:6789

;[mon.beta]
; host = beta
; mon addr = 192.168.0.11:6789

;[mon.gamma]
; host = gamma
; mon addr = 192.168.0.12:6789

##################
## Metadata servers
# You must deploy at least one metadata server to use CephFS. There is
# experimental support for running multiple metadata servers. Do not run
# multiple metadata servers in production.
[mds]
### http://docs.ceph.com/en/latest/cephfs/mds-config-ref/

# where the mds keeps it's secret encryption keys
;keyring = /var/lib/ceph/mds/$name/keyring

# Determines whether a 'ceph-mds' daemon should poll and
# replay the log of an active MDS (hot standby).
# Type: Boolean
# (Default: false)
;mds standby replay = true

; mds logging to debug issues.
;debug ms = 1
;debug mds = 20
;debug journaler = 20

# The memory limit the MDS should enforce for its cache.
# (Default: 1G)
;mds cache memory limit = 2G

;[mds.alpha]
; host = alpha

;[mds.beta]
; host = beta

##################
## osd
# You need at least one. Two or more if you want data to be replicated.
# Define as many as you like.
[osd]
### http://docs.ceph.com/en/latest/rados/configuration/osd-config-ref/

# The path to the OSDs data.
# You must create the directory when deploying Ceph.
# You should mount a drive for OSD data at this mount point.
# We do not recommend changing the default.
# Type: String
# Default: /var/lib/ceph/osd/$cluster-$id
;osd data = /var/lib/ceph/osd/$name

## You can change the number of recovery operations to speed up recovery
## or slow it down if your machines can't handle it

# The number of active recovery requests per OSD at one time.
# More requests will accelerate recovery, but the requests
# places an increased load on the cluster.
# Type: 32-bit Integer
# (Default: 5)
;osd recovery max active = 3

# The maximum number of backfills allowed to or from a single OSD.
# Type: 64-bit Integer
# (Default: 10)
;osd max backfills = 5

# The maximum number of simultaneous scrub operations for a Ceph OSD Daemon.
# Type: 32-bit Int
# (Default: 1)
;osd max scrubs = 2

# You may add settings for ceph-deploy so that it will create and mount
# the correct type of file system. Remove the comment `#` character for
# the following settings and replace the values in parenthesis
# with appropriate values, or leave the following settings commented
# out to accept the default values.
#osd mkfs type = {fs-type}
#osd mkfs options {fs-type} = {mkfs options}   # default for xfs is "-f"
#osd mount options {fs-type} = {mount options} # default mount option is "rw, noatime"
;osd mkfs type = btrfs
;osd mount options btrfs = noatime,nodiratime

## Ideally, make this a separate disk or partition. A few
## hundred MB should be enough; more if you have fast or many
## disks. You can use a file under the osd data dir if need be
## (e.g. /data/$name/journal), but it will be slower than a
## separate disk or partition.

# The path to the OSD's journal. This may be a path to a file or a block
# device (such as a partition of an SSD). If it is a file, you must
# create the directory to contain it.
# We recommend using a drive separate from the osd data drive.
# Type: String
# Default: /var/lib/ceph/osd/$cluster-$id/journal
;osd journal = /var/lib/ceph/osd/$name/journal

# Check log files for corruption. Can be computationally expensive.
# Type: Boolean
# (Default: false)
;osd check for log corruption = true

### http://docs.ceph.com/en/latest/rados/configuration/journal-ref/

# The size of the journal in megabytes. If this is 0,
# and the journal is a block device, the entire block device is used.
# Since v0.54, this is ignored if the journal is a block device,
# and the entire block device is used.
# Type: 32-bit Integer
# (Default: 5120)
# Recommended: Begin with 1GB. Should be at least twice the product
# of the expected speed multiplied by "filestore max sync interval".
;osd journal size = 2048     ; journal size, in megabytes

## If you want to run the journal on a tmpfs, disable DirectIO
# Enables direct i/o to the journal.
# Requires "journal block align" set to "true".
# Type: Boolean
# Required: Yes when using aio.
# (Default: true)
;journal dio = false

# osd logging to debug osd issues, in order of likelihood of being helpful
;debug ms = 1
;debug osd = 20
;debug filestore = 20
;debug journal = 20

### http://docs.ceph.com/en/latest/rados/configuration/filestore-config-ref/

# The maximum interval in seconds for synchronizing the filestore.
# Type: Double (optional)
# (Default: 5)
;filestore max sync interval = 5

# Enable snapshots for a btrfs filestore.
# Type: Boolean
# Required: No. Only used for btrfs.
# (Default: true)
;filestore btrfs snap = false

# Enables the filestore flusher.
# Type: Boolean
# Required: No
# (Default: false)
;filestore flusher = true

# Defines the maximum number of in progress operations the file store
# accepts before blocking on queuing new operations.
# Type: Integer
# Required: No. Minimal impact on performance.
# (Default: 500)
;filestore queue max ops = 500

## Filestore and OSD settings can be tweak to achieve better performance
### http://docs.ceph.com/en/latest/rados/configuration/filestore-config-ref/#misc

# Min number of files in a subdir before merging into parent NOTE: A negative value means to disable subdir merging
# Type: Integer
# Required: No
# Default: -10
;filestore merge threshold = -10

# filestore_split_multiple * abs(filestore_merge_threshold) * 16 is the maximum number of files in a subdirectory before splitting into child directories.
# Type: Integer
# Required: No
# Default: 2
;filestore split multiple = 2

# The number of filesystem operation threads that execute in parallel.
# Type: Integer
# Required: No
# Default: 2
;filestore op threads = 4

## CRUSH

# By default OSDs update their details (location, weight and root) on the CRUSH map during startup
# Type: Boolean
# Required: No;
# (Default: true)
;osd crush update on start = false

;[osd.0]
; host = delta

;[osd.1]
; host = epsilon

;[osd.2]
; host = zeta

;[osd.3]
; host = eta

##################
## client settings
[client]
### http://docs.ceph.com/en/latest/rbd/rbd-config-ref/

# Enable caching for RADOS Block Device (RBD).
# Type: Boolean
# Required: No
# (Default: true)
rbd cache = true

# The RBD cache size in bytes.
# Type: 64-bit Integer
# Required: No
# (Default: 32 MiB)
;rbd cache size = 33554432

# The dirty limit in bytes at which the cache triggers write-back.
# If 0, uses write-through caching.
# Type: 64-bit Integer
# Required: No
# Constraint: Must be less than rbd cache size.
# (Default: 24 MiB)
;rbd cache max dirty = 25165824

# The dirty target before the cache begins writing data to the data storage.
# Does not block writes to the cache.
# Type: 64-bit Integer
# Required: No
# Constraint: Must be less than rbd cache max dirty.
# (Default: 16 MiB)
;rbd cache target dirty = 16777216

# The number of seconds dirty data is in the cache before writeback starts.
# Type: Float
# Required: No
# (Default: 1.0)
;rbd cache max dirty age = 1.0

# Start out in write-through mode, and switch to write-back after the
# first flush request is received. Enabling this is a conservative but
# safe setting in case VMs running on rbd are too old to send flushes,
# like the virtio driver in Linux before 2.6.32.
# Type: Boolean
# Required: No
# (Default: true)
;rbd cache writethrough until flush = true

# The Ceph admin socket allows you to query a daemon via a socket interface
# From a client perspective this can be a virtual machine using librbd
# Type: String
# Required: No
;admin socket = /var/run/ceph/$cluster-$type.$id.$pid.$cctid.asok

##################
## radosgw client settings
[client.radosgw.gateway]
### http://docs.ceph.com/en/latest/radosgw/config-ref/

# Sets the location of the data files for Ceph Object Gateway.
# You must create the directory when deploying Ceph.
# We do not recommend changing the default.
# Type: String
# Default: /var/lib/ceph/radosgw/$cluster-$id
;rgw data = /var/lib/ceph/radosgw/$name

# Client's hostname
;host = ceph-radosgw

# where the radosgw keeps it's secret encryption keys
;keyring = /etc/ceph/ceph.client.radosgw.keyring

# FastCgiExternalServer uses this socket.
# If you do not specify a socket path, Ceph Object Gateway will not run as an external server.
# The path you specify here must be the same as the path specified in the rgw.conf file.
# Type: String
# Default: None
;rgw socket path = /var/run/ceph/ceph.radosgw.gateway.fastcgi.sock

# The location of the logging file for your radosgw.
# Type: String
# Required: No
# Default: /var/log/ceph/$cluster-$name.log
;log file = /var/log/ceph/client.radosgw.gateway.log

# Enable 100-continue if it is operational.
# Type: Boolean
# Default: true
;rgw print continue = false

# The DNS name of the served domain.
# Type: String
# Default: None
;rgw dns name = radosgw.ceph.internal