Execution
Date
14 Dec 2025 10:15:01 +0000
Duration
00:06:33.21
Controller
aio1.openstack.local
User
root
Versions
Ansible
2.18.6
ara
1.7.4 / 1.7.4
Python
3.13.5
Summary
7
Hosts
567
Tasks
554
Results
37
Plays
221
Files
0
Records
File: /etc/ansible/roles/ceph-ansible/roles/ceph-osd/defaults/main.yml
---
###########
# GENERAL #
###########

# Even though OSD nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on OSD nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
copy_admin_key: false

##############
# CEPH OPTIONS
##############

# Devices to be used as OSDs
# You can pre-provision disks that are not present yet.
# Ansible will just skip them. Newly added disks will be
# automatically configured during the next run.
#
# Declare devices to be used as OSDs
# All scenarios (except the 3rd) inherit from the following device declaration
# Note: This scenario uses the ceph-volume lvm batch method to provision OSDs
#
# devices:
#   - /dev/sdb
#   - /dev/sdc
#   - /dev/sdd
#   - /dev/sde
devices: []

# Declare devices to be used as block.db devices
# dedicated_devices:
#   - /dev/sdx
#   - /dev/sdy
dedicated_devices: []

# Declare devices to be used as block.wal devices
# bluestore_wal_devices:
#   - /dev/nvme0n1
#   - /dev/nvme0n2
bluestore_wal_devices: []

# 'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
# Device discovery is based on the Ansible fact 'ansible_facts["devices"]'
# which reports all the devices on a system. If chosen, all the disks
# found will be passed to ceph-volume lvm batch. You should not be worried about using
# this option since ceph-volume has a built-in check which looks for empty devices.
# Thus devices with existing partition tables will not be used.
#
osd_auto_discovery: false

# Encrypt your OSD device using dmcrypt
# If set to true, no matter which osd_objectstore you use the data will be encrypted
dmcrypt: false

# Use ceph-volume to create OSDs from logical volumes.
# lvm_volumes is a list of dictionaries.
#
# Filestore: Each dictionary must contain a data, journal and vg_name key. Any
# logical volume or logical group used must be a name and not a path. data
# can be a logical volume, device or partition. journal can be either a lv or partition.
# You can not use the same journal for many data lvs.
# data_vg must be the volume group name of the data lv, only applicable when data is an lv.
# journal_vg is optional and must be the volume group name of the journal lv, if applicable.
# For example:
# lvm_volumes:
#   - data: data-lv1
#     data_vg: vg1
#     journal: journal-lv1
#     journal_vg: vg2
#     crush_device_class: foo
#   - data: data-lv2
#     journal: /dev/sda1
#     data_vg: vg1
#   - data: data-lv3
#     journal: /dev/sdb1
#     data_vg: vg2
#   - data: /dev/sda
#     journal: /dev/sdb1
#   - data: /dev/sda1
#     journal: /dev/sdb1
#
# Bluestore: Each dictionary must contain at least data. When defining wal or
# db, it must have both the lv name and vg group (db and wal are not required).
# This allows for four combinations: just data, data and wal, data and wal and
# db, data and db.
# For example:
# lvm_volumes:
#   - data: data-lv1
#     data_vg: vg1
#     wal: wal-lv1
#     wal_vg: vg1
#     crush_device_class: foo
#   - data: data-lv2
#     db: db-lv2
#     db_vg: vg2
#   - data: data-lv3
#     wal: wal-lv1
#     wal_vg: vg3
#     db: db-lv3
#     db_vg: vg3
#   - data: data-lv4
#     data_vg: vg4
#   - data: /dev/sda
#   - data: /dev/sdb1
lvm_volumes: []
crush_device_class: ""
osds_per_device: 1

###############
# CRUSH RULES #
###############

crush_rule_config: false

crush_rule_hdd:
  name: HDD
  root: default
  type: host
  class: hdd
  default: false

crush_rule_ssd:
  name: SSD
  root: default
  type: host
  class: ssd
  default: false

crush_rules:
  - "{{ crush_rule_hdd }}"
  - "{{ crush_rule_ssd }}"

ceph_ec_profiles: {}

# Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }}
# and will move hosts into them which might lead to significant data movement in the cluster!
#
# In order for the playbook to create CRUSH hierarchy, you have to setup your Ansible inventory file like so:
#
# [osds]
# ceph-osd-01 osd_crush_location="{ 'root': 'mon-roottt', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd-01' }"
#
# Note that 'host' is mandatory and that you need to submit at least two bucket types (including the host)
create_crush_tree: false

##########
# DOCKER #
##########

ceph_config_keys: []  # DON'T TOUCH ME

# Resource limitation
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
ceph_osd_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_osd_docker_cpu_limit: 4

# The next two variables are undefined, and thus, unused by default.
# If `lscpu | grep NUMA` returned the following:
# NUMA node0 CPU(s):     0,2,4,6,8,10,12,14,16
# NUMA node1 CPU(s):     1,3,5,7,9,11,13,15,17
# then, the following would run the OSD on the first NUMA node only.
# ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
# ceph_osd_docker_cpuset_mems: "0"

# PREPARE DEVICE
#
# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
#
ceph_osd_docker_devices: "{{ devices }}"
# Quoted: a plain value starting with '-' is fragile YAML; the string is unchanged.
ceph_osd_docker_prepare_env: "-e OSD_JOURNAL_SIZE={{ journal_size }}"

# ACTIVATE DEVICE
#
ceph_osd_numactl_opts: ""

# If you want to add parameters, you should retain the existing ones and include the new ones.
ceph_osd_container_params:
  volumes:
    - /dev:/dev
    - /var/lib/ceph/bootstrap-osd/ceph.keyring:/var/lib/ceph/bootstrap-osd/ceph.keyring:z
    - /var/lib/ceph/osd/{{ cluster }}-"${OSD_ID}":/var/lib/ceph/osd/{{ cluster }}-"${OSD_ID}":z
    - /var/run/udev/:/var/run/udev/
    - /run/lvm/:/run/lvm/
  envs:
    OSD_ID: "${OSD_ID}"
  args:
    - "-f"
    - "-i=${OSD_ID}"

###########
# SYSTEMD #
###########

# ceph_osd_systemd_overrides will override the systemd settings
# for the ceph-osd services.
# For example, to set "PrivateDevices=false" you can specify:
# ceph_osd_systemd_overrides:
#   Service:
#     PrivateDevices: false

#########
# CHECK #
#########

nb_retry_wait_osd_up: 60
delay_wait_osd_up: 10