Execution
  Date:       14 Dec 2025 10:15:01 +0000
  Duration:   00:06:33.21
  Controller: aio1.openstack.local
  User:       root

Versions
  Ansible: 2.18.6
  ara:     1.7.4 / 1.7.4
  Python:  3.13.5

Summary
  7 Hosts, 567 Tasks, 554 Results, 37 Plays, 221 Files, 0 Records

File: /etc/ansible/roles/ceph-ansible/roles/ceph-osd/defaults/main.yml

---
###########
# GENERAL #
###########

# Even though OSD nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on OSD nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
copy_admin_key: false


##############
# CEPH OPTIONS
##############

# Devices to be used as OSDs
# You can pre-provision disks that are not present yet:
# Ansible will simply skip them, and newly added disks will be
# configured automatically during the next run.

# Declare devices to be used as OSDs
# All scenarios (except the 3rd) inherit from the following device declaration
# Note: This scenario uses the ceph-volume lvm batch method to provision OSDs

# devices:
#   - /dev/sdb
#   - /dev/sdc
#   - /dev/sdd
#   - /dev/sde

devices: []

# Declare devices to be used as block.db devices

# dedicated_devices:
#   - /dev/sdx
#   - /dev/sdy

dedicated_devices: []

# Declare devices to be used as block.wal devices

# bluestore_wal_devices:
#   - /dev/nvme0n1
#   - /dev/nvme0n2

bluestore_wal_devices: []

# 'osd_auto_discovery' mode removes the need to fill out the 'devices' variable above.
# Device discovery is based on the Ansible fact 'ansible_facts["devices"]',
# which reports all the devices on a system. If enabled, every disk found
# will be passed to ceph-volume lvm batch. You need not worry about using
# this option, since ceph-volume has a built-in check that looks for empty devices:
# devices with an existing partition table will not be used.
#
osd_auto_discovery: false
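
# To preview which disks that fact reports on a host before enabling this
# mode, you can run an ad-hoc setup call (hypothetical host pattern, adjust
# to your own inventory):
#   ansible <osd-host> -m setup -a 'filter=ansible_devices'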

# Encrypt your OSD devices using dmcrypt
# If set to true, the data will be encrypted no matter which osd_objectstore you use
dmcrypt: false

# Use ceph-volume to create OSDs from logical volumes.
# lvm_volumes is a list of dictionaries.
#
# Filestore: Each dictionary must contain a data, journal and vg_name key. Any
# logical volume or volume group used must be given as a name and not a path. data
# can be a logical volume, device or partition. journal can be either an lv or a partition.
# You cannot use the same journal for multiple data lvs.
# data_vg must be the volume group name of the data lv, and is only applicable when data is an lv.
# journal_vg is optional and must be the volume group name of the journal lv, if applicable.
# For example:
# lvm_volumes:
#   - data: data-lv1
#     data_vg: vg1
#     journal: journal-lv1
#     journal_vg: vg2
#     crush_device_class: foo
#   - data: data-lv2
#     journal: /dev/sda1
#     data_vg: vg1
#   - data: data-lv3
#     journal: /dev/sdb1
#     data_vg: vg2
#   - data: /dev/sda
#     journal: /dev/sdb1
#   - data: /dev/sda1
#     journal: /dev/sdb1
#
# Bluestore: Each dictionary must contain at least data. When defining wal or
# db, both the lv name and the vg name must be provided (db and wal are not required).
# This allows for four combinations: just data; data and wal; data, wal and
# db; or data and db.
# For example:
# lvm_volumes:
#   - data: data-lv1
#     data_vg: vg1
#     wal: wal-lv1
#     wal_vg: vg1
#     crush_device_class: foo
#   - data: data-lv2
#     db: db-lv2
#     db_vg: vg2
#   - data: data-lv3
#     wal: wal-lv1
#     wal_vg: vg3
#     db: db-lv3
#     db_vg: vg3
#   - data: data-lv4
#     data_vg: vg4
#   - data: /dev/sda
#   - data: /dev/sdb1

lvm_volumes: []
# CRUSH device class to assign to the OSDs (empty lets Ceph detect hdd/ssd itself)
crush_device_class: ""
# Number of OSDs to create per device (passed to ceph-volume lvm batch)
osds_per_device: 1
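
# For example, on all-flash nodes some deployments create more than one OSD
# per device (a hypothetical tuning, not a default recommendation; validate
# it for your hardware):
# osds_per_device: 2
# crush_device_class: "nvme"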

###############
# CRUSH RULES #
###############
crush_rule_config: false

crush_rule_hdd:
  name: HDD
  root: default
  type: host
  class: hdd
  default: false

crush_rule_ssd:
  name: SSD
  root: default
  type: host
  class: ssd
  default: false

crush_rules:
  - "{{ crush_rule_hdd }}"
  - "{{ crush_rule_ssd }}"

ceph_ec_profiles: {}
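
# A hypothetical erasure-code profile sketch: 'k' and 'm' are the standard
# EC profile parameters, but the profile name and the exact keys this role
# consumes are assumptions to verify against the role's tasks:
# ceph_ec_profiles:
#   myecprofile:
#     k: 4
#     m: 2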

# Caution: this will create CRUSH roots and racks according to the hostvar {{ osd_crush_location }}
# and will move hosts into them, which might lead to significant data movement in the cluster!
#
# In order for the playbook to create the CRUSH hierarchy, you have to set up your Ansible inventory file like so:
#
# [osds]
# ceph-osd-01 osd_crush_location="{ 'root': 'mon-roottt', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd-01' }"
#
# Note that 'host' is mandatory and that you need to declare at least two bucket types (including the host)
create_crush_tree: false

##########
# DOCKER #
##########

ceph_config_keys: [] # DON'T TOUCH ME

# Resource limitation
# For the whole list of limits you can apply, see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
ceph_osd_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_osd_docker_cpu_limit: 4
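
# For example, to cap each OSD container rather than allowing all host memory
# (a hypothetical override; size it for your workload):
# ceph_osd_docker_memory_limit: "8192m"
# ceph_osd_docker_cpu_limit: 2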

# The next two variables are undefined, and thus unused, by default.
# If `lscpu | grep NUMA` returned the following:
#  NUMA node0 CPU(s):     0,2,4,6,8,10,12,14,16
#  NUMA node1 CPU(s):     1,3,5,7,9,11,13,15,17
# then the following would run the OSD on the first NUMA node only.
# ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
# ceph_osd_docker_cpuset_mems: "0"

# PREPARE DEVICE
#
# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
#
ceph_osd_docker_devices: "{{ devices }}"
ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}

# ACTIVATE DEVICE
#
ceph_osd_numactl_opts: ""
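
# For example, to pin OSD processes to the first NUMA node with numactl
# (a hypothetical setting mirroring the cpuset example above):
# ceph_osd_numactl_opts: "--cpunodebind=0 --membind=0"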

# If you override this variable, keep the existing entries and append your new
# ones (see the hypothetical example after the defaults below).
ceph_osd_container_params:
  volumes:
    - /dev:/dev
    - /var/lib/ceph/bootstrap-osd/ceph.keyring:/var/lib/ceph/bootstrap-osd/ceph.keyring:z
    - /var/lib/ceph/osd/{{ cluster }}-"${OSD_ID}":/var/lib/ceph/osd/{{ cluster }}-"${OSD_ID}":z
    - /var/run/udev/:/var/run/udev/
    - /run/lvm/:/run/lvm/
  envs:
    OSD_ID: ${OSD_ID}
  args:
    - -f
    - -i=${OSD_ID}
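
# A hypothetical override that keeps the defaults while adding one extra bind
# mount; copy the full structure, since redefining the variable replaces it
# entirely:
# ceph_osd_container_params:
#   volumes:
#     - /dev:/dev
#     - /var/lib/ceph/bootstrap-osd/ceph.keyring:/var/lib/ceph/bootstrap-osd/ceph.keyring:z
#     - /var/lib/ceph/osd/{{ cluster }}-"${OSD_ID}":/var/lib/ceph/osd/{{ cluster }}-"${OSD_ID}":z
#     - /var/run/udev/:/var/run/udev/
#     - /run/lvm/:/run/lvm/
#     - /etc/localtime:/etc/localtime:ro
#   envs:
#     OSD_ID: ${OSD_ID}
#   args:
#     - -f
#     - -i=${OSD_ID}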

###########
# SYSTEMD #
###########

# ceph_osd_systemd_overrides will override the systemd settings
# for the ceph-osd services.
# For example, to set "PrivateDevices=false" you can specify:
# ceph_osd_systemd_overrides:
#   Service:
#     PrivateDevices: false


###########
#  CHECK  #
###########

# Number of times to check whether an OSD is up, and the delay (in seconds)
# between checks: 60 retries x 10 seconds = up to 10 minutes by default.
nb_retry_wait_osd_up: 60
delay_wait_osd_up: 10