{"id":275,"sha1":"b66869a442a6dbfe402c76cf77b1be0f7cdd42bb","playbook":{"id":3,"items":{"plays":46,"tasks":924,"results":1203,"hosts":15,"files":212,"records":0},"arguments":{"version":null,"verbosity":0,"private_key_file":null,"remote_user":null,"connection":"openstack.osa.ssh","timeout":null,"ssh_common_args":null,"sftp_extra_args":null,"scp_extra_args":null,"ssh_extra_args":null,"ask_pass":false,"connection_password_file":null,"force_handlers":true,"flush_cache":false,"become":false,"become_method":"sudo","become_user":null,"become_ask_pass":false,"become_password_file":null,"tags":["all"],"skip_tags":[],"check":false,"diff":false,"inventory":["/home/zuul/src/opendev.org/openstack/openstack-ansible/inventory/dynamic_inventory.py","/home/zuul/src/opendev.org/openstack/openstack-ansible/inventory/inventory.ini","/etc/openstack_deploy/inventory.ini"],"listhosts":false,"subset":null,"extra_vars":"Not saved by ARA as configured by 'ignored_arguments'","vault_ids":[],"ask_vault_pass":false,"vault_password_files":[],"forks":8,"module_path":null,"syntax":false,"listtasks":false,"listtags":false,"step":false,"start_at_task":null,"args":["setup-infrastructure.yml"]},"labels":[{"id":1,"name":"check:False"},{"id":2,"name":"tags:all"}],"started":"2025-12-15T09:55:36.904008Z","ended":"2025-12-15T10:16:50.367261Z","duration":"00:21:13.463253","name":null,"ansible_version":"2.18.6","client_version":"1.7.4","python_version":"3.12.3","server_version":"1.7.4","status":"completed","path":"/home/zuul/src/opendev.org/openstack/openstack-ansible/playbooks/setup-infrastructure.yml","controller":"aio1.openstack.local","user":"root"},"content":"---\n###########\n# GENERAL #\n###########\n\n# Even though OSD nodes should not have the admin key\n# at their disposal, some people might want to have it\n# distributed on OSD nodes. 
Setting 'copy_admin_key' to 'true'\n# will copy the admin key to the /etc/ceph/ directory\ncopy_admin_key: false\n\n\n##############\n# CEPH OPTIONS\n##############\n\n# Devices to be used as OSDs\n# You can pre-provision disks that are not present yet.\n# Ansible will just skip them. Newly added disks will be\n# automatically configured during the next run.\n#\n\n\n# Declare devices to be used as OSDs\n# All scenarios (except the 3rd) inherit from the following device declaration\n# Note: This scenario uses the ceph-volume lvm batch method to provision OSDs\n\n# devices:\n#   - /dev/sdb\n#   - /dev/sdc\n#   - /dev/sdd\n#   - /dev/sde\n\ndevices: []\n\n# Declare devices to be used as block.db devices\n\n# dedicated_devices:\n#   - /dev/sdx\n#   - /dev/sdy\n\ndedicated_devices: []\n\n# Declare devices to be used as block.wal devices\n\n# bluestore_wal_devices:\n#   - /dev/nvme0n1\n#   - /dev/nvme0n2\n\nbluestore_wal_devices: []\n\n# 'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.\n# Device discovery is based on the Ansible fact 'ansible_facts[\"devices\"]'\n# which reports all the devices on a system. If chosen, all the disks\n# found will be passed to ceph-volume lvm batch. You should not be worried about using\n# this option since ceph-volume has a built-in check which looks for empty devices.\n# Thus devices with existing partition tables will not be used.\n#\nosd_auto_discovery: false\n\n# Encrypt your OSD device using dmcrypt\n# If set to True, no matter which osd_objectstore you use the data will be encrypted\ndmcrypt: false\n\n# Use ceph-volume to create OSDs from logical volumes.\n# lvm_volumes is a list of dictionaries.\n#\n# Filestore: Each dictionary must contain a data, journal and vg_name key. Any\n# logical volume or logical group used must be a name and not a path.  data\n# can be a logical volume, device or partition. 
journal can be either a lv or partition.\n# You can not use the same journal for many data lvs.\n# data_vg must be the volume group name of the data lv, only applicable when data is an lv.\n# journal_vg is optional and must be the volume group name of the journal lv, if applicable.\n# For example:\n# lvm_volumes:\n#   - data: data-lv1\n#     data_vg: vg1\n#     journal: journal-lv1\n#     journal_vg: vg2\n#     crush_device_class: foo\n#   - data: data-lv2\n#     journal: /dev/sda1\n#     data_vg: vg1\n#   - data: data-lv3\n#     journal: /dev/sdb1\n#     data_vg: vg2\n#   - data: /dev/sda\n#     journal: /dev/sdb1\n#   - data: /dev/sda1\n#     journal: /dev/sdb1\n#\n# Bluestore: Each dictionary must contain at least data. When defining wal or\n# db, it must have both the lv name and vg group (db and wal are not required).\n# This allows for four combinations: just data, data and wal, data and wal and\n# db, data and db.\n# For example:\n# lvm_volumes:\n#   - data: data-lv1\n#     data_vg: vg1\n#     wal: wal-lv1\n#     wal_vg: vg1\n#     crush_device_class: foo\n#   - data: data-lv2\n#     db: db-lv2\n#     db_vg: vg2\n#   - data: data-lv3\n#     wal: wal-lv1\n#     wal_vg: vg3\n#     db: db-lv3\n#     db_vg: vg3\n#   - data: data-lv4\n#     data_vg: vg4\n#   - data: /dev/sda\n#   - data: /dev/sdb1\n\nlvm_volumes: []\ncrush_device_class: \"\"\nosds_per_device: 1\n\n###############\n# CRUSH RULES #\n###############\ncrush_rule_config: false\n\ncrush_rule_hdd:\n  name: HDD\n  root: default\n  type: host\n  class: hdd\n  default: false\n\ncrush_rule_ssd:\n  name: SSD\n  root: default\n  type: host\n  class: ssd\n  default: false\n\ncrush_rules:\n  - \"{{ crush_rule_hdd }}\"\n  - \"{{ crush_rule_ssd }}\"\n\nceph_ec_profiles: {}\n\n# Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }}\n# and will move hosts into them which might lead to significant data movement in the cluster!\n#\n# In order for the playbook to create CRUSH 
hierarchy, you have to set up your Ansible inventory file like so:\n#\n# [osds]\n# ceph-osd-01 osd_crush_location=\"{ 'root': 'mon-roottt', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd-01' }\"\n#\n# Note that 'host' is mandatory and that you need to submit at least two bucket types (including the host)\ncreate_crush_tree: false\n\n##########\n# DOCKER #\n##########\n\nceph_config_keys: [] # DON'T TOUCH ME\n\n# Resource limitation\n# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints\n# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations\nceph_osd_docker_memory_limit: \"{{ ansible_facts['memtotal_mb'] }}m\"\nceph_osd_docker_cpu_limit: 4\n\n# The next two variables are undefined, and thus, unused by default.\n# If `lscpu | grep NUMA` returned the following:\n#  NUMA node0 CPU(s):     0,2,4,6,8,10,12,14,16\n#  NUMA node1 CPU(s):     1,3,5,7,9,11,13,15,17\n# then, the following would run the OSD on the first NUMA node only.\n# ceph_osd_docker_cpuset_cpus: \"0,2,4,6,8,10,12,14,16\"\n# ceph_osd_docker_cpuset_mems: \"0\"\n\n# PREPARE DEVICE\n#\n# WARNING /!\\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above\n#\nceph_osd_docker_devices: \"{{ devices }}\"\nceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}\n\n# ACTIVATE DEVICE\n#\nceph_osd_numactl_opts: \"\"\n\n# If you want to add parameters, you should retain the existing ones and include the new ones.\nceph_osd_container_params:\n  volumes:\n    - /dev:/dev\n    - /var/lib/ceph/bootstrap-osd/ceph.keyring:/var/lib/ceph/bootstrap-osd/ceph.keyring:z\n    - /var/lib/ceph/osd/{{ cluster }}-\"${OSD_ID}\":/var/lib/ceph/osd/{{ cluster }}-\"${OSD_ID}\":z\n    - /var/run/udev/:/var/run/udev/\n    - /run/lvm/:/run/lvm/\n  envs:\n    OSD_ID: ${OSD_ID}\n  args:\n    - -f\n    - -i=${OSD_ID}\n\n###########\n# SYSTEMD 
#\n###########\n\n# ceph_osd_systemd_overrides will override the systemd settings\n# for the ceph-osd services.\n# For example, to set \"PrivateDevices=false\" you can specify:\n# ceph_osd_systemd_overrides:\n#   Service:\n#     PrivateDevices: false\n\n\n###########\n#  CHECK  #\n###########\n\nnb_retry_wait_osd_up: 60\ndelay_wait_osd_up: 10\n","created":"2025-12-15T09:55:43.452132Z","updated":"2025-12-15T09:55:43.452159Z","path":"/etc/ansible/roles/ceph-ansible/roles/ceph-osd/defaults/main.yml"}