{"id":756,"sha1":"9d0319077aa0eb3854e918c79423dd30fb3778d3","playbook":{"id":4,"items":{"plays":32,"tasks":1505,"results":1497,"hosts":12,"files":487,"records":0},"arguments":{"version":null,"verbosity":0,"private_key_file":null,"remote_user":null,"connection":"openstack.osa.ssh","timeout":null,"ssh_common_args":null,"sftp_extra_args":null,"scp_extra_args":null,"ssh_extra_args":null,"ask_pass":false,"connection_password_file":null,"force_handlers":true,"flush_cache":false,"become":false,"become_method":"sudo","become_user":null,"become_ask_pass":false,"become_password_file":null,"tags":["all"],"skip_tags":[],"check":false,"diff":false,"inventory":["/home/zuul/src/opendev.org/openstack/openstack-ansible/inventory/dynamic_inventory.py","/home/zuul/src/opendev.org/openstack/openstack-ansible/inventory/inventory.ini","/etc/openstack_deploy/inventory.ini"],"listhosts":false,"subset":null,"extra_vars":"Not saved by ARA as configured by 'ignored_arguments'","vault_ids":[],"ask_vault_pass":false,"vault_password_files":[],"forks":4,"module_path":null,"syntax":false,"listtasks":false,"listtags":false,"step":false,"start_at_task":null,"args":["setup-openstack.yml"]},"labels":[{"id":1,"name":"check:False"},{"id":2,"name":"tags:all"}],"started":"2025-12-08T13:57:07.871967Z","ended":"2025-12-08T14:21:54.049657Z","duration":"00:24:46.177690","name":null,"ansible_version":"2.18.6","client_version":"1.7.4","python_version":"3.12.11","server_version":"1.7.4","status":"failed","path":"/home/zuul/src/opendev.org/openstack/openstack-ansible/playbooks/setup-openstack.yml","controller":"aio1.openstack.local","user":"root"},"content":"---\n# You can override vars by using host or group vars\n\n###########\n# GENERAL #\n###########\n\n# Even though RGW nodes should not have the admin key\n# at their disposal, some people might want to have it\n# distributed on RGW nodes. Setting 'copy_admin_key' to 'true'\n# will copy the admin key to the /etc/ceph/ directory\ncopy_admin_key: false\n\n##########\n# TUNING #\n##########\n\n# Declaring rgw_create_pools will create pools with the given number of pgs,\n# size, and type. 

##########
# TUNING #
##########

# Declaring rgw_create_pools will create pools with the given pg_num,
# size, and type. The following are some important notes on this automatic
# pool creation:
#   - The pools and associated pg_num values below are merely examples of
#     pools that could be automatically created when rgws are deployed.
#   - The default pg_num is 8 (from osd_pool_default_pg_num) for pools
#     created when rgw_create_pools isn't declared and configured.
#   - A pgcalc tool should be used to determine the optimal sizes for the
#     rgw.buckets.data and rgw.buckets.index pools, as well as any other
#     pools declared in this dictionary.
#     https://ceph.io/pgcalc is the upstream pgcalc tool.
#     https://access.redhat.com/labsinfo/cephpgc is a pgcalc tool offered by
#     Red Hat if you are using RHCS.
#   - The default value of {{ rgw_zone }} is 'default'.
#   - The type must be set as either 'replicated' or 'ec' for each pool.
#   - If a pool's type is 'ec', the k and m values must be set via the
#     ec_k and ec_m variables.
#   - The rule_name key can be set to a specific crush rule (which must
#     already exist). If the key isn't set, the pool falls back to the
#     default replicated_rule. This only works for the replicated pool
#     type, not erasure.

# rgw_create_pools:
#   "{{ rgw_zone }}.rgw.buckets.data":
#     pg_num: 64
#     type: ec
#     ec_profile: myecprofile
#     ec_k: 5
#     ec_m: 3
#   "{{ rgw_zone }}.rgw.buckets.index":
#     pg_num: 16
#     size: 3
#     type: replicated
#   "{{ rgw_zone }}.rgw.meta":
#     pg_num: 8
#     size: 3
#     type: replicated
#   "{{ rgw_zone }}.rgw.log":
#     pg_num: 8
#     size: 3
#     type: replicated
#   "{{ rgw_zone }}.rgw.control":
#     pg_num: 8
#     size: 3
#     type: replicated
#     rule_name: foo


##########
# DOCKER #
##########

# Resource limitation
# For the full list of limits you can apply, see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
ceph_rgw_docker_memory_limit: "4096m"
ceph_rgw_docker_cpu_limit: 8
# ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
# ceph_rgw_docker_cpuset_mems: "0"

ceph_config_keys: [] # DON'T TOUCH ME
rgw_config_keys: "/" # DON'T TOUCH ME
# If you want to add parameters, retain the existing ones and include the
# new ones; see the sketch at the end of this file.
ceph_rgw_container_params:
  volumes:
    - /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}:/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}:z
  args:
    - -f
    - -n=client.rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}
    - -k=/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}/keyring

###########
# SYSTEMD #
###########
# ceph_rgw_systemd_overrides will override the systemd settings
# for the ceph-rgw services.
# For example, to set "PrivateDevices=false" you can specify:
# ceph_rgw_systemd_overrides:
#   Service:
#     PrivateDevices: false
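
# Following the "retain the existing ones" note on ceph_rgw_container_params
# above: a minimal sketch of a group_vars override that keeps the default
# volume and args while adding one extra bind mount. Because overriding a
# dict variable replaces it entirely, the defaults must be repeated. The
# /etc/pki/tls volume is purely hypothetical, for illustration only:
#
# ceph_rgw_container_params:
#   volumes:
#     - /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}:/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}:z
#     - /etc/pki/tls:/etc/pki/tls:ro
#   args:
#     - -f
#     - -n=client.rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}
#     - -k=/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}/keyring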