Execution
Date
15 Dec 2025 09:55:36 +0000
Duration
00:21:13.46
Controller
aio1.openstack.local
User
root
Versions
Ansible
2.18.6
ara
1.7.4 / 1.7.4
Python
3.12.3
Summary
15
Hosts
924
Tasks
1203
Results
46
Plays
212
Files
0
Records
File: /etc/ansible/roles/ceph-ansible/roles/ceph-defaults/defaults/main.yml
---
# You can override vars by using host or group vars

###########
# GENERAL #
###########

######################################
# Releases name to number dictionary #
######################################
# Pre-infernalis releases use x.y version floats; infernalis onward use
# the integer major version. 'dev' maps to 99 as a sentinel.
ceph_release_num:
  dumpling: 0.67
  emperor: 0.72
  firefly: 0.80
  giant: 0.87
  hammer: 0.94
  infernalis: 9
  jewel: 10
  kraken: 11
  luminous: 12
  mimic: 13
  nautilus: 14
  octopus: 15
  pacific: 16
  quincy: 17
  reef: 18
  squid: 19
  dev: 99

# The 'cluster' variable determines the name of the cluster.
# Changing the default value to something else means that you will
# need to change all the command line calls as well, for example if
# your cluster name is 'foo':
# "ceph health" will become "ceph --cluster foo health"
#
# An easier way to handle this is to use the environment variable CEPH_ARGS
# So run: export CEPH_ARGS="--cluster foo"
# With that you will be able to run "ceph health" normally
cluster: ceph

# Inventory host group variables
mon_group_name: mons
osd_group_name: osds
rgw_group_name: rgws
mds_group_name: mdss
nfs_group_name: nfss
rbdmirror_group_name: rbdmirrors
client_group_name: clients
mgr_group_name: mgrs
rgwloadbalancer_group_name: rgwloadbalancers
monitoring_group_name: monitoring

# All daemon groups whose hosts receive an adoption label.
adopt_label_group_names:
  - "{{ mon_group_name }}"
  - "{{ osd_group_name }}"
  - "{{ rgw_group_name }}"
  - "{{ mds_group_name }}"
  - "{{ nfs_group_name }}"
  - "{{ rbdmirror_group_name }}"
  - "{{ client_group_name }}"
  - "{{ mgr_group_name }}"
  - "{{ rgwloadbalancer_group_name }}"
  - "{{ monitoring_group_name }}"

# If configure_firewall is true, then ansible will try to configure the
# appropriate firewalling rules so that Ceph daemons can communicate
# with each other.
configure_firewall: true

# Open ports on corresponding nodes if firewall is installed on it
ceph_mon_firewall_zone: public
ceph_mgr_firewall_zone: public
ceph_osd_firewall_zone: public
ceph_rgw_firewall_zone: public
ceph_mds_firewall_zone: public
ceph_nfs_firewall_zone: public
ceph_rbdmirror_firewall_zone: public
ceph_dashboard_firewall_zone: public
ceph_rgwloadbalancer_firewall_zone: public

# cephadm account for remote connections
cephadm_ssh_user: root
# NOTE(review): with the default user 'root' this resolves to
# /home/root/.ssh/id_rsa rather than /root/.ssh/id_rsa — confirm this is
# the intended path before relying on the default.
cephadm_ssh_priv_key_path: "/home/{{ cephadm_ssh_user }}/.ssh/id_rsa"
cephadm_ssh_pub_key_path: "{{ cephadm_ssh_priv_key_path }}.pub"
cephadm_mgmt_network: "{{ public_network }}"

############
# PACKAGES #
############
debian_package_dependencies: []

centos_package_dependencies:
  - epel-release
  # libselinux bindings package was renamed for EL8+
  - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"

redhat_package_dependencies: []

suse_package_dependencies: []

# Whether or not to install the ceph-test package.
ceph_test: false

# Enable the ntp service by default to avoid clock skew on ceph nodes
# Disable if an appropriate NTP client is already installed and configured
ntp_service_enabled: true

# Set type of NTP client daemon to use, valid entries are chronyd, ntpd or timesyncd
ntp_daemon_type: chronyd

# This variable determines if ceph packages can be updated. If False, the
# package resources will use "state=present". If True, they will use
# "state=latest".
upgrade_ceph_packages: false

ceph_use_distro_backports: false  # DEBIAN ONLY
ceph_directories_mode: "0755"

###########
# INSTALL #
###########
# ORIGIN SOURCE
#
# Choose between:
# - 'repository' means that you will get ceph installed through a new repository. Later below choose between 'community', 'dev' or 'obs'
# - 'distro' means that no separate repo file will be added
#   you will get whatever version of Ceph is included in your Linux distro.
# 'local' means that the ceph binaries will be copied over from the local machine
ceph_origin: dummy
valid_ceph_origins:
  - repository
  - distro
  - local

ceph_repository: dummy
valid_ceph_repository:
  - community
  - dev
  - uca
  - custom
  - obs

# REPOSITORY: COMMUNITY VERSION
#
# Enabled when ceph_repository == 'community'
#
ceph_mirror: https://download.ceph.com
ceph_stable_key: https://download.ceph.com/keys/release.asc
ceph_stable_release: reef
ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}"

nfs_ganesha_stable: true  # use stable repos for nfs-ganesha
centos_release_nfs: centos-release-nfs-ganesha4
nfs_ganesha_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/nfs-ganesha-4/ubuntu
nfs_ganesha_apt_keyserver: keyserver.ubuntu.com
nfs_ganesha_apt_key_id: EA914D611053D07BD332E18010353E8834DC57CA
libntirpc_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/libntirpc-4/ubuntu

# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
#
# for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
# ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"

# REPOSITORY: UBUNTU CLOUD ARCHIVE
#
# Enabled when ceph_repository == 'uca'
#
# This allows the install of Ceph from the Ubuntu Cloud Archive. The Ubuntu Cloud Archive
# usually has newer Ceph releases than the normal distro repository.
#
#
ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
ceph_stable_openstack_release_uca: queens
ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"

# REPOSITORY: openSUSE OBS
#
# Enabled when ceph_repository == 'obs'
#
# This allows the install of Ceph from the openSUSE OBS repository. The OBS repository
# usually has newer Ceph releases than the normal distro repository.
#
#
ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"

# REPOSITORY: DEV
#
# Enabled when ceph_repository == 'dev'
#
ceph_dev_branch: main  # development branch you would like to use e.g: main, wip-hack
ceph_dev_sha1: latest  # distinct sha1 to use, defaults to 'latest' (as in latest built)

nfs_ganesha_dev: false  # use development repos for nfs-ganesha

# Set this to choose the version of ceph dev libraries used in the nfs-ganesha packages from shaman
# flavors so far include: ceph_main, ceph_jewel, ceph_kraken, ceph_luminous
nfs_ganesha_flavor: "ceph_main"

# REPOSITORY: CUSTOM
#
# Enabled when ceph_repository == 'custom'
#
# Use a custom repository to install ceph. For RPM, ceph_custom_repo should be
# a URL to the .repo file to be installed on the targets. For deb,
# ceph_custom_repo should be the URL to the repo base.
#
# ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
ceph_custom_repo: https://server.domain.com/ceph-custom-repo

# ORIGIN: LOCAL CEPH INSTALLATION
#
# Enabled when ceph_repository == 'local'
#
# Path to DESTDIR of the ceph install
# ceph_installation_dir: "/path/to/ceph_installation/"
# Whether or not to use installer script rundep_installer.sh
# This script takes in rundep and installs the packages line by line onto the machine
# If this is set to false then it is assumed that the machine ceph is being copied onto will already have
# all runtime dependencies installed
# use_installer: false
# Root directory for ceph-ansible
# ansible_dir: "/path/to/ceph-ansible"

######################
# CEPH CONFIGURATION #
######################

## Ceph options
#
# Each cluster requires a unique, consistent filesystem ID. By
# default, the playbook generates one for you.
# If you want to customize how the fsid is
# generated, you may find it useful to disable fsid generation to
# avoid cluttering up your ansible repo. If you set `generate_fsid` to
# false, you *must* generate `fsid` in another way.
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
fsid: "{{ cluster_uuid.stdout }}"
generate_fsid: true

ceph_conf_key_directory: /etc/ceph

# ceph user/group id: 64045 on non-containerized Debian, 167 elsewhere
ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"

# Permissions for keyring files in /etc/ceph
ceph_keyring_permissions: '0600'

cephx: true

# Cluster configuration
ceph_cluster_conf:
  global:
    public_network: "{{ public_network | default(omit) }}"
    cluster_network: "{{ cluster_network | default(omit) }}"
    osd_pool_default_crush_rule: "{{ osd_pool_default_crush_rule }}"
    ms_bind_ipv6: "{{ (ip_version == 'ipv6') | string }}"
    ms_bind_ipv4: "{{ (ip_version == 'ipv4') | string }}"
    osd_crush_chooseleaf_type: "{{ '0' if common_single_host_mode | default(false) else omit }}"

## Client options
#
rbd_cache: "true"
rbd_cache_writethrough_until_flush: "true"
rbd_concurrent_management_ops: 20

rbd_client_directories: true  # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions

# Permissions for the rbd_client_log_path and
# rbd_client_admin_socket_path. Depending on your use case for Ceph
# you may want to change these values. The default, which is used if
# any of the variables are unset or set to a false value (like `null`
# or `false`) is to automatically determine what is appropriate for
# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770
# for infernalis releases, and root:root and 1777 for pre-infernalis
# releases.
#
# For other use cases, including running Ceph with OpenStack, you'll
# want to set these differently:
#
# For OpenStack on RHEL, you'll want:
# rbd_client_directory_owner: "qemu"
# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt)
# rbd_client_directory_mode: "0755"
#
# For OpenStack on Ubuntu or Debian, set:
# rbd_client_directory_owner: "libvirt-qemu"
# rbd_client_directory_group: "kvm"
# rbd_client_directory_mode: "0755"
#
# If you set rbd_client_directory_mode, you must use a string (e.g.,
# 'rbd_client_directory_mode: "0755"', *not*
# 'rbd_client_directory_mode: 0755', or Ansible will complain: mode
# must be in octal or symbolic form
rbd_client_directory_owner: ceph
rbd_client_directory_group: ceph
rbd_client_directory_mode: "0755"

rbd_client_log_path: /var/log/ceph
rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log"  # must be writable by QEMU and allowed by SELinux or AppArmor
rbd_client_admin_socket_path: /var/run/ceph  # must be writable by QEMU and allowed by SELinux or AppArmor

## Monitor options
#
# set to either ipv4 or ipv6, whichever your network is using
ip_version: ipv4

mon_host_v1:
  enabled: true
  suffix: ':6789'
mon_host_v2:
  suffix: ':3300'

enable_ceph_volume_debug: false

##########
# CEPHFS #
##########
# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
# `pg_num` and `pgp_num` keys will be ignored, even if specified.
# eg:
# cephfs_data_pool:
#   name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
#   target_size_ratio: 0.2
cephfs: cephfs  # name of the ceph filesystem
cephfs_data_pool:
  name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
cephfs_metadata_pool:
  name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}"

cephfs_pools:
  - "{{ cephfs_data_pool }}"
  - "{{ cephfs_metadata_pool }}"

## OSD options
#
lvmetad_disabled: false
is_hci: false
hci_safety_factor: 0.2
non_hci_safety_factor: 0.7
# effective memory safety factor depends on whether this is a hyper-converged deployment
safety_factor: "{{ hci_safety_factor if is_hci | bool else non_hci_safety_factor }}"
osd_memory_target: 4294967296
journal_size: 5120  # OSD journal size in MB
block_db_size: -1  # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'.
public_network: 0.0.0.0/0
cluster_network: "{{ public_network | regex_replace(' ', '') }}"
osd_mkfs_type: xfs
osd_mkfs_options_xfs: -f -i size=2048
osd_mount_options_xfs: noatime,largeio,inode64,swalloc
osd_objectstore: bluestore

# Any device containing these patterns in their path will be excluded.
osd_auto_discovery_exclude: "dm-*|loop*|md*|rbd*"

## MDS options
#
mds_max_mds: 1

## Rados Gateway options
#
radosgw_frontend_type: beast  # For additional frontends see: https://docs.ceph.com/en/latest/radosgw/frontends/

radosgw_frontend_port: 8080
# The server private key, public certificate and any other CA or intermediate certificates should be in one file, in PEM format.
radosgw_frontend_ssl_certificate: ""
radosgw_frontend_ssl_certificate_data: ""  # certificate contents to be written to path defined by radosgw_frontend_ssl_certificate
radosgw_frontend_options: ""
radosgw_thread_pool_size: 512

# You must define either radosgw_interface, radosgw_address.
# These variables must be defined at least in all.yml and overrided if needed (inventory host file or group_vars/*.yml).
# Eg. If you want to specify for each radosgw node which address the radosgw will bind to you can set it in your
# **inventory host file** by using 'radosgw_address' variable.
# Preference will go to radosgw_address if both radosgw_address and radosgw_interface are defined.
radosgw_interface: interface
radosgw_address: x.x.x.x
radosgw_address_block: subnet
radosgw_keystone_ssl: false  # activate this when using keystone PKI keys
radosgw_num_instances: 1
rgw_zone: default  # This is used for rgw instance client names.

## Testing mode
# enable this mode _only_ when you have a single node
# if you don't want it keep the option commented
# common_single_host_mode: true

## Handlers - restarting daemons after a config change
# if for whatever reasons the content of your ceph configuration changes
# ceph daemons will be restarted as well. At the moment, we can not detect
# which config option changed so all the daemons will be restarted. Although
# this restart will be serialized for each node, in between a health check
# will be performed so we make sure we don't move to the next node until
# ceph is not healthy
# Obviously between the checks (for monitors to be in quorum and for osd's pgs
# to be clean) we have to wait. These retries and delays can be configurable
# for both monitors and osds.
#
# Monitor handler checks
handler_health_mon_check_retries: 10
handler_health_mon_check_delay: 20
#
# OSD handler checks
handler_health_osd_check_retries: 40
handler_health_osd_check_delay: 30
handler_health_osd_check: true
#
# MDS handler checks
handler_health_mds_check_retries: 5
handler_health_mds_check_delay: 10
#
# RGW handler checks
handler_health_rgw_check_retries: 5
handler_health_rgw_check_delay: 10
handler_rgw_use_haproxy_maintenance: false

# NFS handler checks
handler_health_nfs_check_retries: 5
handler_health_nfs_check_delay: 10

# RBD MIRROR handler checks
handler_health_rbd_mirror_check_retries: 5
handler_health_rbd_mirror_check_delay: 10

# MGR handler checks
handler_health_mgr_check_retries: 5
handler_health_mgr_check_delay: 10

## health mon/osds check retries/delay:
health_mon_check_retries: 20
health_mon_check_delay: 10
health_osd_check_retries: 20
health_osd_check_delay: 10

##############
# RBD-MIRROR #
##############

ceph_rbd_mirror_pool: "rbd"

###############
# NFS-GANESHA #
###############
#
# Access type options
#
# Enable NFS File access
# If set to true, then ganesha is set up to export the root of the
# Ceph filesystem, and ganesha's attribute and directory caching is disabled
# as much as possible since libcephfs clients also caches the same
# information.
#
# Set this to true to enable File access via NFS. Requires an MDS role.
nfs_file_gw: false
# Set this to true to enable Object access via NFS. Requires an RGW role.
# Defaults to false only when no monitor hosts are in the inventory.
# (lowercase jinja booleans, consistent with the rest of this file)
nfs_obj_gw: "{{ false if groups.get(mon_group_name, []) | length == 0 else true }}"

###################
# CONFIG OVERRIDE #
###################

# Ceph configuration file override.
# This allows you to specify more configuration options
# using an INI style format.
#
# When configuring RGWs, make sure you use the form [client.rgw.*]
# instead of [client.radosgw.*].
# For more examples check the profiles directory of https://github.com/ceph/ceph-ansible.
#
# The following sections are supported: [global], [mon], [osd], [mds], [client]
#
# Example:
# ceph_conf_overrides:
#   global:
#     foo: 1234
#     bar: 5678
#   "client.rgw.{{ rgw_zone }}.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
#     rgw_zone: zone1
#
ceph_conf_overrides: {}

#############
# OS TUNING #
#############

# FIX: the original inline-if had no 'else' branch, so the expression was
# undefined (rendered empty) whenever osd_objectstore != 'bluestore'.
# THP is disabled on bluestore, left enabled otherwise.
disable_transparent_hugepage: "{{ false if osd_objectstore == 'bluestore' else true }}"

os_tuning_params:
  - { name: fs.file-max, value: 26234859 }
  - { name: vm.zone_reclaim_mode, value: 0 }
  - { name: vm.swappiness, value: 10 }
  - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" }

# For Debian & Red Hat/CentOS installs set TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES
# Set this to a byte value (e.g. 134217728)
# A value of 0 will leave the package default.
ceph_tcmalloc_max_total_thread_cache: 134217728

##########
# DOCKER #
##########
ceph_docker_image: "ceph/daemon-base"
ceph_docker_image_tag: latest-reef
ceph_docker_registry: quay.io
ceph_docker_registry_auth: false
# ceph_docker_registry_username:
# ceph_docker_registry_password:
# ceph_docker_http_proxy:
# ceph_docker_https_proxy:
ceph_docker_no_proxy: "localhost,127.0.0.1"
## Client only docker image - defaults to {{ ceph_docker_image }}
ceph_client_docker_image: "{{ ceph_docker_image }}"
ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}"
ceph_client_docker_registry: "{{ ceph_docker_registry }}"
containerized_deployment: false
# intentionally empty (null) here; resolved at runtime by the roles
container_binary:
timeout_command: "{{ 'timeout --foreground -s KILL ' ~ docker_pull_timeout if (docker_pull_timeout != '0') and (ceph_docker_dev_image is undefined or not ceph_docker_dev_image) else '' }}"

# Environment, daemon arguments and bind-mounts shared by all ceph containers.
ceph_common_container_params:
  envs:
    NODE_NAME: "{{ ansible_facts['hostname'] }}"
    CEPH_USE_RANDOM_NONCE: "1"
    CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
    TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES: "{{ ceph_tcmalloc_max_total_thread_cache }}"
  args:
    - --setuser=ceph
    - --setgroup=ceph
    - --default-log-to-file=false
    - --default-log-to-stderr=true
    - --default-log-stderr-prefix="debug "
  volumes:
    - /var/lib/ceph/crash:/var/lib/ceph/crash:z
    - /var/run/ceph:/var/run/ceph:z
    - /var/log/ceph:/var/log/ceph:z
    - /etc/ceph:/etc/ceph:z
    - /etc/localtime:/etc/localtime:ro

# this is only here for usage with the rolling_update.yml playbook
# do not ever change this here
rolling_update: false

#####################
# Docker pull retry #
#####################
docker_pull_retry: 3
docker_pull_timeout: "300s"

#############
# DASHBOARD #
#############
dashboard_enabled: true
# Choose http or https
# For https, you should set dashboard.crt/key and grafana.crt/key
# If you define the dashboard_crt and dashboard_key variables, but leave them as '',
# then we will autogenerate a cert and keyfile
dashboard_protocol: https
dashboard_port: 8443
# set this variable to the network you want the dashboard to listen on. (Default to public_network)
dashboard_network: "{{ public_network }}"
dashboard_admin_user: admin
dashboard_admin_user_ro: false
# This variable must be set with a strong custom password when dashboard_enabled is True
# dashboard_admin_password: p@ssw0rd
# We only need this for SSL (https) connections
dashboard_crt: ''
dashboard_key: ''
dashboard_certificate_cn: ceph-dashboard
dashboard_tls_external: false
dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}"
dashboard_rgw_api_user_id: ceph-dashboard
dashboard_rgw_api_admin_resource: ''
dashboard_rgw_api_no_ssl_verify: false
dashboard_frontend_vip: ''
dashboard_disabled_features: []
prometheus_frontend_vip: ''
alertmanager_frontend_vip: ''
node_exporter_container_image: "docker.io/prom/node-exporter:v0.17.0"
node_exporter_port: 9100
grafana_admin_user: admin
# This variable must be set with a strong custom password when dashboard_enabled is True
# grafana_admin_password: admin
# We only need this for SSL (https) connections
grafana_crt: ''
grafana_key: ''
# When using https, please fill with a hostname for which grafana_crt is valid.
grafana_server_fqdn: ''
grafana_container_image: "docker.io/grafana/grafana:6.7.4"
grafana_container_cpu_period: 100000
grafana_container_cpu_cores: 2
# container_memory is in GB
grafana_container_memory: 4
grafana_uid: 472
grafana_datasource: Dashboard
grafana_dashboards_path: "/etc/grafana/dashboards/ceph-dashboard"
grafana_dashboard_version: main
grafana_dashboard_files:
  - ceph-cluster.json
  - cephfs-overview.json
  - host-details.json
  - hosts-overview.json
  - osd-device-details.json
  - osds-overview.json
  - pool-detail.json
  - pool-overview.json
  - radosgw-detail.json
  - radosgw-overview.json
  - radosgw-sync-overview.json
  - rbd-details.json
  - rbd-overview.json
grafana_plugins:
  - vonage-status-panel
  - grafana-piechart-panel
grafana_allow_embedding: true
grafana_port: 3000
grafana_network: "{{ public_network }}"
grafana_conf_overrides: {}
prometheus_container_image: "docker.io/prom/prometheus:v2.7.2"
prometheus_container_cpu_period: 100000
prometheus_container_cpu_cores: 2
# container_memory is in GB
prometheus_container_memory: 4
prometheus_data_dir: /var/lib/prometheus
prometheus_conf_dir: /etc/prometheus
prometheus_user_id: '65534'  # This is the UID used by the prom/prometheus container image
prometheus_port: 9092
prometheus_conf_overrides: {}
# Uncomment out this variable if you need to customize the retention period for prometheus storage.
# set it to '30d' if you want to retain 30 days of data.
# prometheus_storage_tsdb_retention_time: 15d
alertmanager_container_image: "docker.io/prom/alertmanager:v0.16.2"
alertmanager_container_cpu_period: 100000
alertmanager_container_cpu_cores: 2
# container_memory is in GB
alertmanager_container_memory: 4
alertmanager_data_dir: /var/lib/alertmanager
alertmanager_conf_dir: /etc/alertmanager
alertmanager_port: 9093
alertmanager_cluster_port: 9094
alertmanager_conf_overrides: {}
# Skip TLS verification only when the dashboard uses https with an autogenerated cert.
alertmanager_dashboard_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not dashboard_crt and not dashboard_key else false }}"

no_log_on_ceph_key_tasks: true

###############
# DEPRECATION #
###############

######################################################
# VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER #
#               *DO NOT* MODIFY THEM                 #
######################################################

# intentionally empty (null) here; resolved at runtime
container_exec_cmd:
docker: false
ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"