internal.yml.j2
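# Jinja2-templated Heat environment file carrying the Ceph settings for the overcloud
# deployment. The template variables (clean_nodes, ceph_node_count, osp_release,
# storage_node_disks, osd_scenario, osd_objectstore, osd_pool_default_pg_num,
# osd_pool_default_pgp_num) are assumed to be supplied by whatever renders this file,
# e.g. an Ansible play using the template module.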
{% if clean_nodes != true %}
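# OS::TripleO::NodeUserData injects first-boot user data into the overcloud nodes;
# wipe-disks.yaml (assumed to exist on the undercloud) is expected to zap previously
# used Ceph disks so they can be reprovisioned on redeploy.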
resource_registry:
  OS::TripleO::NodeUserData: /home/stack/wipe-disks.yaml
{% endif %}
parameter_defaults:
  LocalCephAnsibleFetchDirectoryBackup: /tmp/fetch_dir
{% if ceph_node_count < 3 %}
  CephPoolDefaultSize: {{ ceph_node_count }}
{% else %}
  CephPoolDefaultSize: 3
{% endif %}
  # When deploying a small number of OSDs (< 12), the default pg_num of 128 must be
  # lowered to stay under the limit of 200 PGs per OSD.
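  # Rough math behind the value of 32, assuming roughly six pools (e.g. volumes, vms,
  # images, backups, metrics): 6 pools * 128 PGs * 3 replicas = 2304 PG copies, which
  # needs 12+ OSDs to stay under 200 per OSD, whereas 6 * 32 * 3 = 576 fits on as few
  # as 3 OSDs.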
  CephPoolDefaultPgNum: 32
  CephAnsiblePlaybookVerbosity: 1
  CephAnsibleDisksConfig:
{% if osp_release|int < 17 %}
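    # With ceph-ansible (pre-OSP 17) the OSD disks are listed explicitly;
    # storage_node_disks[1:] skips the first disk, assumed to be the node's OS disk.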
    devices:
{% for disk in storage_node_disks[1:] %}
      - {{ disk }}
{% endfor %}
{% endif %}
    # The following two parameters are the defaults; they are included here only for
    # reference.
    osd_scenario: {{ osd_scenario }}
    osd_objectstore: {{ osd_objectstore }}
  CephAnsibleExtraConfig:
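    # With the PG autoscaler enabled, Ceph adjusts pg_num per pool on its own, so the
    # fixed pg_num values above serve only as initial values.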
    osd_pool_default_autoscale_mode: 'on'  # quoted so YAML keeps the string "on" rather than the boolean true
  ExtraConfig:
    ceph::profile::params::osd_pool_default_pg_num: {{ osd_pool_default_pg_num | default(32) }}
    ceph::profile::params::osd_pool_default_pgp_num: {{ osd_pool_default_pgp_num | default(32) }}