From dd6212bcf65a17e791ae3e121cb190b21d8c30ba Mon Sep 17 00:00:00 2001
From: Seena Fallah
Date: Thu, 14 Mar 2024 13:36:07 +0100
Subject: [PATCH 1/3] ceph_ec_profile: add support for more plugins

Add support for more plugins with their respective arguments.

Signed-off-by: Seena Fallah
(cherry picked from commit 80666a20dad82c6c0f8ad6a99c26bf52781bcfe3)
---
 library/ceph_ec_profile.py | 99 ++++++++++++++++++++------------------
 module_utils/ca_common.py  |  4 +-
 2 files changed, 55 insertions(+), 48 deletions(-)

diff --git a/library/ceph_ec_profile.py b/library/ceph_ec_profile.py
index 1ac5df38dc..0b84724095 100644
--- a/library/ceph_ec_profile.py
+++ b/library/ceph_ec_profile.py
@@ -101,7 +101,7 @@ RETURN = '''# '''
 
 
-def get_profile(module, name, cluster='ceph', container_image=None):
+def get_profile(name, cluster='ceph', container_image=None):
     '''
     Get existing profile
     '''
@@ -116,16 +116,14 @@ def get_profile(module, name, cluster='ceph', container_image=None):
     return cmd
 
 
-def create_profile(module, name, k, m, stripe_unit, crush_device_class, cluster='ceph', force=False, container_image=None):  # noqa: E501
+def create_profile(name, user_profile, force, cluster='ceph', container_image=None):  # noqa: E501
     '''
     Create a profile
     '''
 
-    args = ['set', name, 'k={}'.format(k), 'm={}'.format(m)]
-    if stripe_unit:
-        args.append('stripe_unit={}'.format(stripe_unit))
-    if crush_device_class:
-        args.append('crush-device-class={}'.format(crush_device_class))
+    args = ['set', name]
+    for key, value in user_profile.items():
+        args.append('{}={}'.format(key, value))
     if force:
         args.append('--force')
 
@@ -137,7 +135,7 @@ def create_profile(module, name, k, m, stripe_unit, crush_device_class, cluster=
     return cmd
 
 
-def delete_profile(module, name, cluster='ceph', container_image=None):
+def delete_profile(name, cluster='ceph', container_image=None):
     '''
     Delete a profile
     '''
@@ -152,6 +150,22 @@ def delete_profile(module, name, cluster='ceph', container_image=None):
     return cmd
 
 
+def parse_user_profile(module):
+    profile_keys = ['plugin',
+                    'k', 'm', 'd', 'l', 'c',
+                    'stripe_unit', 'scalar_mds', 'technique',
+                    'crush-root', 'crush-device-class', 'crush-failure-domain']
+
+    profile = {}
+    for key in profile_keys:
+        ansible_lookup_key = key.replace('-', '_')
+        value = module.params.get(ansible_lookup_key)
+        if value:
+            profile[key] = value
+
+    return profile
+
+
 def run_module():
     module_args = dict(
         cluster=dict(type='str', required=False, default='ceph'),
@@ -159,9 +173,18 @@ def run_module():
         state=dict(type='str', required=False,
                    choices=['present', 'absent'], default='present'),
         stripe_unit=dict(type='str', required=False),
-        k=dict(type='str', required=False),
-        m=dict(type='str', required=False),
-        crush_device_class=dict(type='str', required=False, default=''),
+        plugin=dict(type='str', required=False, default='jerasure'),
+        k=dict(type='int', required=False),
+        m=dict(type='int', required=False),
+        d=dict(type='int', required=False),
+        l=dict(type='int', required=False),
+        c=dict(type='int', required=False),
+        scalar_mds=dict(type='str', required=False),
+        technique=dict(type='str', required=False),
+        crush_root=dict(type='str', required=False),
+        crush_failure_domain=dict(type='str', required=False),
+        crush_device_class=dict(type='str', required=False),
+        force=dict(type='bool', required=False, default=False),
     )
 
     module = AnsibleModule(
@@ -174,10 +197,8 @@ def run_module():
     name = module.params.get('name')
     cluster = module.params.get('cluster')
     state = module.params.get('state')
-    stripe_unit = module.params.get('stripe_unit')
-    k = module.params.get('k')
-    m = module.params.get('m')
-    crush_device_class = module.params.get('crush_device_class')
+    force = module.params.get('force')
+    user_profile = parse_user_profile(module)
 
     if module.check_mode:
         module.exit_json(
@@ -191,46 +212,32 @@ def run_module():
     )
 
     startd = datetime.datetime.now()
+    diff = dict(before="", after="")
     changed = False
 
     # will return either the image name or None
     container_image = is_containerized()
 
     if state == "present":
-        rc, cmd, out, err = exec_command(module, get_profile(module, name, cluster, container_image=container_image))  # noqa: E501
+        rc, cmd, out, err = exec_command(module, get_profile(name, cluster, container_image=container_image))  # noqa: E501
+        current_profile = {}
         if rc == 0:
-            # the profile already exists, let's check whether we have to
-            # update it
             current_profile = json.loads(out)
-            if current_profile['k'] != k or \
-               current_profile['m'] != m or \
-               current_profile.get('stripe_unit', stripe_unit) != stripe_unit or \
-               current_profile.get('crush-device-class', crush_device_class) != crush_device_class:  # noqa: E501
-                rc, cmd, out, err = exec_command(module,
-                                                 create_profile(module,
-                                                                name,
-                                                                k,
-                                                                m,
-                                                                stripe_unit,
-                                                                crush_device_class,  # noqa: E501
-                                                                cluster,
-                                                                force=True, container_image=container_image))  # noqa: E501
-                changed = True
-        else:
-            # the profile doesn't exist, it has to be created
-            rc, cmd, out, err = exec_command(module, create_profile(module,
-                                                                    name,
-                                                                    k,
-                                                                    m,
-                                                                    stripe_unit,  # noqa: E501
-                                                                    crush_device_class,  # noqa: E501
-                                                                    cluster,
-                                                                    container_image=container_image))  # noqa: E501
-            if rc == 0:
-                changed = True
+
+        changed = current_profile != user_profile
+        if changed:
+            diff['before'] = json.dumps(current_profile)
+            diff['after'] = json.dumps(user_profile)
+            rc, cmd, out, err = exec_command(module,
+                                             create_profile(name,
+                                                            user_profile,
+                                                            force,
+                                                            cluster,
+                                                            container_image=container_image),  # noqa: E501
+                                             check_rc=True)
 
     elif state == "absent":
-        rc, cmd, out, err = exec_command(module, delete_profile(module, name, cluster, container_image=container_image))  # noqa: E501
+        rc, cmd, out, err = exec_command(module, delete_profile(name, cluster, container_image=container_image))  # noqa: E501
         if not err:
             out = 'Profile {} removed.'.format(name)
             changed = True
@@ -238,7 +245,7 @@ def run_module():
         rc = 0
         out = "Skipping, the profile {} doesn't exist".format(name)
 
-    exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed)  # noqa: E501
+    exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed, diff=diff)  # noqa: E501
 
 
 def main():

diff --git a/module_utils/ca_common.py b/module_utils/ca_common.py
index f478b75e59..32c0cbdbed 100644
--- a/module_utils/ca_common.py
+++ b/module_utils/ca_common.py
@@ -81,7 +81,7 @@ def pre_generate_cmd(cmd, container_image=None, interactive=False):
     return cmd
 
 
-def exec_command(module, cmd, stdin=None):
+def exec_command(module, cmd, stdin=None, check_rc=False):
    '''
     Execute command(s)
     '''
@@ -89,7 +89,7 @@ def exec_command(module, cmd, stdin=None):
     binary_data = False
     if stdin:
         binary_data = True
-    rc, out, err = module.run_command(cmd, data=stdin, binary_data=binary_data)
+    rc, out, err = module.run_command(cmd, data=stdin, binary_data=binary_data, check_rc=check_rc)  # noqa: E501
 
     return rc, cmd, out, err
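As a usage sketch (not part of the patch), a play could drive the reworked module like this; the profile name and values are invented for illustration, and any other argument handled by parse_user_profile() can be passed the same way:

    - name: Create an EC profile with the clay plugin
      ceph_ec_profile:
        name: clay42                  # hypothetical profile name
        cluster: ceph
        plugin: clay
        k: 4
        m: 2
        d: 5                          # clay-specific parameter, k <= d <= k+m-1
        crush_device_class: hdd       # mapped to crush-device-class on the CLI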
From 1860692185fe3cfb01717ff3fcfce62df8fed966 Mon Sep 17 00:00:00 2001
From: Seena Fallah
Date: Thu, 14 Mar 2024 15:49:31 +0100
Subject: [PATCH 2/3] ceph-osd: introduce ec profile creation

RGW pools can now use the existing EC profiles and rules created by the
ceph-osd role.

Signed-off-by: Seena Fallah
(cherry picked from commit bbc1ba5c05d9a4444617e1777b10e127b345afb3)
---
 group_vars/osds.yml.sample                    |  2 +
 roles/ceph-osd/defaults/main.yml              |  2 +
 roles/ceph-osd/tasks/crush_rules.yml          | 32 ++++++++++++--
 roles/ceph-rgw/tasks/rgw_create_pools.yml     | 42 +++++--------------
 roles/ceph-validate/tasks/check_rgw_pools.yml |  2 +
 5 files changed, 45 insertions(+), 35 deletions(-)

diff --git a/group_vars/osds.yml.sample b/group_vars/osds.yml.sample
index 430318d0ed..2723cd5131 100644
--- a/group_vars/osds.yml.sample
+++ b/group_vars/osds.yml.sample
@@ -148,6 +148,8 @@ dummy:
 #  - "{{ crush_rule_hdd }}"
 #  - "{{ crush_rule_ssd }}"
 
+#ceph_ec_profiles: {}
+
 # Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }}
 # and will move hosts into them which might lead to significant data movement in the cluster!
 #
diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml
index 99019ee50e..629400f57c 100644
--- a/roles/ceph-osd/defaults/main.yml
+++ b/roles/ceph-osd/defaults/main.yml
@@ -140,6 +140,8 @@ crush_rules:
   - "{{ crush_rule_hdd }}"
   - "{{ crush_rule_ssd }}"
 
+ceph_ec_profiles: {}
+
 # Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }}
 # and will move hosts into them which might lead to significant data movement in the cluster!
 #
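For reference, an entry in the new ceph_ec_profiles dict might look like the following sketch; the profile name and values are illustrative only, and each key maps onto a parameter of the ceph_ec_profile module as consumed by the task below:

    ceph_ec_profiles:
      ec42:                           # hypothetical profile name
        plugin: jerasure
        k: 4                          # required by the task below
        m: 2                          # required by the task below
        crush_device_class: hdd       # optional, omitted when unset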
diff --git a/roles/ceph-osd/tasks/crush_rules.yml b/roles/ceph-osd/tasks/crush_rules.yml
index 303326794e..0d4164b557 100644
--- a/roles/ceph-osd/tasks/crush_rules.yml
+++ b/roles/ceph-osd/tasks/crush_rules.yml
@@ -10,13 +10,39 @@
     - hostvars[groups[mon_group_name][0]]['create_crush_tree'] | default(create_crush_tree) | bool
     - osd_crush_location is defined
 
+- name: Create configured ec profiles
+  ceph_ec_profile:
+    name: "{{ item.key }}"
+    cluster: "{{ cluster }}"
+    state: "{{ item.value.state | default('present') }}"
+    stripe_unit: "{{ item.value.stripe_unit | default(omit) }}"
+    plugin: "{{ item.value.plugin | default(omit) }}"
+    k: "{{ item.value.k }}"
+    m: "{{ item.value.m }}"
+    d: "{{ item.value.d | default(omit) }}"
+    l: "{{ item.value.l | default(omit) }}"
+    c: "{{ item.value.c | default(omit) }}"
+    scalar_mds: "{{ item.value.scalar_mds | default(omit) }}"
+    technique: "{{ item.value.technique | default(omit) }}"
+    crush_root: "{{ item.value.crush_root | default(omit) }}"
+    crush_failure_domain: "{{ item.value.crush_failure_domain | default(omit) }}"
+    crush_device_class: "{{ item.value.crush_device_class | default(omit) }}"
+    force: "{{ item.value.force | default(false) }}"
+  environment:
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+  loop: "{{ ceph_ec_profiles | dict2items }}"
+  delegate_to: '{{ groups[mon_group_name][0] }}'
+  run_once: true
+
 - name: Create configured crush rules
   ceph_crush_rule:
     name: "{{ item.name }}"
     cluster: "{{ cluster }}"
-    rule_type: replicated
-    bucket_root: "{{ item.root }}"
-    bucket_type: "{{ item.type }}"
+    rule_type: "{{ item.rule_type | default('replicated') }}"
+    profile: "{{ item.ec_profile | default(omit) }}"
+    bucket_root: "{{ item.root | default(omit) }}"
+    bucket_type: "{{ item.type | default(omit) }}"
     device_class: "{{ item.class | default(omit) }}"
   environment:
     CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
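Building on that, a crush_rules entry can now reference the profile through the new keys; a sketch, assuming the ceph_crush_rule module accepts 'erasure' as rule_type and reusing the hypothetical names above:

    crush_rules:
      - name: ec_rule                 # hypothetical rule name
        rule_type: erasure            # defaults to replicated when unset
        ec_profile: ec42              # key from ceph_ec_profiles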
diff --git a/roles/ceph-rgw/tasks/rgw_create_pools.yml b/roles/ceph-rgw/tasks/rgw_create_pools.yml
index 3f6d1c2dd5..b529a0ad95 100644
--- a/roles/ceph-rgw/tasks/rgw_create_pools.yml
+++ b/roles/ceph-rgw/tasks/rgw_create_pools.yml
@@ -3,14 +3,14 @@
   ceph_ec_profile:
     name: "{{ item.value.ec_profile }}"
     cluster: "{{ cluster }}"
-    k: "{{ item.value.ec_k }}"
-    m: "{{ item.value.ec_m }}"
+    k: "{{ item.value.ec_k | default(omit) }}"
+    m: "{{ item.value.ec_m | default(omit) }}"
     crush_device_class: "{{ item.value.ec_crush_device_class | default(omit) }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   loop: "{{ rgw_create_pools | dict2items }}"
   when:
-    - item.value.type is defined
-    - item.value.type == 'ec'
+    - item.value.create_profile | default(true)
+    - item.value.type | default('') == 'ec'
   environment:
     CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
     CEPH_CONTAINER_BINARY: "{{ container_binary }}"
@@ -27,32 +27,10 @@
   loop: "{{ rgw_create_pools | dict2items }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when:
-    - item.value.type is defined
-    - item.value.type == 'ec'
+    - item.value.create_profile | default(true)
+    - item.value.type | default('') == 'ec'
 
-- name: Create ec pools for rgw
-  ceph_pool:
-    name: "{{ item.key }}"
-    state: present
-    cluster: "{{ cluster }}"
-    pg_num: "{{ item.value.pg_num | default(omit) }}"
-    pgp_num: "{{ item.value.pgp_num | default(omit) }}"
-    size: "{{ item.value.size | default(omit) }}"
-    pg_autoscale_mode: "{{ item.value.pg_autoscale_mode | default(omit) }}"
-    target_size_ratio: "{{ item.value.target_size_ratio | default(omit) }}"
-    pool_type: erasure
-    erasure_profile: "{{ item.value.ec_profile }}"
-    application: rgw
-  loop: "{{ rgw_create_pools | dict2items }}"
-  delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - item.value.type is defined
-    - item.value.type == 'ec'
-  environment:
-    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
-    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-
-- name: Create replicated pools for rgw
+- name: Create rgw pools
   ceph_pool:
     name: "{{ item.key }}"
     state: present
@@ -63,12 +41,12 @@
     min_size: "{{ item.value.min_size | default(omit) }}"
     pg_autoscale_mode: "{{ item.value.pg_autoscale_mode | default(omit) }}"
     target_size_ratio: "{{ item.value.target_size_ratio | default(omit) }}"
-    pool_type: replicated
-    rule_name: "{{ item.value.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}"
+    pool_type: "{{ 'erasure' if item.value.type | default('') == 'ec' else 'replicated' }}"
+    erasure_profile: "{{ item.value.ec_profile | default(omit) }}"
+    rule_name: "{{ item.value.rule_name if item.value.rule_name is defined else item.key if item.value.type | default('') == 'ec' else ceph_osd_pool_default_crush_rule_name }}"
     application: rgw
   loop: "{{ rgw_create_pools | dict2items }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
-  when: item.value.type is not defined or item.value.type == 'replicated'
   environment:
     CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
     CEPH_CONTAINER_BINARY: "{{ container_binary }}"
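Putting the pieces together, a sketch of an rgw_create_pools entry that reuses a pre-created profile and rule; pool, profile, and rule names are the hypothetical ones above. With create_profile: false the profile tasks are skipped, and an EC pool without rule_name falls back to the pool name as its rule:

    rgw_create_pools:
      default.rgw.buckets.data:       # illustrative pool name
        type: ec
        ec_profile: ec42              # pre-created via ceph_ec_profiles
        create_profile: false         # skip profile creation in this role
        rule_name: ec_rule            # hypothetical rule from crush_rules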
diff --git a/roles/ceph-validate/tasks/check_rgw_pools.yml b/roles/ceph-validate/tasks/check_rgw_pools.yml
index ae30eec6d1..73fbeb5c6d 100644
--- a/roles/ceph-validate/tasks/check_rgw_pools.yml
+++ b/roles/ceph-validate/tasks/check_rgw_pools.yml
@@ -15,6 +15,7 @@
   when:
     - item.value.type is defined
     - item.value.type == 'ec'
+    - item.value.create_profile | default(true)
     - item.value.ec_k is undefined
 
 - name: Fail if ec_m is not set for ec pools
@@ -24,4 +25,5 @@
   when:
     - item.value.type is defined
    - item.value.type == 'ec'
+    - item.value.create_profile | default(true)
     - item.value.ec_m is undefined

From 4735bf377808b5199c9bc791453d4b211c8d856f Mon Sep 17 00:00:00 2001
From: Seena Fallah
Date: Sat, 16 Mar 2024 16:43:28 +0100
Subject: [PATCH 3/3] ceph_ec_profile: make int params str

Ceph returns them as strings, so the comparison would otherwise always
fail.

Signed-off-by: Seena Fallah
(cherry picked from commit f0b4cc3f0d6c82ec6433935bfd443367422c76a5)
---
 library/ceph_ec_profile.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/library/ceph_ec_profile.py b/library/ceph_ec_profile.py
index 0b84724095..778a7a7287 100644
--- a/library/ceph_ec_profile.py
+++ b/library/ceph_ec_profile.py
@@ -174,11 +174,11 @@ def run_module():
                    choices=['present', 'absent'], default='present'),
         stripe_unit=dict(type='str', required=False),
         plugin=dict(type='str', required=False, default='jerasure'),
-        k=dict(type='int', required=False),
-        m=dict(type='int', required=False),
-        d=dict(type='int', required=False),
-        l=dict(type='int', required=False),
-        c=dict(type='int', required=False),
+        k=dict(type='str', required=False),
+        m=dict(type='str', required=False),
+        d=dict(type='str', required=False),
+        l=dict(type='str', required=False),
+        c=dict(type='str', required=False),
         scalar_mds=dict(type='str', required=False),
         technique=dict(type='str', required=False),
         crush_root=dict(type='str', required=False),