diff --git a/README.md b/README.md
index 1af2f405..75c1fccc 100644
--- a/README.md
+++ b/README.md
@@ -676,41 +676,6 @@ This variable defines resource clones. The items are as follows:
 You may take a look at
 [an example](#creating-a-cluster-with-fencing-and-several-resources).
 
-#### `ha_cluster_resource_master_slave_clones`
-
-**[Master Slave clones are deprecated](
-  https://crmsh.github.io/man-4.3/#cmdhelp_configure_ms), but they are
-supported for crmsh and SUSE servers.**
-
-structure, default: no master slave resource clones
-
-```yaml
-ha_cluster_resource_master_slave_clones:
-  - resource_id: resource-to-be-cloned
-    id: custom-clone-id
-    meta_attrs:
-      - attrs:
-          - name: clone_meta_attribute1_name
-            value: clone_meta_attribute1_value
-          - name: clone_meta_attribute2_name
-            value: clone_meta_attribute2_value
-```
-
-This variable defines master slave resource clones. The items are as follows:
-
-* `resource_id` (mandatory) - Resource to be cloned. The resource must be
-  defined in
-  [`ha_cluster_resource_primitives`](#ha_cluster_resource_primitives) or
-  [`ha_cluster_resource_groups`](#ha_cluster_resource_groups).
-* `id` (optional) - Custom ID of the clone. If no ID is specified, it will be
-  generated. Warning will be emitted if this option is not supported by the
-  cluster.
-* `meta_attrs` (optional) - List of sets of the clone's meta attributes.
-  Currently, only one set is supported.
-
-You may take a look at
-[an example](#creating-a-cluster-with-fencing-and-several-resources).
-
 #### `ha_cluster_resource_bundles`
 
 structure, default: no bundle resources
@@ -1700,15 +1665,6 @@ SBD stonith resource.
                 value: '1'
       - resource_id: cloned-group
         promotable: true
-    ha_cluster_resource_master_slave_clones:
-      - resource_id: resource-to-be-cloned
-        id: custom-clone-id
-        meta_attrs:
-          - attrs:
-              - name: clone-max
-                value: '2'
-              - name: clone-node-max
-                value: '1'
     ha_cluster_resource_bundles:
       - id: bundle-with-resource
         resource-id: bundled-resource
diff --git a/defaults/main.yml b/defaults/main.yml
index bba1bf94..bc90201c 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -52,7 +52,6 @@ ha_cluster_resource_operation_defaults: {}
 ha_cluster_resource_primitives: []
 ha_cluster_resource_groups: []
 ha_cluster_resource_clones: []
-ha_cluster_resource_master_slave_clones: []
 ha_cluster_resource_bundles: []
 
 ha_cluster_stonith_levels: []
diff --git a/tasks/shell_crmsh/create-and-push-cib.yml b/tasks/shell_crmsh/create-and-push-cib.yml
index b6c7476d..08ca18d3 100644
--- a/tasks/shell_crmsh/create-and-push-cib.yml
+++ b/tasks/shell_crmsh/create-and-push-cib.yml
@@ -31,8 +31,8 @@
 # status changes, resulting in shadow CIB outdated and unable to patch.
 # Sleep is implemented to ensure that cluster have enough time to freeze
 # to ensure CIB export consistency.
-# Meta-attrs is-managed will conflict with maintenance mode. Option n
-# will skip their deletion.
+# The is-managed meta attribute conflicts with maintenance mode as well as
+# individual resource maintenance attributes. Expect skips their deletion.
 - name: Put cluster in maintenance mode to freeze cib changes
   ansible.builtin.expect:
     command: crm configure property maintenance-mode=true
@@ -142,13 +142,6 @@
       resource_clone: "{{ item }}"
   loop: "{{ ha_cluster_resource_clones }}"
 
-## Added support for Master Slave clone resources
-- name: Configure cluster master slave clone resources
-  ansible.builtin.include_tasks: crm-cib-resource-master-slave.yml
-  vars:
-    resource: "{{ item }}"
-  loop: "{{ ha_cluster_resource_master_slave_clones }}"
-
 ## Stonith levels - fencing_topology
 - name: Configure stonith levels - fencing_topology
   ansible.builtin.include_tasks: crm-cib-stonith-level.yml
@@ -275,8 +268,8 @@
   when: __ha_cluster_cib_diff.rc == 1
   run_once: true  # noqa: run_once[task]
 
-# Meta-attrs is-managed will conflict with maintenance mode. Option n
-# will skip their deletion.
+# The is-managed meta attribute conflicts with maintenance mode as well as
+# individual resource maintenance attributes. Expect skips their deletion.
 - name: Disable maintenance mode
   ansible.builtin.expect:
     command: crm configure property maintenance-mode=false
diff --git a/tasks/shell_crmsh/crm-cib-constraint-colocation.yml b/tasks/shell_crmsh/crm-cib-constraint-colocation.yml
index cd4d76a9..bd3c4270 100644
--- a/tasks/shell_crmsh/crm-cib-constraint-colocation.yml
+++ b/tasks/shell_crmsh/crm-cib-constraint-colocation.yml
@@ -3,7 +3,7 @@
 # Verify if constraint.resource_leader.id exists
 - name: Verify resource_leader presence {{ constraint.resource_leader.id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm -c {{ __ha_cluster_crm_shadow }}
       configure show {{ constraint.resource_leader.id }}
   register: __ha_cluster_constraint_resource_leader
@@ -14,7 +14,7 @@
 # Verify if constraint.resourceresource_follower_leader.id exists
 - name: Verify resource_follower presence {{ constraint.resource_follower.id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm -c {{ __ha_cluster_crm_shadow }}
       configure show {{ constraint.resource_follower.id }}
   register: __ha_cluster_constraint_resource_follower
@@ -32,7 +32,7 @@
 # Verify if Shadow CIB already contains same constraint id.
 - name: Verify colocation constraint presence {{ __ha_cluster_constraint_id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm -c {{ __ha_cluster_crm_shadow }}
       configure show {{ __ha_cluster_constraint_id }}
   register: __ha_cluster_constraint_status
   changed_when: false
   failed_when: false
@@ -42,16 +42,17 @@
 # Delete constraint id in Shadow CIB to avoid errors during cibadmin patch.
 - name: Delete present colocation constraint {{ __ha_cluster_constraint_id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm --force -c {{ __ha_cluster_crm_shadow }}
       configure delete {{ __ha_cluster_constraint_id }}
   when: __ha_cluster_constraint_status.rc == 0
   check_mode: false
   changed_when: not ansible_check_mode
 
+# Expect module is used to prevent crmsh from hanging at interactive prompts
 - name: Configure colocation constraint {{ __ha_cluster_constraint_id }}
-  ansible.builtin.command:
-    cmd: |
+  ansible.builtin.expect:
+    command: |
       crm -c {{ __ha_cluster_crm_shadow }}
       configure colocation {{ __ha_cluster_constraint_id }}
       {% for option in constraint.options | d([]) if option.name == 'score' %}
@@ -76,5 +77,16 @@
       {% for option in constraint.options | d([]) if option.name != 'score' %}
       {{ option.name | quote }}={{ option.value | quote }}
       {% endfor %}
+    responses:
+      ".*Do you still want to commit.*": "n"
   check_mode: false
   changed_when: not ansible_check_mode
+  ignore_errors: true
+  register: __ha_cluster_crmsh_output
+
+- name: Display crm command error details
+  ansible.builtin.fail:
+    msg: "{{ __ha_cluster_crmsh_output.stdout_lines }}"
+  when:
+    - __ha_cluster_crmsh_output is defined
+    - __ha_cluster_crmsh_output.rc != 0
diff --git a/tasks/shell_crmsh/crm-cib-constraint-location.yml b/tasks/shell_crmsh/crm-cib-constraint-location.yml
index 7dabd182..f0bb3386 100644
--- a/tasks/shell_crmsh/crm-cib-constraint-location.yml
+++ b/tasks/shell_crmsh/crm-cib-constraint-location.yml
@@ -3,7 +3,7 @@
 # Verify if constraint.resource.id exists
 - name: Verify resource presence {{ constraint.resource.id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm -c {{ __ha_cluster_crm_shadow }}
       configure show {{ constraint.resource.id }}
   register: __ha_cluster_constraint_resource
@@ -25,7 +25,7 @@
 # Verify if Shadow CIB already contains same constraint id.
 - name: Verify location constraint presence {{ __ha_cluster_constraint_id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm -c {{ __ha_cluster_crm_shadow }}
       configure show {{ __ha_cluster_constraint_id }}
   register: __ha_cluster_constraint_status
   changed_when: false
   failed_when: false
@@ -35,16 +35,17 @@
 # Delete constraint id in Shadow CIB to avoid errors during cibadmin patch.
 - name: Delete present location constraint {{ __ha_cluster_constraint_id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm --force -c {{ __ha_cluster_crm_shadow }}
       configure delete {{ __ha_cluster_constraint_id }}
   when: __ha_cluster_constraint_status.rc == 0
   check_mode: false
   changed_when: not ansible_check_mode
 
+# Expect module is used to prevent crmsh from hanging at interactive prompts
 - name: Configure location constraint {{ __ha_cluster_constraint_id }}
-  ansible.builtin.command:
-    cmd: |
+  ansible.builtin.expect:
+    command: |
       crm -c {{ __ha_cluster_crm_shadow }}
       configure location {{ __ha_cluster_constraint_id }}
       {% if constraint.resource.pattern | d() %}
@@ -72,5 +73,16 @@
       {% for option in constraint.options | d([]) if option.name != 'score' %}
       {{ option.name | quote }}={{ option.value | quote }}
       {% endfor %}
+    responses:
+      ".*Do you still want to commit.*": "n"
   check_mode: false
   changed_when: not ansible_check_mode
+  ignore_errors: true
+  register: __ha_cluster_crmsh_output
+
+- name: Display crm command error details
+  ansible.builtin.fail:
+    msg: "{{ __ha_cluster_crmsh_output.stdout_lines }}"
+  when:
+    - __ha_cluster_crmsh_output is defined
+    - __ha_cluster_crmsh_output.rc != 0
diff --git a/tasks/shell_crmsh/crm-cib-constraint-order.yml b/tasks/shell_crmsh/crm-cib-constraint-order.yml
index 061bf408..1f40fd3c 100644
--- a/tasks/shell_crmsh/crm-cib-constraint-order.yml
+++ b/tasks/shell_crmsh/crm-cib-constraint-order.yml
@@ -14,7 +14,7 @@
 # Verify if constraint.resource_then.id exists
 - name: Verify resource_then presence {{ constraint.resource_then.id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm -c {{ __ha_cluster_crm_shadow }}
       configure show {{ constraint.resource_then.id }}
   register: __ha_cluster_constraint_resource_then
@@ -31,7 +31,7 @@
 # Verify if Shadow CIB already contains same constraint id.
 - name: Verify order constraint presence {{ __ha_cluster_constraint_id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm -c {{ __ha_cluster_crm_shadow }}
       configure show {{ __ha_cluster_constraint_id }}
   register: __ha_cluster_constraint_status
   changed_when: false
   failed_when: false
@@ -41,16 +41,17 @@
 # Delete constraint id in Shadow CIB to avoid errors during cibadmin patch.
 - name: Delete present order constraint {{ __ha_cluster_constraint_id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm --force -c {{ __ha_cluster_crm_shadow }}
       configure delete {{ __ha_cluster_constraint_id }}
   when: __ha_cluster_constraint_status.rc == 0
   check_mode: false
   changed_when: not ansible_check_mode
 
+# Expect module is used to prevent crmsh from hanging at interactive prompts
 - name: Configure order constraint {{ __ha_cluster_constraint_id }}
-  ansible.builtin.command:
-    cmd: |
+  ansible.builtin.expect:
+    command: |
       crm -c {{ __ha_cluster_crm_shadow }}
       configure order {{ __ha_cluster_constraint_id | quote }}
       {% for option in constraint.options | d([]) if option.name == 'kind' %}
@@ -66,5 +67,16 @@
       if option.name != 'score' and option.name != 'kind'%}
       {{ option.name | quote }}={{ option.value | quote }}
       {% endfor %}
+    responses:
+      ".*Do you still want to commit.*": "n"
   check_mode: false
   changed_when: not ansible_check_mode
+  ignore_errors: true
+  register: __ha_cluster_crmsh_output
+
+- name: Display crm command error details
+  ansible.builtin.fail:
+    msg: "{{ __ha_cluster_crmsh_output.stdout_lines }}"
+  when:
+    - __ha_cluster_crmsh_output is defined
+    - __ha_cluster_crmsh_output.rc != 0
diff --git a/tasks/shell_crmsh/crm-cib-constraint-set.yml b/tasks/shell_crmsh/crm-cib-constraint-set.yml
index f765afed..1c325f7c 100644
--- a/tasks/shell_crmsh/crm-cib-constraint-set.yml
+++ b/tasks/shell_crmsh/crm-cib-constraint-set.yml
@@ -3,13 +3,13 @@
 # Verify if resources in sets exist
 - name: Verify set resources presence
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm -c {{ __ha_cluster_crm_shadow }}
       configure show
       {% for set in constraint.resource_sets %}
-      {% for resource in set.resource_ids %}
-      {{ resource | quote }}
-      {% endfor %}
-      {% endfor %}
+        {% for resource in set.resource_ids %}
+        {{ resource | quote }}
+        {% endfor %}
+        {% endfor %}
   register: __ha_cluster_constraint_resource_set
   changed_when: false
   failed_when:
@@ -25,7 +25,7 @@
 # Verify if Shadow CIB already contains same constraint id.
 - name: Verify constraint set presence {{ __ha_cluster_constraint_id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm -c {{ __ha_cluster_crm_shadow }}
       configure show {{ __ha_cluster_constraint_id }}
   register: __ha_cluster_resource_status
   changed_when: false
   failed_when: false
@@ -35,17 +35,18 @@
 # Delete constraint id in Shadow CIB to avoid errors during cibadmin patch.
 - name: Delete present constraint set {{ __ha_cluster_constraint_id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm --force -c {{ __ha_cluster_crm_shadow }}
       configure delete {{ __ha_cluster_constraint_id }}
   when: __ha_cluster_resource_status.rc == 0
   check_mode: false
   changed_when: not ansible_check_mode
 
+# Expect module is used to prevent crmsh from hanging at interactive prompts
 - name: Configure {{ constraint_type }} constraint set
     '{{ __ha_cluster_constraint_id }}'  # noqa name[template]
-  ansible.builtin.command:
-    cmd: |
+  ansible.builtin.expect:
+    command: |
       crm -c {{ __ha_cluster_crm_shadow }}
       configure {{ constraint_type if constraint_type != 'ticket' else 'rsc_ticket' }}
       {{ __ha_cluster_constraint_id }}
@@ -81,5 +82,16 @@
       {% for option in constraint.options | d([]) %}
       {{ option.name | quote }}={{ option.value | quote }}
       {% endfor %}
+    responses:
+      ".*Do you still want to commit.*": "n"
   check_mode: false
   changed_when: not ansible_check_mode
+  ignore_errors: true
+  register: __ha_cluster_crmsh_output
+
+- name: Display crm command error details
+  ansible.builtin.fail:
+    msg: "{{ __ha_cluster_crmsh_output.stdout_lines }}"
+  when:
+    - __ha_cluster_crmsh_output is defined
+    - __ha_cluster_crmsh_output.rc != 0
diff --git a/tasks/shell_crmsh/crm-cib-constraint-ticket.yml b/tasks/shell_crmsh/crm-cib-constraint-ticket.yml
index 045ad397..29171215 100644
--- a/tasks/shell_crmsh/crm-cib-constraint-ticket.yml
+++ b/tasks/shell_crmsh/crm-cib-constraint-ticket.yml
@@ -3,7 +3,7 @@
 # Verify if constraint.resource.id exists
 - name: Verify resource presence {{ constraint.resource.id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm -c {{ __ha_cluster_crm_shadow }}
       configure show {{ constraint.resource.id }}
   register: __ha_cluster_constraint_resource
@@ -21,7 +21,7 @@
 # Verify if Shadow CIB already contains same constraint id.
 - name: Verify ticket constraint presence {{ __ha_cluster_constraint_id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm -c {{ __ha_cluster_crm_shadow }}
       configure show {{ __ha_cluster_constraint_id }}
   register: __ha_cluster_constraint_status
   changed_when: false
   failed_when: false
@@ -31,7 +31,7 @@
 # Delete constraint id in Shadow CIB to avoid errors during cibadmin patch.
 - name: Delete present ticket constraint {{ __ha_cluster_constraint_id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm --force -c {{ __ha_cluster_crm_shadow }}
       configure delete {{ __ha_cluster_constraint_id }}
   when: __ha_cluster_constraint_status.rc == 0
@@ -54,5 +54,7 @@
       {% for option in constraint.options | d([]) %}
       {{ option.name | quote }}={{ option.value | quote }}
       {% endfor %}
+  # crm can get stuck if it encounters an error and waits for a response.
+  timeout: 60
   check_mode: false
   changed_when: not ansible_check_mode
diff --git a/tasks/shell_crmsh/crm-cib-resource-clone.yml b/tasks/shell_crmsh/crm-cib-resource-clone.yml
index c2049b3e..2274e651 100644
--- a/tasks/shell_crmsh/crm-cib-resource-clone.yml
+++ b/tasks/shell_crmsh/crm-cib-resource-clone.yml
@@ -3,14 +3,14 @@
 # Verify if resource_clone exists
 - name: Verify resource_clone presence {{ resource_clone.resource_id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm -c {{ __ha_cluster_crm_shadow }}
       configure show {{ resource_clone.resource_id }}
   register: __ha_cluster_constraint_resource
   changed_when: false
   failed_when:
     - "'does not exist' in __ha_cluster_constraint_resource.stderr"
- 
+
 - name: Define resouce clone resource_clone.id
     '{{ resource_clone.id | d('cln_' + resource_clone.resource_id) }}'
   ansible.builtin.set_fact:
@@ -21,7 +21,7 @@
 # Verify if Shadow CIB already contains same resource id.
 - name: Verify resouce clone presence {{ __ha_cluster_resource_id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm -c {{ __ha_cluster_crm_shadow }}
       configure show {{ __ha_cluster_resource_id }}
   register: __ha_cluster_resource_status
   changed_when: false
   failed_when: false
@@ -31,18 +31,23 @@
 # Delete resource id in Shadow CIB to avoid errors during cibadmin patch.
 - name: Delete present resouce clone {{ __ha_cluster_resource_id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm --force -c {{ __ha_cluster_crm_shadow }}
       configure delete {{ __ha_cluster_resource_id }}
   when: __ha_cluster_resource_status.rc == 0
   check_mode: false
   changed_when: not ansible_check_mode
 
+# Clone is the default resource type unless the attribute ms: true is set.
+# Pacemaker 2.0 deprecated ms, but it remains a valid and supported solution
+# for SAP HANA clusters on SUSE using SAPHanaSR.
 - name: Configure resource clone {{ __ha_cluster_resource_id }}
   ansible.builtin.command:
     cmd: |
       crm -c {{ __ha_cluster_crm_shadow }}
-      configure clone {{ __ha_cluster_resource_id }}
+      configure {% if resource_clone.ms is defined and
+      resource_clone.ms %} ms {% else %} clone {% endif %}
+      {{ __ha_cluster_resource_id }}
       {{ resource_clone.resource_id | quote }} \
       {% if resource_clone.meta_attrs[0].attrs | default(False) %}
       meta
       {% for attr in resource_clone.meta_attrs[0].attrs -%}
       {{ attr.name | quote }}={{ attr.value | quote }}
       {% endfor %}
       {% endif %}
+  # crm can get stuck if it encounters an error and waits for a response.
+  timeout: 60
   check_mode: false
   changed_when: not ansible_check_mode
diff --git a/tasks/shell_crmsh/crm-cib-resource-group.yml b/tasks/shell_crmsh/crm-cib-resource-group.yml
index d384a21e..15a9fb9b 100644
--- a/tasks/shell_crmsh/crm-cib-resource-group.yml
+++ b/tasks/shell_crmsh/crm-cib-resource-group.yml
@@ -3,7 +3,7 @@
 # Verify if resource_group.resource_ids exists
 - name: Verify resource_group.resource_ids presence
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm -c {{ __ha_cluster_crm_shadow }}
       configure show {{ resource }}
   register: __ha_cluster_constraint_resources
@@ -16,7 +16,7 @@
 # Verify if Shadow CIB already contains same resource id.
 - name: Verify resource group presence {{ resource_group.id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm -c {{ __ha_cluster_crm_shadow }}
       configure show {{ resource_group.id }}
   register: __ha_cluster_resource_status
   changed_when: false
   failed_when: false
@@ -26,7 +26,7 @@
 # Delete resource id in Shadow CIB to avoid errors during cibadmin patch.
 - name: Delete present resource group {{ resource_group.id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm --force -c {{ __ha_cluster_crm_shadow }}
       configure delete {{ resource_group.id }}
   when: __ha_cluster_resource_status.rc == 0
@@ -47,5 +47,7 @@
       {{ attr.name | quote }}={{ attr.value | quote }}
       {% endfor %}
       {% endif %}
+  # crm can get stuck if it encounters an error and waits for a response.
+  timeout: 60
   check_mode: false
   changed_when: not ansible_check_mode
diff --git a/tasks/shell_crmsh/crm-cib-resource-master-slave.yml b/tasks/shell_crmsh/crm-cib-resource-master-slave.yml
deleted file mode 100644
index a0158a98..00000000
--- a/tasks/shell_crmsh/crm-cib-resource-master-slave.yml
+++ /dev/null
@@ -1,54 +0,0 @@
-# SPDX-License-Identifier: MIT
----
-# Verify if resource.resource_id exists
-- name: Verify resource presence {{ resource.resource_id }}
-  ansible.builtin.command:
-    cmd: |
-      crm -c {{ __ha_cluster_crm_shadow }}
-      configure show {{ resource.resource_id }}
-  register: __ha_cluster_constraint_resource
-  changed_when: false
-  failed_when:
-    - "'does not exist' in __ha_cluster_constraint_resource.stderr"
-
-- name: Define master slave msl_ resource id '{{
-    resource.id | d('msl_' + resource.resource_id) }}'
-  ansible.builtin.set_fact:
-    __ha_cluster_resource_id:
-      "{{ resource.id if resource.id is defined
-      else 'msl_' + resource.resource_id | quote }}"
-
-# Verify if Shadow CIB already contains same resource id.
-- name: Verify master slave clone presence {{ __ha_cluster_resource_id }}
-  ansible.builtin.command:
-    cmd: |
-      crm -c {{ __ha_cluster_crm_shadow }}
-      configure show {{ __ha_cluster_resource_id }}
-  register: __ha_cluster_resource_status
-  changed_when: false
-  failed_when: false
-
-# Delete resource id in Shadow CIB to avoid errors during cibadmin patch.
-- name: Delete present master slave clone {{ __ha_cluster_resource_id }}
-  ansible.builtin.command:
-    cmd: |
-      crm --force -c {{ __ha_cluster_crm_shadow }}
-      configure delete {{ __ha_cluster_resource_id }}
-  when: __ha_cluster_resource_status.rc == 0
-  check_mode: false
-  changed_when: not ansible_check_mode
-
-- name: Configure master slave clone {{ __ha_cluster_resource_id }}
-  ansible.builtin.command:
-    cmd: |
-      crm -c {{ __ha_cluster_crm_shadow }}
-      configure ms {{ __ha_cluster_resource_id }}
-      {{ resource.resource_id | quote }} \
-      {% if resource.meta_attrs[0].attrs | default(False) %}
-      meta
-      {% for attr in resource.meta_attrs[0].attrs -%}
-      {{ attr.name | quote }}={{ attr.value | quote }}
-      {% endfor %}
-      {% endif %}
-  check_mode: false
-  changed_when: not ansible_check_mode
diff --git a/tasks/shell_crmsh/crm-cib-resource-primitive.yml b/tasks/shell_crmsh/crm-cib-resource-primitive.yml
index 43f2c05e..31738eea 100644
--- a/tasks/shell_crmsh/crm-cib-resource-primitive.yml
+++ b/tasks/shell_crmsh/crm-cib-resource-primitive.yml
@@ -11,7 +11,7 @@
 # Delete resource id in Shadow CIB to avoid errors during cibadmin patch.
 - name: Delete present resource primitive {{ resource.id }}
   ansible.builtin.command:
-    cmd: |
+    cmd: >-
       crm --force -c {{ __ha_cluster_crm_shadow }}
       configure delete {{ resource.id }}
   when: __ha_cluster_resource_status.rc == 0
@@ -49,5 +49,7 @@
       {% endif %}
       {% endfor %}
       {% endif %}
+  # crm can get stuck if it encounters an error and waits for a response.
+  timeout: 60
   check_mode: false
   changed_when: not ansible_check_mode
diff --git a/vars/Suse.yml b/vars/Suse.yml
index b57312f8..5bf3b5fe 100644
--- a/vars/Suse.yml
+++ b/vars/Suse.yml
@@ -50,4 +50,5 @@ ha_cluster_manage_selinux: false
 
 # List of allowed role inputs to limit crmsh issues
 __ha_cluster_crmsh_roles:
-  ['promoted', 'unpromoted', 'master', 'slave', 'started', 'stopped']
+  ['promoted', 'unpromoted',
+   'master', 'slave', 'started', 'stopped']  # wokeignore:rule=master,slave
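For reference only (not part of the patch): a minimal sketch of how a former
`ha_cluster_resource_master_slave_clones` entry might be written after this
change, reusing the values from the removed README example and assuming the
`ms: true` flag handled in `crm-cib-resource-clone.yml` above; adjust names
and meta attributes to your cluster.

```yaml
ha_cluster_resource_clones:
  - resource_id: resource-to-be-cloned
    # Assumption: ms: true selects the crmsh 'ms' (master/slave) clone code
    # path added in this change; omit it to create an ordinary clone.
    ms: true
    id: custom-clone-id
    meta_attrs:
      - attrs:
          - name: clone-max
            value: '2'
          - name: clone-node-max
            value: '1'
```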