Skip to content

Commit

Permalink
drop rgw multisite deployment support
Browse files Browse the repository at this point in the history
The current approach is extremely complex and introduced a lot
of spaghetti code. This doesn't offer a good user experience at all.

It's time to think about another approach (a dedicated playbook) and drop
the current implementation in order to clean up the code.

Signed-off-by: Guillaume Abrioux <[email protected]>
  • Loading branch information
guits committed Feb 14, 2024
1 parent c58529f commit 05ba9c9
Show file tree
Hide file tree
Showing 47 changed files with 4 additions and 1,926 deletions.
556 changes: 0 additions & 556 deletions README-MULTISITE.md

This file was deleted.

37 changes: 0 additions & 37 deletions group_vars/all.yml.sample
Original file line number Diff line number Diff line change
Expand Up @@ -478,43 +478,6 @@ dummy:
#nfs_obj_gw: "{{ False if groups.get(mon_group_name, []) | length == 0 else True }}"


#############
# MULTISITE #
#############

# Changing this value allows multisite code to run
#rgw_multisite: false

# If the desired multisite configuration involves only one realm, one zone group and one zone (per cluster), then the multisite variables can be set here.
# Please see README-MULTISITE.md for more information.
#
# If multiple realms or multiple zonegroups or multiple zones need to be created on a cluster then,
# the multisite config variables should be editted in their respective zone .yaml file and realm .yaml file.
# See README-MULTISITE.md for more information.

# The following Multi-site related variables should be set by the user.
#
# rgw_zone is set to "default" to enable compression for clusters configured without rgw multi-site
# If multisite is configured, rgw_zone should not be set to "default".
#
#rgw_zone: default

#rgw_zonemaster: true
#rgw_zonesecondary: false
#rgw_zonegroup: solarsystem # should be set by the user
#rgw_zonegroupmaster: true
#rgw_zone_user: zone.user
#rgw_zone_user_display_name: "Zone User"
#rgw_realm: milkyway # should be set by the user
#rgw_multisite_proto: "http"
#system_access_key: 6kWkikvapSnHyE22P7nO # should be re-created by the user
#system_secret_key: MGecsMrWtKZgngOHZdrd6d3JxGO5CPWgT2lcnpSt # should be re-created by the user

# Multi-site remote pull URL variables
#rgw_pull_port: "{{ radosgw_frontend_port }}"
#rgw_pull_proto: "http" # should be the same as rgw_multisite_proto for the master zone cluster
#rgw_pullhost: localhost # rgw_pullhost only needs to be declared if there is a zone secondary.

###################
# CONFIG OVERRIDE #
###################
Expand Down
37 changes: 0 additions & 37 deletions group_vars/rhcs.yml.sample
Original file line number Diff line number Diff line change
Expand Up @@ -478,43 +478,6 @@ ceph_iscsi_config_dev: false
#nfs_obj_gw: "{{ False if groups.get(mon_group_name, []) | length == 0 else True }}"


#############
# MULTISITE #
#############

# Changing this value allows multisite code to run
#rgw_multisite: false

# If the desired multisite configuration involves only one realm, one zone group and one zone (per cluster), then the multisite variables can be set here.
# Please see README-MULTISITE.md for more information.
#
# If multiple realms or multiple zonegroups or multiple zones need to be created on a cluster then,
# the multisite config variables should be editted in their respective zone .yaml file and realm .yaml file.
# See README-MULTISITE.md for more information.

# The following Multi-site related variables should be set by the user.
#
# rgw_zone is set to "default" to enable compression for clusters configured without rgw multi-site
# If multisite is configured, rgw_zone should not be set to "default".
#
#rgw_zone: default

#rgw_zonemaster: true
#rgw_zonesecondary: false
#rgw_zonegroup: solarsystem # should be set by the user
#rgw_zonegroupmaster: true
#rgw_zone_user: zone.user
#rgw_zone_user_display_name: "Zone User"
#rgw_realm: milkyway # should be set by the user
#rgw_multisite_proto: "http"
#system_access_key: 6kWkikvapSnHyE22P7nO # should be re-created by the user
#system_secret_key: MGecsMrWtKZgngOHZdrd6d3JxGO5CPWgT2lcnpSt # should be re-created by the user

# Multi-site remote pull URL variables
#rgw_pull_port: "{{ radosgw_frontend_port }}"
#rgw_pull_proto: "http" # should be the same as rgw_multisite_proto for the master zone cluster
#rgw_pullhost: localhost # rgw_pullhost only needs to be declared if there is a zone secondary.

###################
# CONFIG OVERRIDE #
###################
Expand Down
34 changes: 1 addition & 33 deletions infrastructure-playbooks/cephadm-adopt.yml
Original file line number Diff line number Diff line change
Expand Up @@ -971,29 +971,14 @@
src: "{{ radosgw_frontend_ssl_certificate }}"
register: rgw_ssl_cert

- name: store ssl certificate in kv store (not multisite)
- name: store ssl certificate in kv store
command: >
{{ container_binary }} run --rm -i -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }}
config-key set rgw/cert/rgw.{{ ansible_facts['hostname'] }} -i -
args:
stdin: "{{ rgw_ssl_cert.content | b64decode }}"
stdin_add_newline: no
changed_when: false
when: not rgw_multisite | bool
delegate_to: "{{ groups[mon_group_name][0] }}"
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

- name: store ssl certificate in kv store (multisite)
command: >
{{ container_binary }} run --rm -i -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }}
config-key set rgw/cert/rgw.{{ ansible_facts['hostname'] }}.{{ item.rgw_realm }}.{{ item.rgw_zone }}.{{ item.radosgw_frontend_port }} -i -
args:
stdin: "{{ rgw_ssl_cert.content | b64decode }}"
stdin_add_newline: no
changed_when: false
loop: "{{ rgw_instances }}"
when: rgw_multisite | bool
delegate_to: "{{ groups[mon_group_name][0] }}"
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
Expand All @@ -1015,23 +1000,6 @@
{{ '--ssl' if radosgw_frontend_ssl_certificate else '' }}
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when: not rgw_multisite | bool
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

- name: update the placement of radosgw multisite hosts
command: >
{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} --
ceph orch apply rgw {{ ansible_facts['hostname'] }}.{{ item.rgw_realm }}.{{ item.rgw_zone }}.{{ item.radosgw_frontend_port }}
--placement={{ ansible_facts['nodename'] }}
--realm={{ item.rgw_realm }} --zone={{ item.rgw_zone }}
{{ rgw_subnet if rgw_subnet is defined else '' }}
--port={{ item.radosgw_frontend_port }}
{{ '--ssl' if radosgw_frontend_ssl_certificate else '' }}
changed_when: false
loop: "{{ rgw_instances }}"
when: rgw_multisite | bool
delegate_to: "{{ groups[mon_group_name][0] }}"
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

Expand Down
37 changes: 0 additions & 37 deletions roles/ceph-defaults/defaults/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -470,43 +470,6 @@ nfs_file_gw: false
nfs_obj_gw: "{{ False if groups.get(mon_group_name, []) | length == 0 else True }}"


#############
# MULTISITE #
#############

# Changing this value allows multisite code to run
rgw_multisite: false

# If the desired multisite configuration involves only one realm, one zone group and one zone (per cluster), then the multisite variables can be set here.
# Please see README-MULTISITE.md for more information.
#
# If multiple realms or multiple zonegroups or multiple zones need to be created on a cluster then,
# the multisite config variables should be editted in their respective zone .yaml file and realm .yaml file.
# See README-MULTISITE.md for more information.

# The following Multi-site related variables should be set by the user.
#
# rgw_zone is set to "default" to enable compression for clusters configured without rgw multi-site
# If multisite is configured, rgw_zone should not be set to "default".
#
rgw_zone: default

#rgw_zonemaster: true
#rgw_zonesecondary: false
#rgw_zonegroup: solarsystem # should be set by the user
#rgw_zonegroupmaster: true
#rgw_zone_user: zone.user
#rgw_zone_user_display_name: "Zone User"
#rgw_realm: milkyway # should be set by the user
#rgw_multisite_proto: "http"
#system_access_key: 6kWkikvapSnHyE22P7nO # should be re-created by the user
#system_secret_key: MGecsMrWtKZgngOHZdrd6d3JxGO5CPWgT2lcnpSt # should be re-created by the user

# Multi-site remote pull URL variables
#rgw_pull_port: "{{ radosgw_frontend_port }}"
#rgw_pull_proto: "http" # should be the same as rgw_multisite_proto for the master zone cluster
#rgw_pullhost: localhost # rgw_pullhost only needs to be declared if there is a zone secondary.

###################
# CONFIG OVERRIDE #
###################
Expand Down
47 changes: 2 additions & 45 deletions roles/ceph-facts/tasks/set_radosgw_address.yml
Original file line number Diff line number Diff line change
Expand Up @@ -63,61 +63,18 @@
run_once: true
when: ip_version == 'ipv6'

- name: rgw_instances without rgw multisite
- name: rgw_instances
when:
- ceph_dashboard_call_item is defined or
inventory_hostname in groups.get(rgw_group_name, [])
- not rgw_multisite | bool
block:
- name: reset rgw_instances (workaround)
set_fact:
rgw_instances: []

- name: set_fact rgw_instances without rgw multisite
- name: set_fact rgw_instances
set_fact:
rgw_instances: "{{ rgw_instances|default([]) | union([{'instance_name': 'rgw' + item|string, 'radosgw_address': hostvars[ceph_dashboard_call_item | default(inventory_hostname)]['_radosgw_address'], 'radosgw_frontend_port': radosgw_frontend_port|int + item|int }]) }}"
with_sequence: start=0 end={{ radosgw_num_instances|int - 1 }}
delegate_to: "{{ ceph_dashboard_call_item if ceph_dashboard_call_item is defined else inventory_hostname }}"
delegate_facts: "{{ true if ceph_dashboard_call_item is defined else false }}"

- name: set_fact is_rgw_instances_defined
set_fact:
is_rgw_instances_defined: "{{ rgw_instances is defined }}"
when:
- inventory_hostname in groups.get(rgw_group_name, [])
- rgw_multisite | bool

- name: rgw_instances with rgw multisite
when:
- ceph_dashboard_call_item is defined or
inventory_hostname in groups.get(rgw_group_name, [])
- rgw_multisite | bool
- not is_rgw_instances_defined | default(False) | bool
block:
- name: reset rgw_instances (workaround)
set_fact:
rgw_instances: []

- name: set_fact rgw_instances with rgw multisite
set_fact:
rgw_instances: "{{ rgw_instances|default([]) | union([{ 'instance_name': 'rgw' + item | string, 'radosgw_address': hostvars[ceph_dashboard_call_item | default(inventory_hostname)]['_radosgw_address'], 'radosgw_frontend_port': radosgw_frontend_port | int + item|int, 'rgw_realm': rgw_realm | string, 'rgw_zonegroup': rgw_zonegroup | string, 'rgw_zone': rgw_zone | string, 'system_access_key': system_access_key, 'system_secret_key': system_secret_key, 'rgw_zone_user': rgw_zone_user, 'rgw_zone_user_display_name': rgw_zone_user_display_name, 'endpoint': (rgw_pull_proto + '://' + rgw_pullhost + ':' + rgw_pull_port | string) if not rgw_zonemaster | bool and rgw_zonesecondary | bool else omit }]) }}"
with_sequence: start=0 end={{ radosgw_num_instances|int - 1 }}
delegate_to: "{{ ceph_dashboard_call_item if ceph_dashboard_call_item is defined else inventory_hostname }}"
delegate_facts: "{{ true if ceph_dashboard_call_item is defined else false }}"

- name: set_fact rgw_instances_host
set_fact:
rgw_instances_host: '{{ rgw_instances_host | default([]) | union([item | combine({"host": inventory_hostname})]) }}'
with_items: '{{ rgw_instances }}'
when:
- inventory_hostname in groups.get(rgw_group_name, [])
- rgw_multisite | bool

- name: set_fact rgw_instances_all
set_fact:
rgw_instances_all: '{{ rgw_instances_all | default([]) | union(hostvars[item]["rgw_instances_host"]) }}'
with_items: "{{ groups.get(rgw_group_name, []) }}"
when:
- inventory_hostname in groups.get(rgw_group_name, [])
- hostvars[item]["rgw_instances_host"] is defined
- hostvars[item]["rgw_multisite"] | default(False) | bool
28 changes: 0 additions & 28 deletions roles/ceph-handler/tasks/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -48,31 +48,3 @@
or inventory_hostname in groups.get(mds_group_name, [])
or inventory_hostname in groups.get(rgw_group_name, [])
or inventory_hostname in groups.get(rbdmirror_group_name, [])

- name: rgw multi-instances related tasks
when:
- not docker2podman | default(false) | bool
- not rolling_update | default(false) | bool
- inventory_hostname in groups.get(rgw_group_name, [])
- handler_rgw_status | bool
block:
- name: import_role ceph-config
import_role:
name: ceph-config

- name: import_role ceph-rgw
import_role:
name: ceph-rgw
tasks_from: pre_requisite.yml

- name: import_role ceph-rgw
import_role:
name: ceph-rgw
tasks_from: multisite.yml
when:
- rgw_multisite | bool
- not multisite_called_from_handler_role | default(False) | bool

- name: set_fact multisite_called_from_handler_role
set_fact:
multisite_called_from_handler_role: true
8 changes: 0 additions & 8 deletions roles/ceph-rgw/tasks/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -17,17 +17,9 @@
- name: include_tasks start_radosgw.yml
include_tasks: start_radosgw.yml
when:
- not rgw_multisite | bool
- not containerized_deployment | bool

- name: include start_docker_rgw.yml
include_tasks: start_docker_rgw.yml
when:
- not rgw_multisite | bool
- containerized_deployment | bool

- name: include_tasks multisite/main.yml
include_tasks: multisite/main.yml
when:
- rgw_multisite | bool
- not multisite_called_from_handler_role | default(False) | bool
3 changes: 0 additions & 3 deletions roles/ceph-rgw/tasks/multisite.yml

This file was deleted.

28 changes: 0 additions & 28 deletions roles/ceph-rgw/tasks/multisite/create_zone_user.yml

This file was deleted.

Loading

0 comments on commit 05ba9c9

Please sign in to comment.