Skip to content

Commit

Permalink
Merge pull request #4424 from tintumathew10/odf_deploy
Browse files Browse the repository at this point in the history
Adding 8.0 support for ODF ceph cluster deployment
  • Loading branch information
mergify[bot] authored Feb 5, 2025
2 parents fcf5207 + 3b3fcb8 commit e094f05
Show file tree
Hide file tree
Showing 7 changed files with 650 additions and 0 deletions.
59 changes: 59 additions & 0 deletions conf/squid/integrations/7_node_ceph.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
# System Under Test environment configuration for OCS-RHCeph integration.
# 7 cluster nodes (node1-node7) plus one client node (node8).
---
globals:
  - ceph-cluster:
      name: ceph

      # Bootstrap / admin node — no OSD disks attached.
      node1:
        role:
          - _admin
          - installer
          - mgr
          - mon

      node2:
        disk-size: 20
        no-of-volumes: 6
        role:
          - mgr
          - mon
          - osd

      node3:
        disk-size: 20
        no-of-volumes: 6
        role:
          - mds
          - mon
          - osd

      node4:
        disk-size: 20
        no-of-volumes: 6
        role:
          - mds
          - osd

      node5:
        disk-size: 20
        no-of-volumes: 6
        role:
          - mds
          - osd

      node6:
        disk-size: 20
        no-of-volumes: 6
        role:
          - mds
          - osd

      node7:
        disk-size: 20
        no-of-volumes: 6
        role:
          - osd
          - rgw

      # Client node — no storage volumes.
      node8:
        role: client
39 changes: 39 additions & 0 deletions conf/squid/integrations/ocs_rgw_ssl_openstack_conf.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
# System Under Test environment configuration for OCS-RHCeph with SSL RGW
# on OpenStack: 4 storage nodes (node1-node4) plus one client (node5).
---
globals:
  - ceph-cluster:
      name: ceph
      node1:
        role:
          - _admin
          - installer
          - osd
          - mds
          - grafana
        no-of-volumes: 6
        disk-size: 15
      node2:
        role:
          - osd
          - mon
          - mgr
          - rgw
        no-of-volumes: 6
        disk-size: 15
      node3:
        role:
          - mon
          - mgr
          - osd
          - mds
        no-of-volumes: 6
        disk-size: 15
      node4:
        role:
          - mon
          - osd
          - mgr
          - rgw
        no-of-volumes: 6
        disk-size: 15
      # Client node — no storage volumes.
      node5:
        role:
          - client
7 changes: 7 additions & 0 deletions pipeline/rhcs_deploy.groovy
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,13 @@ def argsMap = [
"suite": "suites/reef/integrations/ocs.yaml",
"platform": "rhel-9",
"rgwSecure": "suites/reef/integrations/ocs_rgw_ssl.yaml",
],
"8": [
"inventory": "conf/inventory/rhel-9.5-server-x86_64-xlarge.yaml",
"globalConf": "conf/squid/integrations/7_node_ceph.yaml",
"suite": "suites/squid/integrations/ocs.yaml",
"platform": "rhel-9",
"rgwSecure": "suites/squid/integrations/ocs_rgw_ssl.yaml",
]
]
def ciMap = [:]
Expand Down
116 changes: 116 additions & 0 deletions suites/squid/integrations/ocs.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
# This test suite deploys the required environment for OCS CI to validate with external
# Ceph cluster. Only cluster deployment is validated and the rest of validation occurs
# in OCS / ODF CI.
---
tests:

  - test:
      abort-on-fail: true
      desc: "Install prerequisites for RHCeph cluster deployment."
      module: install_prereq.py
      name: "test suite setup"

  - test:
      abort-on-fail: true
      config:
        args:
          mon-ip: node1
        command: bootstrap
        service: cephadm
      desc: "Bootstrap the cluster with minimal configuration."
      destroy-cluster: false
      module: test_bootstrap.py
      name: "Test cluster deployment using cephadm"

  - test:
      abort-on-fail: true
      config:
        args:
          attach_ip_address: true
          labels: apply-all-labels
        command: add_hosts
        service: host
      desc: "Adding hosts to the cluster using labels and IP information."
      destroy-cluster: false
      module: test_host.py
      name: "Test host add with labels and IP"

  - test:
      abort-on-fail: true
      config:
        args:
          all-available-devices: true
        command: apply
        service: osd
      desc: "Deploying OSD daemons using all-available-devices option."
      destroy-cluster: false
      module: test_osd.py
      name: "Test apply osd with all-available-devices option"

  - test:
      abort-on-fail: true
      config:
        args:
          placement:
            label: rgw
        command: apply
        pos_args:
          - rgw.1
        service: rgw
      desc: "Deploying RGW daemon using label placement."
      destroy-cluster: false
      module: test_rgw.py
      name: "Test apply rgw with label"

  - test:
      abort-on-fail: true
      config:
        cephadm: true
        commands:
          - ceph fs volume create fsvol001 --placement='4 label:mds'
          - ceph fs set fsvol001 max_mds 2
      desc: "Create and configure a volume with 2 MDS limit"
      destroy-cluster: false
      module: exec.py
      # Fixed truncated name ("Create volume with").
      name: "Create volume with 2 MDS limit"

  - test:
      abort-on-fail: true
      config:
        args:
          placement:
            label: mds
        command: apply
        pos_args:
          - fsvol001
        service: mds
      desc: Test deploying MDS daemons on hosts with label mds.
      destroy-cluster: false
      module: test_mds.py
      # Fixed copy-paste name that referred to OSD / all-available-devices.
      name: Test MDS daemon deployment using label placement.

  - test:
      abort-on-fail: true
      config:
        command: add
        id: client.1
        node: node8                     # new client node
        install_packages:
          - ceph-common
        copy_admin_keyring: true
      desc: Configure the ceph client
      destroy-cluster: false
      module: test_client.py
      name: configure client

  - test:
      abort-on-fail: true
      desc: "Check Ceph health"
      module: test_cluster_health.py
      # Renamed to avoid duplicating the gather_cluster_info test name below.
      name: "Check ceph cluster health."

  - test:
      abort-on-fail: true
      desc: "Retrieve the deployed cluster information."
      module: gather_cluster_info.py
      name: "Get ceph cluster details."
113 changes: 113 additions & 0 deletions suites/squid/integrations/ocs_rgw_ssl.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
# This test suite deploys the required environment for OCS CI to validate with external
# Ceph cluster. Only cluster deployment is validated and the rest of validation occurs
# in OCS / ODF CI. Variant with an SSL-terminated RGW endpoint.
---
tests:

  - test:
      abort-on-fail: true
      desc: "Install prerequisites for RHCeph cluster deployment."
      module: install_prereq.py
      name: "test suite setup"

  - test:
      abort-on-fail: true
      config:
        args:
          mon-ip: node1
        command: bootstrap
        service: cephadm
      desc: "Bootstrap the cluster with minimal configuration."
      destroy-cluster: false
      module: test_bootstrap.py
      name: "Test cluster deployment using cephadm"

  - test:
      abort-on-fail: true
      config:
        args:
          attach_ip_address: true
          labels: apply-all-labels
        command: add_hosts
        service: host
      desc: "Adding hosts to the cluster using labels and IP information."
      destroy-cluster: false
      module: test_host.py
      name: "Test host add with labels and IP"

  - test:
      abort-on-fail: true
      config:
        args:
          all-available-devices: true
        command: apply
        service: osd
      desc: "Deploying OSD daemons using all-available-devices option."
      destroy-cluster: false
      module: test_osd.py
      name: "Test apply osd with all-available-devices option"

  - test:
      abort-on-fail: true
      config:
        command: apply_spec
        specs:
          - service_type: rgw
            service_id: rgw.ssl
            placement:
              nodes:
                - node7
            spec:
              ssl: true
              rgw_frontend_ssl_certificate: create-cert
      desc: "Deploying RGW endpoint which is SSL terminated."
      destroy-cluster: false
      module: test_rgw.py
      name: "Test rgw ssl endpoint using spec"

  - test:
      abort-on-fail: true
      config:
        cephadm: true
        commands:
          - ceph fs volume create fsvol001 --placement='4 label:mds'
          - ceph fs set fsvol001 max_mds 2
      desc: "Create and configure a volume with 2 MDS limit"
      destroy-cluster: false
      module: exec.py
      # Fixed truncated name ("Create volume with").
      name: "Create volume with 2 MDS limit"

  - test:
      abort-on-fail: true
      config:
        args:
          placement:
            label: mds
        command: apply
        pos_args:
          - fsvol001
        service: mds
      desc: Test deploying MDS daemons on hosts with label mds.
      destroy-cluster: false
      module: test_mds.py
      # Fixed copy-paste name that referred to OSD / all-available-devices.
      name: Test MDS daemon deployment using label placement.

  - test:
      abort-on-fail: true
      config:
        command: add
        id: client.1
        node: node8
        install_packages:
          - ceph-common
        copy_admin_keyring: true
      desc: Configure the ceph client
      destroy-cluster: false
      module: test_client.py
      name: configure client

  - test:
      abort-on-fail: true
      desc: "Retrieve the deployed cluster information."
      module: gather_cluster_info.py
      name: "Get ceph cluster details."
Loading

0 comments on commit e094f05

Please sign in to comment.