---
infrared_dir: "{{ ansible_user_dir }}/infrared"
infrared_workspaces_dir: "{{ ansible_user_dir }}/.infrared/.workspaces"
infrared_hosts_file: "{{ infrared_workspaces_dir }}/active/hosts"
instackenv_file: "{{ playbook_dir }}/instackenv.json"
undercloud_conf: "{{ ansible_user_dir }}/undercloud.conf"
log_directory: "{{ playbook_dir }}/logs"
cloud_name: cloud00
lab_name: scale
ansible_ssh_pass: password
ansible_ssh_key: "{{ ansible_user_dir }}/.ssh/id_rsa"
osp_release: 13
osp_puddle: passed_phase2
baseurl: http://download.eng.pek2.redhat.com/released/RHEL-7/7.9/Server/x86_64/os/
nic_configs: "{{ ansible_user_dir }}/virt"
controller_count: 3
# No need to set compute_count; it defaults to all remaining nodes, as calculated in overcloud.yml.
#compute_count: 1
ceph_node_count: 0
set_boot_order: true
# Specify the undercloud hostname and password when using scaleout.yml.
#undercloud_hostname:
#undercloud_password:
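# A minimal sketch of the two settings above (hypothetical values; use
# your own undercloud's hostname and password):
#undercloud_hostname: undercloud.example.com
#undercloud_password: changeme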
alias:
  # lab-specific vars
lab_url: "http://quads.alias.bos.scalelab.redhat.com"
machine_types:
dell:
740xd:
rhel7_interfaces: [em2, em1, p7p1, p7p2]
rhel8_interfaces: [eno2, eno1, ens7f0, ens7f1]
rhel9_interfaces: [eno2, eno1, ens7f0, ens7f1]
r750:
rhel7_interfaces: []
rhel8_interfaces: []
rhel9_interfaces: [ens3f1, ens3f0, ens6f0, ens6f1]
supermicro:
6029r:
rhel7_interfaces: [enp95s0f1, enp95s0f0, enp94s0f0, enp94s0f1]
rhel8_interfaces: [enp95s0f1, enp95s0f0, enp94s0f0, enp94s0f1]
6048p:
rhel7_interfaces: [enp130s0f1, enp130s0f0, enp4s0f0, enp4s0f1]
rhel8_interfaces: [enp130s0f1, enp130s0f0, enp4s0f0, enp4s0f1]
scale:
  # lab-specific vars
lab_url: "http://quads.rdu2.scalelab.redhat.com"
machine_types:
supermicro:
1029p:
rhel7_interfaces: [enp94s0f0, enp94s0f1, enp94s0f2, enp94s0f3]
rhel8_interfaces: [ens2f0, ens2f1, ens2f2, ens2f3]
rhel9_interfaces: [ens2f0, ens2f1, ens2f2, ens2f3]
1029utrtp:
rhel7_interfaces: [enp175s0f0, enp175s0f1, enp216s0f0, enp216s0f1]
rhel8_interfaces: [ens1f0, ens1f1, ens2f0, ens2f1]
rhel9_interfaces: [ens1f0, ens1f1, ens2f0, ens2f1]
1029utn10rt:
rhel7_interfaces: [enp175s0f0, enp175s0f1, enp216s0f0, enp216s0f1]
rhel8_interfaces: [enp175s0f0, enp175s0f1, ens2f0, ens2f1]
rhel9_interfaces: [enp175s0f0, enp175s0f1, ens2f0, ens2f1]
6048r:
rhel7_interfaces: [enp4s0f0, enp4s0f1, enp131s0f0, enp131s0f1]
rhel8_interfaces: []
5039ms:
rhel7_interfaces: [enp1s0f0, enp1s0f1, enp2s0f1]
rhel8_interfaces: [enp1s0f0, enp1s0f1, enp2s0f1]
rhel9_interfaces: [enp1s0f0, enp1s0f1, enp2s0f1]
6049p:
rhel7_interfaces: [enp175s0f0, enp175s0f1, enp216s0f0, enp216s0f1]
rhel8_interfaces: [ens3f0, ens3f1, ens2f0, ens2f1]
rhel9_interfaces: [eno1, eno2, enp4s0f0, enp4s0f1]
6048p:
rhel7_interfaces: []
rhel8_interfaces: []
6018r:
rhel7_interfaces: [eno1, eno2, enp4s0f0, enp4s0f1]
rhel8_interfaces: [eno1, eno2, enp4s0f0, enp4s0f1]
dell:
r620:
rhel7_interfaces: [p2p3, p2p4, em1, em2]
rhel8_interfaces: [enp66s0f2, enp66s0f3, eno1, eno2]
r630:
rhel7_interfaces: [em1, em2, em3, em4]
rhel8_interfaces: [eno1, eno2, eno3, eno4]
rhel9_interfaces: [eno1, eno2, eno3, eno4]
r730xd:
rhel7_interfaces: [em1, em2, p4p1, p4p2]
rhel8_interfaces: [eno1, eno2, enp130s0f0, enp130s0f1]
r930:
rhel7_interfaces: [em1, em2, p1p1, p1p2]
rhel8_interfaces: []
r640:
rhel7_interfaces: [p1p1, p1p2, p2p1, p2p2]
rhel7_interfaces_f04: [p3p1, p3p2, p2p1, p2p2]
rhel8_interfaces: [ens1f0, ens1f1, ens2f0, ens2f1]
rhel8_interfaces_f04: [ens3f0, ens3f1, ens2f0, ens2f1]
rhel9_interfaces: [ens1f0, ens1f1, ens2f0np0, ens2f1np1]
r650:
rhel7_interfaces: []
rhel8_interfaces: [ens1f0, ens1f1, ens2f0, ens2f1]
rhel9_interfaces: [ens1f0, ens1f1, ens2f0np0, ens2f1np1]
fc640:
rhel8_interfaces: [eno2, ens2f0, ens2f1]
# Machines in other labs must define their interfaces through the
# 'interfaces' variable below; prepare_nic_configs.yml will use it
# instead of deriving the names from
# scale/alias.machine_types.supermicro/dell...interfaces above.
# Note: if you are using the scale lab or alias lab, do not enable the
# 'interfaces' parameter below.
interfaces:
rhel8_interfaces: [eno1]
rhel7_interfaces: [em1]
# The scale lab can provide a public external network on the 4th interface,
# which can be used to reach overcloud VMs from outside the undercloud.
# To use this feature, set 'public_external_interface: true', assign the
# 4th NIC to external_interface, and define the external_XXXXXX parameters
# with the CIDR provided by the lab team.
# Don't define external_network_vlan_id, as the lab team may have created
# their own VLAN for this external network.
#public_external_interface: false
# Set this based on the OSP version. For example, on 1029p nodes use
# ens2f3 for OSP 14 and above, otherwise enp94s0f3. Refer to the table
# above or the scale lab wiki for the interface name.
#external_interface: ens2f3
# Overcloud provider external network. This network is created after the
# overcloud deployment as part of the post-deployment tasks.
#public_net_name: public
#extra_templates:
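# For example (hypothetical path, assuming extra_templates takes a list
# of additional heat environment files to pass to the deploy command):
#extra_templates:
#  - /home/stack/templates/custom-environment.yaml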
#heat_configs:
# Enable root user password for all nodes
# NodeRootPassword: "{{ ansible_ssh_pass }}"
# SshServerOptions:
# PasswordAuthentication: 'yes'
# PermitRootLogin: 'yes'
# If you need a VLAN tenant network, enable the lines below.
# - NeutronNetworkType=vlan
# - NeutronBridgeMappings=openshift:br-openshift,datacentre:br-ex
# - NeutronNetworkVLANRanges=openshift:500:1000,datacentre:1:500
# - ComputeParameters.NeutronBridgeMappings=openshift:br-openshift
# For non-DVR OVN deployments, uncomment the line below to boot VMs on provider networks created on the datacentre network.
# - ComputeParameters.NeutronBridgeMappings=datacentre:br-ex
#resource_registry:
# Enable root user password for all nodes
# - OS::TripleO::NodeUserData=/usr/share/openstack-tripleo-heat-templates/firstboot/userdata_root_password.yaml
# - OS::TripleO::Services::Sshd=/usr/share/openstack-tripleo-heat-templates/deployment/sshd/sshd-baremetal-puppet.yaml
# container parameters
registry_mirror: redhat.com
registry_namespace: redhat
insecure_registries: redhat.com
# undercloud.conf [DEFAULT] section
undercloud_public_host: 192.168.24.2
undercloud_admin_host: 192.168.24.3
# undercloud.conf [ctlplane-subnet] section config options
cidr: 192.168.24.0/24
gateway: 192.168.24.1
dhcp_start: 192.168.24.5
dhcp_end: 192.168.24.105
inspection_iprange: 192.168.24.110,192.168.24.250
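# For reference, the options above render into undercloud.conf roughly
# as follows (illustrative):
# [ctlplane-subnet]
# cidr = 192.168.24.0/24
# gateway = 192.168.24.1
# dhcp_start = 192.168.24.5
# dhcp_end = 192.168.24.105
# inspection_iprange = 192.168.24.110,192.168.24.250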
# External network params for adding an external network to the
# undercloud, used to access overcloud resources.
external_gateway: 172.18.0.1/16
external_network_vlan_id: 300
clean_nodes: false
# external network CIDR and allocation pools
external_net_cidr: 172.18.0.0/16
external_allocation_pools_start: 172.18.0.50
external_allocation_pools_end: 172.18.0.150
external_interface_default_route: 172.18.0.1
overcloud_external_net_alloc_start: 172.18.1.1
overcloud_external_net_alloc_end: 172.18.254.254
#internal
internal_api_net_cidr: 172.17.1.0/24
internal_api_allocation_pools_start: 172.17.1.10
internal_api_allocation_pools_end: 172.17.1.149
internal_api_network_vlan_id: 301
#storage
storage_net_cidr: 172.17.3.0/24
storage_allocation_pools_start: 172.17.3.10
storage_allocation_pools_end: 172.17.3.149
storage_network_vlan_id: 302
storage_mgmt_net_cidr: 172.17.4.0/24
storage_mgmt_allocation_pools_start: 172.17.4.10
storage_mgmt_allocation_pools_end: 172.17.4.149
storage_mgmt_network_vlan_id: 303
#storage_nfs
storage_nfs_net_cidr: 172.17.5.0/24
storage_nfs_network_vlan_id: 305
storage_nfs_allocation_pools_start: 172.17.5.10
storage_nfs_allocation_pools_end: 172.17.5.149
#tenant
tenant_net_cidr: 172.17.2.0/24
tenant_allocation_pools_start: 172.17.2.10
tenant_allocation_pools_end: 172.17.2.150
tenant_network_vlan_id: 304
# Set to true to force re-provisioning of the undercloud
# (default is false - not forced).
force_reprovision: false
# Set to true to run the undercloud in a VM.
# NOTE: the VM is created on the Ansible control host, so do not
# enable this while running from your desktop.
virtual_uc: false
undercloud_host: 172.16.0.2
vm_external_interface: eth0
undercloud_local_interface: eth0
virtual_uc_ctlplane_interface: em1
vm_image_url:
rhel7: https://url.corp.redhat.com/rhel-guest-image-7-6-210-x86-64-qcow2
rhel8: http://download.hosts.prod.upshift.rdu2.redhat.com/released/RHEL-8/8.1.0/BaseOS/x86_64/images/rhel-guest-image-8.1-263.x86_64.qcow2
rhel8_2: http://download.eng.pek2.redhat.com/released/RHEL-8/8.2.0/BaseOS/x86_64/images/rhel-guest-image-8.2-290.x86_64.qcow2
# Neutron backend; if not set, the default will be used.
# ML2/OVS with DVR is not supported as of now, as it requires
# modifying the default roles_data.yaml.
neutron_backend: ovn
# OCP installation
shift_stack: false
vlan_provider_network: false
# Browbeat installation: Browbeat is installed if this variable is
# undefined or set to true; installation is skipped only if the
# variable is explicitly set to false.
browbeat: true
# Define mount_nvme to mount /var/lib/nova on an NVMe device so that
# Nova instances use NVMe storage for their ephemeral disks.
#mount_nvme: true
# NVMe passthrough parameters; the README has details about how to
# get these values.
#passthrough_nvme:
# vendor_id: '144d'
# product_id: 'a804'
# address: '01:00.0'
# flavor:
# name: 'nvme'
# ram: 16384
# disk: 40
# vcpus: 4
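# Once deployed, instances request the passthrough device via the
# flavor defined above, e.g. (illustrative command and image name):
#   openstack server create --flavor nvme --image <image> nvme-vm0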
#pci_node_count: 0
# If your machine is a 1029U type, you must also specify the product
# type; see http://wiki.scalelab.redhat.com/1029u/ for the list of
# 1029utn10rt machines - the rest are 1029utrtp machines.
#pci_node_type: '1029p'
# For Nova-less provisioning, enable the variable below.
novaless_prov: false
new_nodes_instack: "{{ playbook_dir }}/newnodes.json"
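# newnodes.json uses the standard TripleO instackenv format, e.g.
# (illustrative values):
# {
#   "nodes": [
#     {
#       "pm_addr": "10.1.1.10",
#       "pm_user": "admin",
#       "pm_password": "password",
#       "pm_type": "ipmi",
#       "mac": ["aa:bb:cc:dd:ee:01"]
#     }
#   ]
# }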
# This feature creates VMs on hypervisors and uses them as overcloud
# compute nodes to simulate an overcloud deployment at scale. It
# requires control plane network tuning as shown below.
scale_compute_vms: false
#undercloud_public_host: 192.168.0.2
#undercloud_admin_host: 192.168.0.3
#cidr: 192.168.0.0/16
#gateway: 192.168.0.1
#dhcp_start: 192.168.0.5
#dhcp_end: 192.168.10.254
#inspection_iprange: 192.168.11.1,192.168.20.254
# Enables composable roles.
# If lab_name is not in [scale, alias], set undercloud_local_interface.
# Specify the controller interfaces for composable roles explicitly if
# you need specific machines as controllers; otherwise the default
# controller machine type is the first node in overcloud_instackenv.json.
composable_roles: false
# If your machine is a 1029U type, you must also specify the product
# type; see http://wiki.scalelab.redhat.com/1029u/ for the list of
# 1029utn10rt machines - the rest are 1029utrtp machines.
#controller_machine_type: "1029p"
#Ceph deployment params
# If your machine is a 1029U type, you must also specify the product
# type; see http://wiki.scalelab.redhat.com/1029u/ for the list of
# 1029utn10rt machines - the rest are 1029utrtp machines.
ceph_machine_type: ''
ceph_enabled: false
osd_scenario: lvm
osd_objectstore: bluestore
# Note: by default, storage_node_disks can be detected automatically
# from introspection data when the ceph machine type count equals the
# total number of nodes of that machine type. For 1029u machines, or
# if you want to choose the disks yourself, set storage_node_disks
# explicitly: the first element is always the root disk partition,
# followed by the disks to use for storage.
#storage_node_disks: ['/dev/sda', '/dev/<storage_node_disk>']
# Note: osds_per_device defaults to 1. For OSP 17 it can be changed
# to another value through this parameter.
osds_per_device: 1
#osd_pool_default_pg_num:
#osd_pool_default_pgp_num:
manila_enabled: false
# The introspection timeout defaults to 2400s, but it can be increased
# for large clouds.
#introspection_timeout:
#overcloud_timeout:
foreman_url: https://foreman.example.com
# DVR is enabled by default for osp_releases >= 16; set this to false for non-DVR deployments.
# This variable has no effect on osp_releases < 16, as jetpack does not support DVR there as of now.
dvr_enabled: true
# TLS-Everywhere has been tested on OSP 17.
tls_everywhere: false
# vars for TLS-Everywhere deployment
idm_host_fqdn:
ipa_domain: redhat.local
ipa_realm: REDHAT.LOCAL
ipa_password: redhatidm
# Barbican has been tested on OSP 17
barbican_enabled: false