# values-large.yaml
---
global:
# This values.yaml file is an example. For more information about
# each configuration option, see the project readme.
# Enter your Kubernetes provider.
provider: "YOUR_KUBERNETES_PROVIDER"
# Enter a name for the deployment if using multi-tenant services such as the Search and Reporting Service.
customerDeploymentId:
deployment:
# The name specified will be used to prefix all of the Pega pods (replacing "pega" with something like "app1-dev").
name: "pega"
# Deploy Pega nodes
actions:
execute: "deploy"
  # Add custom certificates to be mounted to the container.
  # To support custom certificates as plain text (less secure), pass them directly using the certificates parameter;
  # to support multiple custom certificates as external secrets, specify each of your external secrets
  # as an array of comma-separated strings using the certificatesSecrets parameter.
certificatesSecrets: []
certificates: {}
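  # For example (illustrative secret names, not defaults):
  # certificatesSecrets: ["my-cert-secret1", "my-cert-secret2"]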
  # Add krb5.conf file content here.
  # This feature is used by Decisioning data flows to fetch data from Kafka or HBase streams.
kerberos: {}
# Set to true to comply with NIST SP 800-53 and NIST SP 800-131.
highlySecureCryptoModeEnabled: false
  # If a storage class is to be passed to the VolumeClaimTemplates in search and stream pods, specify it here:
storageClassName: ""
# Provide JDBC connection information to the Pega relational database
# If you are installing or upgrading on IBM DB2, update the udb.conf file in the /charts/pega/charts/installer/config/udb directory with any additional connection properties.
jdbc:
    # url -- JDBC connection URL. Valid values are:
    #
    # Oracle            jdbc:oracle:thin:@//localhost:1521/dbName
    # IBM DB2 for z/OS  jdbc:db2://localhost:50000/dbName
    # IBM DB2           jdbc:db2://localhost:50000/dbName:fullyMaterializeLobData=true;fullyMaterializeInputStreams=true;
    #                   progressiveStreaming=2;useJDBC4ColumnNameAndLabelSemantics=2;
    # SQL Server        jdbc:sqlserver://localhost:1433;databaseName=dbName;selectMethod=cursor;sendStringParametersAsUnicode=false
    # PostgreSQL        jdbc:postgresql://localhost:5432/dbName
url: "YOUR_JDBC_URL"
    # driverClass -- JDBC driver class. Valid values are:
    #
    # Oracle            oracle.jdbc.OracleDriver
    # IBM DB2           com.ibm.db2.jcc.DB2Driver
    # SQL Server        com.microsoft.sqlserver.jdbc.SQLServerDriver
    # PostgreSQL        org.postgresql.Driver
# PostgreSQL org.postgresql.Driver
driverClass: "YOUR_JDBC_DRIVER_CLASS"
    # pega.database.type -- Valid values are: mssql, oracledate, udb, db2zos, postgres
dbType: "YOUR_DATABASE_TYPE"
# For databases that use multiple JDBC driver files (such as DB2), specify comma separated values for 'driverUri'
driverUri: "YOUR_JDBC_DRIVER_URI"
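    # As an illustration, a hypothetical PostgreSQL configuration (host, database, and driver version are placeholders,
    # not defaults) might look like:
    # url: "jdbc:postgresql://pega-db.example.internal:5432/pega"
    # driverClass: "org.postgresql.Driver"
    # dbType: "postgres"
    # driverUri: "https://jdbc.postgresql.org/download/postgresql-42.7.3.jar"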
username: "YOUR_JDBC_USERNAME"
password: "YOUR_JDBC_PASSWORD"
    # To avoid exposing the username and password, leave the jdbc.username and jdbc.password parameters empty (no quotes),
    # configure the JDBC username and password in your External Secrets Manager, and enter the external secret name for the credentials below.
    # Make sure the keys in the secret are DB_USERNAME and DB_PASSWORD, respectively.
external_secret_name: ""
# CUSTOM CONNECTION PROPERTIES
    # Use the connectionProperties parameter to pass connection settings to your deployment
    # as a list of semicolon-delimited required connection settings. The list string must end with ";".
# For example, you can set a custom authentication using Azure Managed Identity and avoid using a password.
# To pass an Authentication method and a managed identity, MSI Client ID,
# set: connectionProperties: "Authentication=ActiveDirectoryMSI;msiClientId=<your Azure Managed Identity>;"
connectionProperties: ""
rulesSchema: "YOUR_RULES_SCHEMA"
dataSchema: "YOUR_DATA_SCHEMA"
customerDataSchema: ""
customArtifactory:
    # If you use a secured custom artifactory to manage your JDBC driver,
# provide the authentication details below by filling in the appropriate authentication section,
# either basic or apiKey.
authentication:
# Provide the basic authentication credentials or the API key authentication details to satisfy your custom artifactory authentication mechanism.
basic:
username: ""
password: ""
apiKey:
headerName: ""
value: ""
      # To avoid exposing the basic.username, basic.password, apiKey.headerName, and apiKey.value parameters, configure them
      # in your External Secrets Manager, and enter the external secret name for the credentials below.
      # Make sure the keys in the secret are CUSTOM_ARTIFACTORY_USERNAME, CUSTOM_ARTIFACTORY_PASSWORD, CUSTOM_ARTIFACTORY_APIKEY_HEADER, and CUSTOM_ARTIFACTORY_APIKEY.
external_secret_name: ""
    # Leave customArtifactory.enableSSLVerification enabled to ensure secure access to your custom artifactory;
    # when customArtifactory.enableSSLVerification is false, SSL verification is skipped and an insecure connection is established.
enableSSLVerification: true
# Provide a required domain certificate for your custom artifactory; if none is required, leave this field blank.
certificate:
docker:
# If using a custom Docker registry, supply the credentials here to pull Docker images.
registry:
url: "YOUR_DOCKER_REGISTRY"
username: "YOUR_DOCKER_REGISTRY_USERNAME"
password: "YOUR_DOCKER_REGISTRY_PASSWORD"
# To avoid exposing Docker registry details, create secrets to manage your Docker registry credentials.
# Specify secret names as an array of comma-separated strings in double quotation marks using the imagePullSecretNames parameter. For example: ["secret1", "secret2"]
imagePullSecretNames: []
# Docker image information for the Pega docker image, containing the application server.
pega:
image: "pegasystems/pega"
utilityImages:
busybox:
image: busybox:1.31.0
imagePullPolicy: IfNotPresent
k8s_wait_for:
image: pegasystems/k8s-wait-for
imagePullPolicy: "IfNotPresent"
# waitTimeSeconds: 2
# maxRetries: 1
# Upgrade specific properties
upgrade:
    # Configure this section only for AKS/PKS.
    # Run the "kubectl cluster-info" command to get the service host and HTTPS service port of the Kubernetes API server.
    # Example - Kubernetes master is running at https://<service_host>:<https_service_port>
kube-apiserver:
serviceHost: "API_SERVICE_ADDRESS"
httpsServicePort: "SERVICE_PORT_HTTPS"
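      # For example, if "kubectl cluster-info" reports "Kubernetes master is running at https://203.0.113.10:443",
      # set serviceHost: "203.0.113.10" and httpsServicePort: "443" (the address shown here is illustrative).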
# Set the `compressedConfigurations` parameter to `true` when the configuration files under charts/pega/config/deploy are in compressed format.
# For more information, see the “Pega compressed configuration files” section in the Pega Helm chart documentation.
compressedConfigurations: false
pegaDiagnosticUser: ""
pegaDiagnosticPassword: ""
# Specify the Pega tiers to deploy
tier:
- name: "web"
      # Create an interactive tier for web users. This tier uses
# the WebUser node type and will be exposed via a service to
# the load balancer.
nodeType: "WebUser"
# Pega requestor specific properties
requestor:
# Inactivity time after which requestor is passivated
passivationTimeSec: 900
service:
# For help configuring the service block, see the Helm chart documentation
# https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md#service
httpEnabled: true
port: 80
targetPort: 8080
        # To deploy a specific type of service, set the serviceType parameter, specifying the type of service in double quotes.
        # This value is optional and should be set based on your use case.
        # Set it only for cloud providers such as EKS or GKE; do not use this option for generic k8s or Minikube.
        # For example, to deploy a service of type LoadBalancer, uncomment the following line and specify serviceType: "LoadBalancer"
# serviceType: ""
        # Specify CIDR ranges to restrict service access to those ranges.
        # Add each new CIDR block on a separate line.
        # Use this only when serviceType is set to LoadBalancer.
# Uncomment the following lines and replace the CIDR blocks with your configuration requirements.
# loadBalancerSourceRanges:
# - "123.123.123.0/24"
# - "128.128.128.64/32"
# To configure TLS between the ingress/load balancer and the backend, set the following:
tls:
enabled: false
          # To avoid entering the certificate values in plain text, configure the keystore, keystorepassword, and cacertificate
          # parameter values in your External Secrets Manager, and enter the external secret names below.
          # Make sure the keys in the secret are TOMCAT_KEYSTORE_CONTENT, TOMCAT_KEYSTORE_PASSWORD, and ca.crt, respectively.
          # If providing multiple secrets, provide them as a comma-separated string.
external_secret_names: []
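          # For example (illustrative secret names): external_secret_names: ["web-tls-secret1", "web-tls-secret2"]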
# If using tools like cert-manager to generate certificates, please provide the keystore name that is autogenerated by the external tool.
# Default is TOMCAT_KEYSTORE_CONTENT
external_keystore_name: ""
# If using external secrets operator and not using standard Password Key, please provide the key for keystore password.
# Default is TOMCAT_KEYSTORE_PASSWORD
external_keystore_password: ""
keystore:
keystorepassword:
port: 443
targetPort: 8443
          # For bare-metal/OpenShift deployments, set the value of the CA certificate here; the CA certificate must be in base64 format.
          # Pass the certificateChainFile file if you are using certificateFile and certificateKeyFile.
cacertificate:
          # Provide the SSL certificate and private key in PEM format.
certificateFile:
certificateKeyFile:
          # If you will deploy the traefik addon chart and enable Traefik, set enabled=true; otherwise, leave the default setting.
traefik:
enabled: false
# the SAN of the certificate present inside the container
serverName: ""
            # Set insecureSkipVerify=true if certificate verification has to be skipped.
insecureSkipVerify: false
ingress:
enabled: true
# For help configuring the ingress block including TLS, see the Helm chart documentation
# https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md#ingress
# Enter the domain name to access web nodes via a load balancer.
# e.g. web.mypega.example.com
domain: "YOUR_WEB_NODE_DOMAIN"
# Configure custom path for given host along with pathType. Default pathType is ImplementationSpecific.
# path:
# pathType:
tls:
# Enable TLS encryption
enabled: true
secretName:
          # For GKE Managed Certificate, set to true if a Google Managed Certificate has to be created and the annotation specified.
useManagedCertificate: false
# Provide appropriate certificate annotations for EKS or GKE
# For EKS, use alb.ingress.kubernetes.io/certificate-arn: <certificate-arn>
# For GKE Pre-shared Certificate, use ingress.gcp.kubernetes.io/pre-shared-cert: <pre-shared-certificate-name>
# For GKE to use static IP for load balancer, use kubernetes.io/ingress.global-static-ip-name: <global-static-ip-name>
ssl_annotation:
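          # For example, on EKS this might look like (the certificate ARN below is a placeholder):
          # ssl_annotation:
          #   alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-east-1:123456789012:certificate/EXAMPLE-ID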
# For Openshift, Pega deployments enable TLS to secure the connection
# from the browser to the router by creating the route using reencrypt termination policy.
# Add your certificate, the corresponding key using the appropriate .pem or .crt format and
# specify a CA certificate to validate the endpoint certificate.
certificate:
key:
cacertificate:
replicas: 1
javaOpts: ""
# Check the 'JVM Arguments' section in https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md
catalinaOpts: "-XX:InitialCodeCacheSize=256M -XX:ReservedCodeCacheSize=512M -XX:MetaspaceSize=512M"
deploymentStrategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
type: RollingUpdate
livenessProbe:
port: 8081
      # Optionally override the default resource specifications.
# cpuRequest: 2
# memRequest: "12Gi"
# cpuLimit: 4
# memLimit: "12Gi"
# initialHeap: "4096m"
# maxHeap: "8192m"
# ephemeralStorageRequest:
# ephemeralStorageLimit:
# To configure an alternative user for custom image, set value for runAsUser.
# To configure an alternative group for volume mounts, set value for fsGroup
# See, https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md#security-context
# securityContext:
# runAsUser: 9001
# fsGroup: 0
hpa:
enabled: true
# To configure behavior specifications for hpa, set the required scaleUp & scaleDown values.
# See, https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#stabilization-window
# behavior:
# scaleDown:
# stabilizationWindowSeconds: 600
# key/value pairs that are attached to the pods (https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
# podLabels:
# Topology spread constraints to control the placement of your pods across nodes, zones, regions, or other user-defined topology domains.
      # For more information, please refer to https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
# If you want to apply topology spread constraints in other tiers, please use the same configuration as described here.
# topologySpreadConstraints:
# - maxSkew: <integer>
# topologyKey: <string>
# whenUnsatisfiable: <string>
# labelSelector: <object>
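      # For example, to spread this tier's pods evenly across zones (the app label shown is illustrative):
      # topologySpreadConstraints:
      #   - maxSkew: 1
      #     topologyKey: "topology.kubernetes.io/zone"
      #     whenUnsatisfiable: "ScheduleAnyway"
      #     labelSelector:
      #       matchLabels:
      #         app: "pega-web"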
# Tolerations allow the scheduler to schedule pods with matching taints.
      # For more information, please refer to https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration
# If you want to apply tolerations to other tiers, please use the same configuration as described here.
# tolerations:
# - key: "key1"
# operator: "Equal"
# value: "value1"
# effect: "NoSchedule"
# Set enabled to true to include a Pod Disruption Budget for this tier.
      # To enable this budget, specify either a pdb.minAvailable or pdb.maxUnavailable
# value and comment out the other parameter.
pdb:
enabled: false
minAvailable: 1
# maxUnavailable: "50%"
resources:
requests:
memory: "12Gi"
cpu: 3
limits:
memory: "12Gi"
cpu: 4
- name: "batch"
# Create a background tier for batch processing. This tier uses
# a collection of background node types and will not be exposed to
# the load balancer.
nodeType: "BackgroundProcessing,Search,Batch,Custom1,Custom2,Custom3,Custom4,Custom5"
replicas: 1
javaOpts: ""
deploymentStrategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
type: RollingUpdate
livenessProbe:
port: 8081
# To configure an alternative user for your custom image, set value for runAsUser
# To configure an alternative group for volume mounts, set value for fsGroup
# See, https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md#security-context
# securityContext:
# runAsUser: 9001
# fsGroup: 0
hpa:
enabled: true
# Set enabled to true to include a Pod Disruption Budget for this tier.
      # To enable this budget, specify either a pdb.minAvailable or pdb.maxUnavailable
# value and comment out the other parameter.
pdb:
enabled: false
minAvailable: 1
# maxUnavailable: "50%"
resources:
requests:
memory: "12Gi"
cpu: 3
limits:
memory: "12Gi"
cpu: 4
- name: "bix"
# Create a background tier for BIX processing. This tier uses
# the BIX node type and will not be exposed to the load balancer.
nodeType: "BIX"
replicas: 1
javaOpts: ""
deploymentStrategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
type: RollingUpdate
livenessProbe:
port: 8081
hpa:
enabled: true
# Set enabled to true to include a Pod Disruption Budget for this tier.
      # To enable this budget, specify either a pdb.minAvailable or pdb.maxUnavailable
# value and comment out the other parameter.
pdb:
enabled: false
minAvailable: 1
# maxUnavailable: "50%"
resources:
requests:
memory: "12Gi"
cpu: 3
limits:
memory: "12Gi"
cpu: 4
- name: "realtime"
# Create a dedicated tier for real-time data grid processing.
nodeType: "RealTime"
replicas: 1
javaOpts: ""
deploymentStrategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
type: RollingUpdate
livenessProbe:
port: 8081
hpa:
enabled: true
# Set enabled to true to include a Pod Disruption Budget for this tier.
      # To enable this budget, specify either a pdb.minAvailable or pdb.maxUnavailable
# value and comment out the other parameter.
pdb:
enabled: false
minAvailable: 1
# maxUnavailable: "50%"
resources:
requests:
memory: "12Gi"
cpu: 3
limits:
memory: "12Gi"
cpu: 4
# External services
# Cassandra automatic deployment settings.
cassandra:
enabled: true
persistence:
enabled: true
resources:
requests:
memory: "4Gi"
cpu: 2
limits:
memory: "8Gi"
cpu: 4
# DDS (external Cassandra) connection settings.
# These settings should only be modified if you are using a custom Cassandra deployment.
# To deploy Pega without Cassandra, comment out or delete the following dds section and set
# the cassandra.enabled property above to false.
dds:
# A comma separated list of hosts in the Cassandra cluster.
externalNodes: ""
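  # For example (illustrative host names):
  # externalNodes: "cassandra-0.example.internal,cassandra-1.example.internal,cassandra-2.example.internal"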
# TCP Port to connect to cassandra.
port: "9042"
# The username for authentication with the Cassandra cluster.
username: "dnode_ext"
# The password for authentication with the Cassandra cluster.
password: "dnode_ext"
# Whether to enable client encryption on the Cassandra connection.
clientEncryption: false
# If required, provide the type of Cassandra truststore and keystore that you used to hold keys and certificates for client encryption. Available store types
# are JKS and PKCS12. Default is JKS.
clientEncryptionStoreType: ""
# If required, provide the trustStore certificate file name.
# When using a trustStore certificate, you must also include a Kubernetes secret name that contains the trustStore certificate in the global.certificatesSecrets parameter.
trustStore: ""
  # If required, provide the trustStorePassword value in plain text.
trustStorePassword: ""
# If required, provide the keystore certificate file name.
# When using a keystore certificate, you must also include a Kubernetes secret name that contains the keystore certificate in the global.certificatesSecrets parameter.
keyStore: ""
  # If required, provide the keyStorePassword value in plain text.
keyStorePassword: ""
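  # As an illustration, a hypothetical client-encryption setup (file names are placeholders; the store files themselves
  # must be supplied through a secret listed in global.certificatesSecrets) might set:
  # clientEncryption: true
  # clientEncryptionStoreType: "JKS"
  # trustStore: "cassandra-truststore.jks"
  # keyStore: "cassandra-keystore.jks"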
  # To avoid exposing the username, password, trustStorePassword, and keyStorePassword parameters, configure them
  # in your External Secrets Manager, and enter the external secret name for the credentials below.
  # Make sure the keys in the secret are CASSANDRA_USERNAME, CASSANDRA_PASSWORD, CASSANDRA_TRUSTSTORE_PASSWORD, and CASSANDRA_KEYSTORE_PASSWORD.
external_secret_name: ""
# Enable asynchronous processing of records in DDS Dataset save operation. Failures to store individual records will
# not interrupt Dataset save operations.
asyncProcessingEnabled: false
# Specify a prefix to use when creating Pega-managed keyspaces in Cassandra.
keyspacesPrefix: ""
  # Enable an extended token aware policy for use when a Cassandra range query runs. When enabled, this policy selects a
  # token from the token range to determine which Cassandra node to send the request to. Before you can enable this policy,
  # you must configure the token range partitioner.
extendedTokenAwarePolicy: false
# Enable a latency awareness policy, which collects the latencies of the queries for each Cassandra node and maintains
# a per-node latency score (an average).
latencyAwarePolicy: false
  # Enable the use of a customized retry policy for your Pega Platform deployment for Pega Platform '23 and earlier
  # releases. After you enable this policy in your deployment configuration, the deployment retries Cassandra queries
  # that time out. Configure the number of retries using the dynamic system setting (DSS):
  # dnode/cassandra_custom_retry_policy/retryCount. The default is 1, so if you do not specify a retry count, timed out
  # queries are retried once.
  customRetryPolicy: false
  # Use this parameter in Pega Platform '24 and later instead of `customRetryPolicy`. Configure the number of retries
  # using the `customRetryPolicyCount` property.
  customRetryPolicyEnabled: false
  # Specify the number of retry attempts when `customRetryPolicyEnabled` is true. For Pega Platform '23 and earlier
  # releases use the dynamic system setting (DSS): dnode/cassandra_custom_retry_policy/retryCount.
  customRetryPolicyCount: 1
  # Enable the speculative execution policy for retrieving data from your Cassandra service for Pega Platform '23 and
  # earlier releases. When enabled, Pega Platform sends a query to multiple nodes in your Cassandra service and
  # processes the first response. This provides lower perceived latencies for your deployment, but puts greater load
  # on your Cassandra service. Configure the speculative execution delay and max executions using the following dynamic
  # system settings (DSS): dnode/cassandra_speculative_execution_policy/delay and
  # dnode/cassandra_speculative_execution_policy/max_executions.
  speculativeExecutionPolicy: false
  # Use this parameter in Pega Platform '24 and later instead of `speculativeExecutionPolicy`. Configure the
  # speculative execution delay and max executions using the `speculativeExecutionPolicyDelay` and
  # `speculativeExecutionPolicyMaxExecutions` properties.
  speculativeExecutionPolicyEnabled: false
  # Specify the delay in milliseconds before speculative executions are made when `speculativeExecutionPolicyEnabled`
  # is true. For Pega Platform '23 and earlier releases use the dynamic system setting (DSS):
  # dnode/cassandra_speculative_execution_policy/delay.
  speculativeExecutionPolicyDelay: 100
  # Specify the maximum number of speculative execution attempts when `speculativeExecutionPolicyEnabled` is true. For
  # Pega Platform '23 and earlier releases use the dynamic system setting (DSS):
  # dnode/cassandra_speculative_execution_policy/max_executions.
  speculativeExecutionPolicyMaxExecutions: 2
  # Enable reporting of DDS SDK metrics through the JMX interface for use by your organization to monitor your
  # Cassandra service.
  jmxMetricsEnabled: true
# Enable reporting of DDS SDK metrics to a Comma Separated Value (CSV) format for use by your organization to monitor
# your Cassandra service. If you enable this property, use the Pega Platform DSS:
# dnode/ddsclient/metrics/csv_directory to customize the filepath to which the deployment writes CSV files. By
# default, after you enable this property, CSV files will be written to the Pega Platform work directory.
csvMetricsEnabled: false
# Enable reporting of DDS SDK metrics to your Pega Platform logs.
logMetricsEnabled: false
# Elasticsearch deployment settings.
# Note: This Elasticsearch deployment is used for Pega search, and is not the same Elasticsearch deployment used by the EFK stack.
# These search nodes will be deployed regardless of the Elasticsearch configuration above.
pegasearch:
image: "pegasystems/search"
memLimit: "3Gi"
replicas: 1
# Set externalSearchService to true to use the Search and Reporting Service.
# Refer to the README document to configure SRS as a search functionality provider under this section.
externalSearchService: false
externalURL:
srsAuth:
enabled: false
url: ""
clientId: ""
authType: ""
privateKey: ""
external_secret_name: ""
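  # As an illustration, a hypothetical SRS-backed configuration (the URL is a placeholder for your SRS endpoint) might set:
  # externalSearchService: true
  # externalURL: "http://srs-service.mypega.svc.cluster.local"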
# Pega Installer settings.
installer:
image: "YOUR_INSTALLER_IMAGE:TAG"
  # Set the initial administrator@pega.com password for your installation. This will need to be changed at first login.
# The adminPassword value cannot start with "@".
adminPassword: "ADMIN_PASSWORD"
# Upgrade specific properties
upgrade:
# Type of upgrade
    # Valid upgradeType values are 'in-place', 'zero-downtime', 'custom', 'out-of-place-rules', and 'out-of-place-data'.
upgradeType: "in-place"
# Specify a name for a target rules schema that the upgrade process creates for patches and upgrades.
targetRulesSchema: ""
# Specify a name for a target data schema that the upgrade process creates for patches and upgrades.
    # Applies to PostgreSQL databases when upgrading from Pega Infinity version 8.4.0 and later,
    # and to Oracle databases when upgrading from Pega Infinity version 8.4.3 and later.
targetDataSchema: ""
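    # As an illustration, a hypothetical out-of-place rules upgrade (the schema name is a placeholder) might set:
    # upgradeType: "out-of-place-rules"
    # targetRulesSchema: "pega_rules_new"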
    # Specify the username and password to access the pre-upgrade Pega Platform to perform pre- and post-upgrade actions during zero-downtime upgrades.
pegaRESTUsername: ""
pegaRESTPassword: ""
# Hazelcast settings (applicable from Pega 8.6)
hazelcast:
# Hazelcast docker image for platform version 8.6 through 8.7.x
image: "YOUR_HAZELCAST_IMAGE:TAG"
# Hazelcast docker image for platform version 8.8 and later
clusteringServiceImage: "YOUR_CLUSTERING_SERVICE_IMAGE:TAG"
  # Setting this to true will deploy Pega Platform using a client-server Hazelcast model for versions 8.6 through 8.7.x.
  # Note: Make sure to set this value to "false" for Pega Platform versions before 8.6; otherwise, the installation will fail.
enabled: true
  # Setting this to true will deploy Pega Platform using a client-server Hazelcast model for version 8.8 and later.
clusteringServiceEnabled: false
# Set to true to enforce SSL communication between the Clustering Service and Pega Platform.
encryption:
enabled: false
# Setting related to Hazelcast migration.
migration:
# Set to `true` to initiate the migration job.
initiateMigration: false
# Reference the `platform/clustering-service-kubectl` Docker image to create the migration job.
migrationJobImage: "YOUR_MIGRATION_JOB_IMAGE:TAG"
# Set to `true` when migrating from embedded Hazelcast.
embeddedToCSMigration: false
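    # As an illustration, a migration from embedded Hazelcast (the image tag is a placeholder) might set:
    # initiateMigration: true
    # migrationJobImage: "platform/clustering-service-kubectl:YOUR_TAG"
    # embeddedToCSMigration: true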
  # Number of initial members to join.
replicas: 3
  # Username for authentication in the client-server Hazelcast model. This setting is exposed and not secure.
username: ""
  # Password for authentication in the client-server Hazelcast model. This setting is exposed and not secure.
password: ""
# To avoid exposing username and password parameters, leave these parameters empty and configure
# these cluster settings using an External Secrets Manager. Use the following keys in the secret:
# HZ_CS_AUTH_USERNAME for username and HZ_CS_AUTH_PASSWORD for password.
# Enter the external secret for these credentials below.
external_secret_name: ""
# Stream (externalized Kafka service) settings.
stream:
  # Beginning with Pega Platform '23, enabled by default; when disabled, your deployment does not use a "Kafka stream service" configuration.
enabled: true
# Provide externalized Kafka service broker urls.
bootstrapServer: ""
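  # For example (illustrative broker addresses):
  # bootstrapServer: "kafka-0.example.internal:9092,kafka-1.example.internal:9092,kafka-2.example.internal:9092"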
# Provide Security Protocol used to communicate with kafka brokers. Supported values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
securityProtocol: PLAINTEXT
  # If required, provide the trustStore certificate file name.
  # When using a trustStore certificate, you must also include a Kubernetes secret name that contains the trustStore certificate
  # in the global.certificatesSecrets parameter.
# Pega deployments only support trustStores using the Java Key Store (.jks) format.
trustStore: ""
  # If required, provide the trustStorePassword value in plain text.
trustStorePassword: ""
  # If required, provide the keyStore certificate file name.
  # When using a keyStore certificate, you must also include a Kubernetes secret name that contains the keyStore certificate
  # in the global.certificatesSecrets parameter.
# Pega deployments only support keyStores using the Java Key Store (.jks) format.
keyStore: ""
  # If required, provide the keyStorePassword value in plain text.
keyStorePassword: ""
  # If required, provide the jaasConfig value in plain text.
jaasConfig: ""
  # If required, provide a SASL mechanism. Supported values are: PLAIN, SCRAM-SHA-256, SCRAM-SHA-512.
saslMechanism: PLAIN
# By default, topics originating from Pega Platform have the pega- prefix,
# so that it is easy to distinguish them from topics created by other applications.
# Pega supports customizing the name pattern for your Externalized Kafka configuration for each deployment.
streamNamePattern: "pega-{stream.name}"
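  # For example, with the default pattern "pega-{stream.name}", a Pega stream named "requests" is written to the
  # Kafka topic "pega-requests".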
  # Your replicationFactor value cannot be more than the number of Kafka brokers. The Pega-recommended value is 3.
replicationFactor: "3"
  # To avoid exposing the trustStorePassword, keyStorePassword, and jaasConfig parameters, leave the values empty,
  # configure them using an External Secrets Manager, and make sure the keys in the secret are
  # STREAM_TRUSTSTORE_PASSWORD, STREAM_KEYSTORE_PASSWORD, and STREAM_JAAS_CONFIG.
  # Enter the external secret name below.
external_secret_name: ""