From 1ea46609721e96422086f611508270bd95303f95 Mon Sep 17 00:00:00 2001 From: G33tha Date: Fri, 23 Sep 2022 16:53:09 +0530 Subject: [PATCH 001/616] removed copy-to-helm-public tasks from mount-keys role (#3549) Co-authored-by: G33tha --- ansible/roles/stack-sunbird/defaults/main.yml | 2 +- .../ansible/roles/helm-deploy/defaults/main.yml | 2 +- .../ansible/roles/helm-deploy/tasks/main.yml | 2 +- .../roles/mount-keys/tasks/copy-to-helm-public.yml | 14 -------------- 4 files changed, 3 insertions(+), 17 deletions(-) delete mode 100644 kubernetes/ansible/roles/mount-keys/tasks/copy-to-helm-public.yml diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 12d98086b3..3b5946e333 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -982,7 +982,7 @@ adminutil_access_values: role_to_run: - decrypt.yml - generate-keys.yml - - copy-to-helm-public.yml + - copy-to-helm.yml # analytics-service related vars cassandra: diff --git a/kubernetes/ansible/roles/helm-deploy/defaults/main.yml b/kubernetes/ansible/roles/helm-deploy/defaults/main.yml index 0dcb0e8d3a..ed2c7f5aca 100644 --- a/kubernetes/ansible/roles/helm-deploy/defaults/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/defaults/main.yml @@ -238,7 +238,7 @@ adminutil_access_values: role_to_run: - decrypt.yml - generate-keys.yml - - copy-to-helm-public.yml + - copy-to-helm.yml opa_policies_path: ../../../opa # Cannot remove common.rego from common_opa_policy_files yet diff --git a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml index a0dba8e709..bd40bcfb82 100644 --- a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml @@ -31,7 +31,7 @@ private_key_prefix: "{{ outer_item.0.values_to_pass.keyprefix }}" private_key_sign_start: "{{ outer_item.0.values_to_pass.keystart }}" private_key_sign_end: 
"{{ outer_item.0.values_to_pass.keycount if outer_item.0.values_to_pass.keycount > '0' else '1' }}" - when: release_name == "adminutils" or release_name == "gotenberg" + when: release_name == "adminutils" with_subelements: - "{{adminutil_keys_values}}" - role_to_run diff --git a/kubernetes/ansible/roles/mount-keys/tasks/copy-to-helm-public.yml b/kubernetes/ansible/roles/mount-keys/tasks/copy-to-helm-public.yml deleted file mode 100644 index 4320bccfd3..0000000000 --- a/kubernetes/ansible/roles/mount-keys/tasks/copy-to-helm-public.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: Create the keys directory in case user is overriding the private_key_path - file: - path: "{{chart_path}}/keys" - state: directory - mode: 0755 - -- name: Copy keys directory to chart path - vars: - private_key_path: "{{private_key_path | regex_replace('^\\/|\\/$', '')}}" - copy: "src={{inventory_dir}}{{private_key_path}}{{private_key_prefix}}{{item}} dest={{chart_path}}/keys/" - with_sequence: start={{private_key_sign_start}} end={{private_key_sign_start|int + private_key_sign_end|int - 1}} stride={{private_key_sign_incr}} - when: (private_key_sign_start|int + private_key_sign_end|int - 1) > 0 - From 730798ca832c07ec937221b3d6fbbcdb502d65e0 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 8 Sep 2022 17:33:41 +0530 Subject: [PATCH 002/616] fix: install specific azure-cli version --- ansible/bootstrap.yml | 1 - ansible/roles/azure-cli/tasks/main.yml | 28 ++++++++++++++++++++------ 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/ansible/bootstrap.yml b/ansible/bootstrap.yml index 429d8b7127..aba26fbbd4 100644 --- a/ansible/bootstrap.yml +++ b/ansible/bootstrap.yml @@ -22,7 +22,6 @@ - all - hosts: "{{hosts}}" - gather_facts: no become: yes ignore_unreachable: yes vars_files: diff --git a/ansible/roles/azure-cli/tasks/main.yml b/ansible/roles/azure-cli/tasks/main.yml index 484bf1f4fb..0a1148f2c4 100644 --- a/ansible/roles/azure-cli/tasks/main.yml +++ 
b/ansible/roles/azure-cli/tasks/main.yml @@ -1,9 +1,25 @@ -- name: install azure cli - become: yes - shell: - which az || curl -sL https://aka.ms/InstallAzureCLIDeb | bash -- name: install azcopy - become: yes +--- +- name: Add Microsfot signing key + ansible.builtin.apt_key: + url: https://packages.microsoft.com/keys/microsoft.asc + state: present + +- name: Add Microsfot repository into sources list + ansible.builtin.apt_repository: + repo: "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ {{ ansible_distribution_release | lower }} main" + state: present + +- name: Install azue cli and dependent packages + ansible.builtin.apt: + pkg: + - ca-certificates + - curl + - apt-transport-https + - lsb-release + - gnupg + - "azure-cli=2.33.1-1~{{ ansible_distribution_release | lower }}" + +- name: Install azcopy shell: | which azcopy || ( \ mkdir /tmp/azcopy && cd /tmp/azcopy && \ From 4773ffc261b91899047190011228404f336a72e2 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Fri, 9 Sep 2022 23:58:59 +0530 Subject: [PATCH 003/616] feat: updated plays to use azure role taks based on cloud provider Signed-off-by: Keshav Prasad --- ansible/artifacts-download.yml | 17 +++- ansible/artifacts-upload.yml | 18 +++- ansible/assets-upload.yml | 28 +++++- ansible/cert-file-upload.yml | 10 -- ansible/deploy-plugins.yml | 99 +++++++++++-------- ansible/desktop-faq-upload.yml | 93 ++++++++--------- ansible/dial_upload-schema.yml | 21 ++-- ansible/grafana-backup.yml | 3 - ansible/jenkins-backup.yml | 4 - ansible/kp_upload-schema.yml | 20 ++-- ansible/mongodb-backup.yml | 3 - ansible/nodebbui-upload.yml | 34 ++++--- ansible/plugin.yml | 22 ----- ansible/plugins.yml | 28 ++++++ ansible/prometheus-backup.yml | 8 -- ansible/prometheus-restore.yml | 9 -- ansible/redis-backup.yml | 3 - .../artifacts-download-azure/tasks/main.yml | 8 -- .../artifacts-upload-azure/tasks/main.yml | 8 -- .../assets-upload-azure/defaults/main.yml | 5 - .../roles/assets-upload-azure/tasks/main.yml | 
25 ----- ansible/roles/azure-cli/tasks/main.yml | 6 +- .../azure-cloud-storage/defaults/main.yml | 67 +++++++++++++ .../tasks/blob-delete-batch.yml | 5 + .../tasks/blob-download.yml | 5 + .../tasks/blob-upload-batch.yml | 10 ++ .../azure-cloud-storage/tasks/blob-upload.yml | 10 ++ .../tasks/container-create.yml | 8 ++ .../tasks/delete-using-azcopy.yml | 7 ++ .../roles/azure-cloud-storage/tasks/main.yml | 21 ++++ .../tasks/upload-using-azcopy.yml | 12 +++ .../blob-batch-delete-azure/tasks/main.yml | 8 -- ansible/roles/cassandra-backup/meta/main.yml | 2 - ansible/roles/cassandra-backup/tasks/main.yml | 34 +++---- ansible/roles/cassandra-restore/meta/main.yml | 2 - .../roles/cassandra-restore/tasks/main.yml | 45 ++++----- .../roles/cert-file-upload/defaults/main.yml | 2 - ansible/roles/cert-file-upload/tasks/main.yml | 7 -- ansible/roles/cert-templates/tasks/main.yml | 27 +++-- ansible/roles/desktop-deploy/tasks/main.yml | 48 ++++----- .../roles/es-azure-snapshot/defaults/main.yml | 2 +- .../roles/es-azure-snapshot/tasks/main.yml | 14 ++- ansible/roles/grafana-backup/meta/main.yml | 2 - ansible/roles/grafana-backup/tasks/main.yml | 20 ++-- .../jenkins-backup-upload/tasks/main.yml | 20 ++-- ansible/roles/mongodb-backup/meta/main.yml | 2 - ansible/roles/mongodb-backup/tasks/main.yml | 16 ++- .../roles/offline-installer/tasks/main.yml | 2 +- ...ploadToAzure.yml => upload_to_storage.yml} | 51 +++++----- .../meta/main.yml | 2 - .../tasks/main.yml | 27 +++-- .../tasks/main.yml | 17 ++-- ansible/roles/postgresql-backup/meta/main.yml | 2 - .../roles/postgresql-backup/tasks/main.yml | 28 +++--- .../roles/postgresql-restore/meta/main.yml | 2 - .../roles/postgresql-restore/tasks/main.yml | 18 ++-- .../roles/prometheus-backup-v2/tasks/main.yml | 15 ++- ansible/roles/prometheus-backup/meta/main.yml | 2 - .../roles/prometheus-backup/tasks/main.yml | 26 +++-- .../roles/prometheus-restore/tasks/main.yml | 22 +++-- ansible/roles/redis-backup/meta/main.yml | 2 - 
ansible/roles/redis-backup/tasks/main.yml | 20 ++-- ansible/roles/upload-batch/tasks/main.yml | 8 -- ansible/uploadFAQs.yml | 61 +++++------- pipelines/certs-templates/Jenkinsfile.upload | 61 ------------ .../org_sunbird_questionunit_quml/Jenkinsfile | 4 +- .../ansible/inventory/dev/Core/common.yml | 15 ++- .../ansible/inventory/dev/Core/secrets.yml | 14 ++- 68 files changed, 662 insertions(+), 605 deletions(-) delete mode 100644 ansible/cert-file-upload.yml delete mode 100644 ansible/plugin.yml create mode 100644 ansible/plugins.yml delete mode 100644 ansible/roles/artifacts-download-azure/tasks/main.yml delete mode 100644 ansible/roles/artifacts-upload-azure/tasks/main.yml delete mode 100644 ansible/roles/assets-upload-azure/defaults/main.yml delete mode 100755 ansible/roles/assets-upload-azure/tasks/main.yml create mode 100644 ansible/roles/azure-cloud-storage/defaults/main.yml create mode 100644 ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml create mode 100644 ansible/roles/azure-cloud-storage/tasks/blob-download.yml create mode 100644 ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml create mode 100644 ansible/roles/azure-cloud-storage/tasks/blob-upload.yml create mode 100644 ansible/roles/azure-cloud-storage/tasks/container-create.yml create mode 100644 ansible/roles/azure-cloud-storage/tasks/delete-using-azcopy.yml create mode 100644 ansible/roles/azure-cloud-storage/tasks/main.yml create mode 100644 ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml delete mode 100755 ansible/roles/blob-batch-delete-azure/tasks/main.yml delete mode 100644 ansible/roles/cassandra-backup/meta/main.yml delete mode 100644 ansible/roles/cassandra-restore/meta/main.yml delete mode 100644 ansible/roles/cert-file-upload/defaults/main.yml delete mode 100644 ansible/roles/cert-file-upload/tasks/main.yml delete mode 100644 ansible/roles/grafana-backup/meta/main.yml delete mode 100644 ansible/roles/mongodb-backup/meta/main.yml rename 
ansible/roles/offline-installer/tasks/{uploadToAzure.yml => upload_to_storage.yml} (67%) delete mode 100644 ansible/roles/postgres-azure-managed-service-backup/meta/main.yml delete mode 100644 ansible/roles/postgresql-backup/meta/main.yml delete mode 100644 ansible/roles/postgresql-restore/meta/main.yml delete mode 100644 ansible/roles/prometheus-backup/meta/main.yml delete mode 100644 ansible/roles/redis-backup/meta/main.yml delete mode 100644 ansible/roles/upload-batch/tasks/main.yml delete mode 100644 pipelines/certs-templates/Jenkinsfile.upload diff --git a/ansible/artifacts-download.yml b/ansible/artifacts-download.yml index feb78219b8..2872fa1013 100644 --- a/ansible/artifacts-download.yml +++ b/ansible/artifacts-download.yml @@ -3,8 +3,15 @@ become: yes vars_files: - "{{inventory_dir}}/secrets.yml" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_artifact_storage_account_name }}" - AZURE_STORAGE_SAS_TOKEN: "{{ sunbird_artifact_storage_account_sas }}" - roles: - - artifacts-download-azure + tasks: + - name: download artifact from azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-download.yml + vars: + blob_container_name: "{{ artifacts_container }}" + blob_file_name: "{{ artifact }}" + local_file_or_folder_path: "{{ artifact_path }}" + storage_account_name: "{{ azure_artifact_storage_account_name }}" + storage_account_key: "{{ azure_artifact_storage_account_key }}" + when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/ansible/artifacts-upload.yml b/ansible/artifacts-upload.yml index 772ec2cca4..642a9aa111 100644 --- a/ansible/artifacts-upload.yml +++ b/ansible/artifacts-upload.yml @@ -3,8 +3,16 @@ become: yes vars_files: - "{{inventory_dir}}/secrets.yml" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_artifact_storage_account_name }}" - AZURE_STORAGE_SAS_TOKEN: "{{ sunbird_artifact_storage_account_sas }}" - roles: - - artifacts-upload-azure + tasks: + - name: upload artifact to azure storage + 
include_role: + name: azure-cloud-storage + tasks_from: blob-upload.yml + vars: + blob_container_name: "{{ artifacts_container }}" + container_public_access: "off" + blob_file_name: "{{ artifact }}" + local_file_or_folder_path: "{{ artifact_path }}" + storage_account_name: "{{ azure_artifact_storage_account_name }}" + storage_account_key: "{{ azure_artifact_storage_account_key }}" + when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/ansible/assets-upload.yml b/ansible/assets-upload.yml index b356cf9362..8bc0ac9123 100644 --- a/ansible/assets-upload.yml +++ b/ansible/assets-upload.yml @@ -1,8 +1,28 @@ --- - hosts: localhost - vars: - ansible_connection: local vars_files: - ['{{inventory_dir}}/secrets.yml', 'secrets/{{env}}.yml'] - roles: - - assets-upload-azure + tasks: + - name: set common azure variables + set_fact: + blob_container_name: "{{ player_cdn_container }}" + container_public_access: "container" + blob_container_folder_path: "" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" + storage_account_sas_token: "{{ azure_public_storage_account_sas }}" + when: cloud_service_provider == "azure" + + - name: delete files and folders from azure storage using azcopy + include_role: + name: azure-cloud-storage + tasks_from: delete-using-azcopy.yml + when: cloud_service_provider == "azure" + + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + local_file_or_folder_path: "{{ assets }}" + when: cloud_service_provider == "azure" diff --git a/ansible/cert-file-upload.yml b/ansible/cert-file-upload.yml deleted file mode 100644 index e29a7b6c2b..0000000000 --- a/ansible/cert-file-upload.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- hosts: localhost - become: yes - vars_files: - - "{{inventory_dir}}/secrets.yml" - environment: - AZURE_STORAGE_ACCOUNT: "{{ 
sunbird_public_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_public_storage_account_key }}" - roles: - - cert-file-upload diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index cd2b5b512b..7c4958a5f5 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -3,9 +3,6 @@ gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_public_storage_account_name }}" - AZURE_STORAGE_SAS_TOKEN: "{{ sunbird_public_storage_account_sas }}" tasks: - name: rename env_domain in preview_cdn.html for CDN shell: | @@ -15,40 +12,62 @@ tags: - preview - - name: delete batch - shell: | - azcopy rm "https://{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ plugin_container_name }}/{{ folder_name }}{{sunbird_public_storage_account_sas}}" --recursive=true - async: 3600 - poll: 10 - tags: - - content-editor - - collection-editor - - generic-editor - - preview - - - name: upload batch - command: "az storage blob upload-batch --destination {{ plugin_container_name }}/{{ folder_name }} --source {{ source_name }}" - async: 3600 - poll: 10 - tags: - - content-editor - - collection-editor - - generic-editor - - preview - - editor - - core-plugins - - - - name: upload file - command: "az storage blob upload --container-name {{ plugin_container_name }} --file {{ source_file_name }} --name artefacts/content-player/content-player-{{ player_version_number }}.zip" - async: 3600 - poll: 10 - tags: - - preview - - - name: run az_copy.sh - shell: "bash {{ az_file_path }} {{ plugin_container_name }} {{ source_file }}" - async: 3600 - poll: 10 - tags: - - plugins + - name: this block consists of tasks related to azure storage + block: + - name: set common azure variables + set_fact: + blob_container_name: "{{ plugin_container_name }}" + container_public_access: "container" + blob_container_folder_path: "/{{ folder_name }}" + storage_account_name: "{{ azure_public_storage_account_name 
}}" + storage_account_key: "{{ azure_public_storage_account_key }}" + storage_account_sas_token: "{{ azure_public_storage_account_sas }}" + + - block: + - name: delete files and folders from azure storage using azcopy + include_role: + name: azure-cloud-storage + tasks_from: delete-using-azcopy.yml + tags: + - content-editor + - collection-editor + - generic-editor + - preview + + - block: + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + local_file_or_folder_path: "{{ source_name }}" + tags: + - content-editor + - collection-editor + - generic-editor + - preview + - editor + - core-plugins + + - block: + - name: upload file to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_file_name: "artefacts/content-player/content-player-{{ player_version_number }}.zip" + local_file_or_folder_path: "{{ source_file_name }}" + tags: + - preview + + - block: + - name: run the az_copy.sh script + shell: "bash {{ az_file_path }} {{ plugin_container_name }} {{ source_file }}" + async: 3600 + poll: 10 + environment: + AZURE_STORAGE_ACCOUNT: "{{ azure_public_storage_account_name }}" + AZURE_STORAGE_SAS_TOKEN: "{{ azure_public_storage_account_sas }}" + tags: + - plugins + when: cloud_service_provider == "azure" diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index 9dbeddd1c0..7c7e992039 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -1,49 +1,50 @@ - hosts: localhost - become: yes vars_files: - "{{inventory_dir}}/secrets.yml" - vars: - artifacts_container: "{{desktop_container}}" - artifact: "{{destination_path}}" - artifact_path: "{{playbook_dir}}/../{{src_file_path}}" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_public_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_public_storage_account_key }}" - roles: - - artifacts-upload-azure - tags: - - 
upload-desktop-faq - - -- hosts: localhost - become: yes - vars_files: - - "{{inventory_dir}}/secrets.yml" - vars: - artifacts_container: "{{desktop_container}}" - artifact: "{{destination_path}}" - artifact_path: "{{playbook_dir}}/../{{src_file_path}}" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_private_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_private_storage_account_key }}" - roles: - - artifacts-upload-azure - tags: - - upload-label - -- hosts: localhost - become: yes - vars_files: - - "{{inventory_dir}}/secrets.yml" - vars: - source_path: "{{playbook_dir}}/../{{src_file_path}}" - destination_path: "{{destination_path}}" - container_name: "{{desktop_container}}" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_public_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_public_storage_account_key }}" - roles: - - upload-batch - tags: - - upload-chatbot-config - - upload-batch + tasks: + - name: this block consists of tasks related to azure storage + block: + - name: set common azure variables + set_fact: + blob_container_name: "{{ desktop_container }}" + blob_file_name: "{{ destination_path }}" + blob_container_folder_path: "/{{ destination_path }}" + local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" + + - block: + - name: upload file to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload.yml + vars: + container_public_access: "container" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" + tags: + - upload-desktop-faq + + - block: + - name: upload file to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload.yml + vars: + container_public_access: "off" + storage_account_name: "{{ azure_private_storage_account_name }}" + storage_account_key: "{{ azure_private_storage_account_key }}" + tags: + - upload-label + + - block: + - name: upload batch of files to azure storage + 
include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + container_public_access: "container" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" + tags: + - upload-chatbot-config + - upload-batch + when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/ansible/dial_upload-schema.yml b/ansible/dial_upload-schema.yml index 1f510c2c92..54b0672ed9 100644 --- a/ansible/dial_upload-schema.yml +++ b/ansible/dial_upload-schema.yml @@ -1,11 +1,7 @@ - hosts: local - become: yes gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_public_storage_account_name }}" - AZURE_STORAGE_SAS_TOKEN: "{{ sunbird_public_storage_account_sas }}" tasks: - name: Create directories file: @@ -22,9 +18,16 @@ dest: dial_schema_template_files/{{ item.path }} with_filetree: "{{ source_name }}" when: item.state == 'file' - - - name: upload batch - command: "az storage blob upload-batch --destination {{ dial_plugin_container_name }}/schemas/local --source dial_schema_template_files" - async: 3600 - poll: 10 + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_name: "{{ dial_plugin_container_name }}" + container_public_access: "blob" + blob_container_folder_path: "/schemas/local" + local_file_or_folder_path: "dial_schema_template_files" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" + when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/ansible/grafana-backup.yml b/ansible/grafana-backup.yml index 9770875812..e5ee720b59 100644 --- a/ansible/grafana-backup.yml +++ b/ansible/grafana-backup.yml @@ -2,8 +2,5 @@ become: yes vars_files: - "{{inventory_dir}}/secrets.yml" - environment: - 
AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_name }}" roles: - grafana-backup diff --git a/ansible/jenkins-backup.yml b/ansible/jenkins-backup.yml index 4506a6bf62..acc4172c73 100644 --- a/ansible/jenkins-backup.yml +++ b/ansible/jenkins-backup.yml @@ -3,9 +3,5 @@ hosts: jenkins-master vars_files: - "{{inventory_dir}}/secrets.yml" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" - roles: - jenkins-backup-upload diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index 0ca52f5f02..843abfbd19 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -1,13 +1,17 @@ - hosts: local - become: yes gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_public_storage_account_name }}" - AZURE_STORAGE_SAS_TOKEN: "{{ sunbird_public_storage_account_sas }}" tasks: - - name: upload batch - command: "az storage blob upload-batch --destination {{ plugin_container_name }}/schemas/local --source {{ source_name }}" - async: 3600 - poll: 10 \ No newline at end of file + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_name: "{{ plugin_container_name }}" + container_public_access: "container" + blob_container_folder_path: "/schemas/local" + local_file_or_folder_path: "{{ source_name }}" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" + when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/ansible/mongodb-backup.yml b/ansible/mongodb-backup.yml index 95ff1e7d61..2ab4091fc4 100644 --- a/ansible/mongodb-backup.yml +++ b/ansible/mongodb-backup.yml @@ -2,8 +2,5 @@ become: yes vars_files: - 
['{{inventory_dir}}/secrets.yml'] - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" roles: - mongodb-backup diff --git a/ansible/nodebbui-upload.yml b/ansible/nodebbui-upload.yml index 92b484a580..48f59dd327 100644 --- a/ansible/nodebbui-upload.yml +++ b/ansible/nodebbui-upload.yml @@ -3,17 +3,27 @@ gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_public_storage_account_name }}" - AZURE_STORAGE_SAS_TOKEN: "{{ sunbird_public_storage_account_sas }}" tasks: - - name: delete batch - shell: | - azcopy rm "https://{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ nodebbui_container_name }}{{sunbird_public_storage_account_sas}}" --recursive=true - async: 3600 - poll: 10 + - name: delete files and folders from azure storage using azcopy + include_role: + name: azure-cloud-storage + tasks_from: delete-using-azcopy.yml + vars: + blob_container_name: "{{ nodebbui_container_name }}" + blob_container_folder_path: "" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_sas_token: "{{ azure_public_storage_account_sas }}" + when: cloud_service_provider == "azure" - - name: upload batch - command: "az storage blob upload-batch --destination {{ nodebbui_container_name }} --source {{ source_name }}" - async: 3600 - poll: 10 + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_name: "{{ nodebbui_container_name }}" + container_public_access: "container" + blob_container_folder_path: "" + local_file_or_folder_path: "{{ source_name }}" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" + when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/ansible/plugin.yml 
b/ansible/plugin.yml deleted file mode 100644 index ac8f93b5cf..0000000000 --- a/ansible/plugin.yml +++ /dev/null @@ -1,22 +0,0 @@ -- hosts: local - become: yes - gather_facts: no - vars_files: - - "{{inventory_dir}}/secrets.yml" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_public_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_public_storage_account_key }}" - tasks: - - name: delte plugin org_sunbird_questionunit_quml - command: "az storage blob delete-batch --source {{ plugin_container_name }} --pattern content-plugins/{{ plugins_name }}" - async: 3600 - poll: 10 - tags: - - org_sunbird_questionunit_quml - - - name: upload plugin org_sunbird_questionunit_quml - command: "az storage blob upload-batch --destination {{ plugin_container_name }}/content-plugins/{{ plugins_name }} --source {{ source_file }}" - async: 3600 - poll: 10 - tags: - - org_sunbird_questionunit_quml diff --git a/ansible/plugins.yml b/ansible/plugins.yml new file mode 100644 index 0000000000..0245f1801a --- /dev/null +++ b/ansible/plugins.yml @@ -0,0 +1,28 @@ +--- +- hosts: local + gather_facts: false + vars_files: + - "{{inventory_dir}}/secrets.yml" + tasks: + - name: this block consists of tasks related to azure storage + block: + - name: set common azure variables + set_fact: + blob_container_name: "{{ plugin_container_name }}" + container_public_access: "container" + blob_delete_pattern: "content-plugins/{{ plugins_name }}" + blob_container_folder_path: "/content-plugins/{{ plugins_name }}" + local_file_or_folder_path: "{{ source_file }}" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" + + - name: delete batch of files from azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-delete-batch.yml + + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + when: cloud_service_provider == "azure" diff 
--git a/ansible/prometheus-backup.yml b/ansible/prometheus-backup.yml index d31adbd125..65a87b3061 100644 --- a/ansible/prometheus-backup.yml +++ b/ansible/prometheus-backup.yml @@ -6,9 +6,6 @@ prometheus_url: "http://localhost:9090/prometheus" vars_files: - ['{{ inventory_dir }}/secrets.yml'] - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" roles: - prometheus-backup-v2 tags: @@ -23,8 +20,6 @@ vars_files: - ['{{inventory_dir}}/secrets.yml'] environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" roles: - prometheus-backup-v2 tags: @@ -39,9 +34,6 @@ prometheus_url: "http://localhost:19090/prometheus" vars_files: - ['{{inventory_dir}}/secrets.yml'] - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" roles: - prometheus-backup-v2 tags: diff --git a/ansible/prometheus-restore.yml b/ansible/prometheus-restore.yml index 5d4bba9731..1ddf6971d5 100644 --- a/ansible/prometheus-restore.yml +++ b/ansible/prometheus-restore.yml @@ -6,9 +6,6 @@ prometheus_service_name: "monitor_prometheus" vars_files: - ['{{ inventory_dir }}/secrets.yml'] - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" roles: - prometheus-restore tags: @@ -22,9 +19,6 @@ prometheus_service_name: "prometheus_fed_prometheus" vars_files: - ['{{ inventory_dir }}/secrets.yml'] - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" roles: - prometheus-restore tags: @@ -38,9 +32,6 @@ prometheus_service_name: "monitor_stateful_prometheus" vars_files: - ['{{ inventory_dir }}/secrets.yml'] - environment: - 
AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" roles: - prometheus-restore tags: diff --git a/ansible/redis-backup.yml b/ansible/redis-backup.yml index af7b1564ed..72ab28e584 100644 --- a/ansible/redis-backup.yml +++ b/ansible/redis-backup.yml @@ -3,9 +3,6 @@ gather_facts: false vars_files: - ['{{inventory_dir}}/secrets.yml'] - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" roles: - redis-backup run_once: true diff --git a/ansible/roles/artifacts-download-azure/tasks/main.yml b/ansible/roles/artifacts-download-azure/tasks/main.yml deleted file mode 100644 index db79bc213f..0000000000 --- a/ansible/roles/artifacts-download-azure/tasks/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Ensure azure blob storage container exists - command: az storage container exists --name {{ artifacts_container }} - -- name: Download from azure blob storage - command: az storage blob download -c {{ artifacts_container }} --name {{ artifact }} -f {{ artifact_path }} - async: 3600 - poll: 10 diff --git a/ansible/roles/artifacts-upload-azure/tasks/main.yml b/ansible/roles/artifacts-upload-azure/tasks/main.yml deleted file mode 100644 index 785dc1a455..0000000000 --- a/ansible/roles/artifacts-upload-azure/tasks/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Ensure azure blob storage container exists - command: az storage container create --name {{ artifacts_container }} - -- name: Upload to azure blob storage - command: az storage blob upload -c {{ artifacts_container }} --name {{ artifact }} -f {{ artifact_path }} - async: 3600 - poll: 10 diff --git a/ansible/roles/assets-upload-azure/defaults/main.yml b/ansible/roles/assets-upload-azure/defaults/main.yml deleted file mode 100644 index 13cc322514..0000000000 --- a/ansible/roles/assets-upload-azure/defaults/main.yml +++ /dev/null @@ 
-1,5 +0,0 @@ -delete: True -player_cdn_container: -player_cdn_account: -player_cdn_account_key: -assets: diff --git a/ansible/roles/assets-upload-azure/tasks/main.yml b/ansible/roles/assets-upload-azure/tasks/main.yml deleted file mode 100755 index 520641f5e3..0000000000 --- a/ansible/roles/assets-upload-azure/tasks/main.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Deleting container before Uploding assets - command: az storage blob delete-batch -s {{player_cdn_container}} - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_public_storage_account_name}}" - AZURE_STORAGE_SAS_TOKEN: "{{sunbird_public_storage_account_sas}}" - async: 3600 - poll: 10 - -- name: Ensure azure blob storage container exists - command: az storage container create --name {{player_cdn_container}} - ignore_errors: true - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_public_storage_account_name}}" - AZURE_STORAGE_SAS_TOKEN: "{{sunbird_public_storage_account_sas}}" - - -# Upload the assets created by the job to azure -- name: Upload to azure blob storage - command: az storage blob upload-batch -d {{player_cdn_container}} -s {{assets}} - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_public_storage_account_name}}" - AZURE_STORAGE_SAS_TOKEN: "{{sunbird_public_storage_account_sas}}" - async: 3600 - poll: 10 diff --git a/ansible/roles/azure-cli/tasks/main.yml b/ansible/roles/azure-cli/tasks/main.yml index 0a1148f2c4..0374f6a0ec 100644 --- a/ansible/roles/azure-cli/tasks/main.yml +++ b/ansible/roles/azure-cli/tasks/main.yml @@ -1,16 +1,16 @@ --- - name: Add Microsfot signing key - ansible.builtin.apt_key: + apt_key: url: https://packages.microsoft.com/keys/microsoft.asc state: present - name: Add Microsfot repository into sources list - ansible.builtin.apt_repository: + apt_repository: repo: "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ {{ ansible_distribution_release | lower }} main" state: present - name: Install azue cli and dependent packages - ansible.builtin.apt: + 
apt: pkg: - ca-certificates - curl diff --git a/ansible/roles/azure-cloud-storage/defaults/main.yml b/ansible/roles/azure-cloud-storage/defaults/main.yml new file mode 100644 index 0000000000..0e4e45bf95 --- /dev/null +++ b/ansible/roles/azure-cloud-storage/defaults/main.yml @@ -0,0 +1,67 @@ +# The name of the blob container in the azure storage account +# Example - +# blob_container_name: "my-container" +blob_container_name: "" + +# The delete pattern to delete files and folder +# Example - +# blob_delete_pattern: "my-drectory/*" +# blob_delete_pattern: "my-drectory/another-directory/*" +# blob_delete_pattern: "*" +blob_delete_pattern: "" + +# The storage account name +# Example - +# storage_account_name: "sunbird-dev-public" +storage_account_name: "" + +# The storage account key +# Example - +# storage_account_name: "cmFuZG9tcmFuZG9tcmFuZG9tcmFuZG9tCg==" +storage_account_key: "" + +# The path to local file which has to be uploaded to azure storage +# The local path to store the file after downloading from azure storage +# Example - +# local_file_or_folder_path: "/workspace/my-folder/myfile.json" +# local_file_or_folder_path: "/workspace/my-folder" +local_file_or_folder_path: "" + +# The name of the file in azure storage after uploading from local +# The name of the file in azure storage that has to be downloaded +# Example - +# blob_file_name: "myfile-blob.json" +# You can also pass folder path in order to upload / download the file from a speciic folder +# blob_file_name "my-folder/my-file.json" +blob_file_name: "" + +# The storage account sas token +# Example - +# storage_account_sas_token: "?sv=2022-01-01&ss=abc&srt=rws%3D" +storage_account_sas_token: "" + +# The folder path in azure storage to upload the files starting from the root of the container +# This path should alwasy start with a slash / as we are going to append this value as shown in below example +# Example - +# blob_container_name: "my-container" +# blob_container_folder_path: "/my-folder-path" 
+# {{ blob_container_name }}{{ blob_container_folder_path }} +# The above translates to "my-container/my-folder-path" + +# The variable can also be empty as shown below, which means we will upload directly at the root path of the container +# Example - +# blob_container_name: "my-container" +# blob_container_folder_path: "" +# The above translates to "my-container" +blob_container_folder_path: "" + +# At what access level the container should be created +# Example - +# container_public_access: "off" +# container_public_access: "blob" +# container_public_access: "container" +# Allowed values are - off, blob, container +# This variable affects only new containers and has no affect on a container if it already exists +# If the container already exists, the access level will not be changed +# You will need to change the access level from Azure portal or using az storage container set-permission command +container_public_access: "" \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml new file mode 100644 index 0000000000..4e8ad68a2d --- /dev/null +++ b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml @@ -0,0 +1,5 @@ +--- +- name: delete files and folders from a blob container recursively + shell: "az storage blob delete-batch --source {{ blob_container_name }} --pattern '{{ blob_delete_pattern }}' --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" + async: 3600 + poll: 10 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-download.yml b/ansible/roles/azure-cloud-storage/tasks/blob-download.yml new file mode 100644 index 0000000000..3bbf4b607a --- /dev/null +++ b/ansible/roles/azure-cloud-storage/tasks/blob-download.yml @@ -0,0 +1,5 @@ +--- +- name: download a file from azure storage + shell: "az storage blob download --container-name {{ blob_container_name }} --file {{ 
local_file_or_folder_path }} --name {{ blob_file_name }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" + async: 3600 + poll: 10 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml new file mode 100644 index 0000000000..3043da46cc --- /dev/null +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml @@ -0,0 +1,10 @@ +--- +- name: create container in azure storage if it doesn't exist + include_role: + name: azure-cloud-storage + tasks_from: container-create.yml + +- name: upload files and folders from a local directory to azure storage container + shell: "az storage blob upload-batch --destination {{ blob_container_name }}{{ blob_container_folder_path }} --source {{ local_file_or_folder_path }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" + async: 3600 + poll: 10 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml new file mode 100644 index 0000000000..4b493ffb73 --- /dev/null +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml @@ -0,0 +1,10 @@ +--- +- name: create container in azure storage if it doesn't exist + include_role: + name: azure-cloud-storage + tasks_from: container-create.yml + +- name: upload file to azure storage container + shell: "az storage blob upload --container-name {{ blob_container_name }} --file {{ local_file_or_folder_path }} --name {{ blob_file_name }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" + async: 3600 + poll: 10 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/container-create.yml b/ansible/roles/azure-cloud-storage/tasks/container-create.yml new file mode 100644 index 0000000000..419510cc19 --- /dev/null +++ 
b/ansible/roles/azure-cloud-storage/tasks/container-create.yml @@ -0,0 +1,8 @@ +--- +- name: create container in azure storage if it doesn't exist + shell: "az storage container create --name {{ blob_container_name }} --public-access {{ container_public_access }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" + when: storage_account_key | length > 0 + +- name: create container in azure storage if it doesn't exist + shell: "az storage container create --name {{ blob_container_name }} --public-access {{ container_public_access }} --account-name {{ storage_account_name }} --sas-token '{{ storage_account_sas_token }}'" + when: storage_account_sas_token | length > 0 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/delete-using-azcopy.yml b/ansible/roles/azure-cloud-storage/tasks/delete-using-azcopy.yml new file mode 100644 index 0000000000..236169e86c --- /dev/null +++ b/ansible/roles/azure-cloud-storage/tasks/delete-using-azcopy.yml @@ -0,0 +1,7 @@ +--- +- name: delete files and folders from azure storage using azcopy + shell: "azcopy rm 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ storage_account_sas_token }}' --recursive" + environment: + AZCOPY_CONCURRENT_FILES: "10" + async: 10800 + poll: 10 diff --git a/ansible/roles/azure-cloud-storage/tasks/main.yml b/ansible/roles/azure-cloud-storage/tasks/main.yml new file mode 100644 index 0000000000..eb435ecfe2 --- /dev/null +++ b/ansible/roles/azure-cloud-storage/tasks/main.yml @@ -0,0 +1,21 @@ +--- +- name: delete files and folders from azure storage container recursively + include: blob-delete-batch.yml + +- name: download a file from azure storage + include: blob-download.yml + +- name: upload files and folders from a local directory to azure storage container + include: blob-upload-batch.yml + +- name: upload file to azure storage container + include: blob-upload.yml + +- name: 
create container in azure storage if it doesn't exist + include: container-create.yml + +- name: delete files and folders from azure storage using azcopy + include: delete-using-azcopy.yml + +- name: upload files and folders to azure storage using azcopy + include: upload-using-azcopy.yml diff --git a/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml new file mode 100644 index 0000000000..99ab3c2bf8 --- /dev/null +++ b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml @@ -0,0 +1,12 @@ +--- +- name: create container in azure storage if it doesn't exist + include_role: + name: azure-cloud-storage + tasks_from: container-create.yml + +- name: upload files and folders to azure storage using azcopy + shell: "azcopy copy {{ local_file_or_folder_path }} 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ storage_account_sas_token }}' --recursive" + environment: + AZCOPY_CONCURRENT_FILES: "10" + async: 10800 + poll: 10 \ No newline at end of file diff --git a/ansible/roles/blob-batch-delete-azure/tasks/main.yml b/ansible/roles/blob-batch-delete-azure/tasks/main.yml deleted file mode 100755 index 4d84085ed5..0000000000 --- a/ansible/roles/blob-batch-delete-azure/tasks/main.yml +++ /dev/null @@ -1,8 +0,0 @@ -# Delete the assets in container -- name: Upload to azure blob storage - command: az storage blob delete-batch -s {{container}} --pattern {{blob_pattern}} --dryrun - environment: - AZURE_STORAGE_ACCOUNT: "{{blob_account}}" - AZURE_STORAGE_KEY: "{{blob_account_key}}" - async: 60 - poll: 10 diff --git a/ansible/roles/cassandra-backup/meta/main.yml b/ansible/roles/cassandra-backup/meta/main.yml deleted file mode 100644 index 23b18a800a..0000000000 --- a/ansible/roles/cassandra-backup/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - azure-cli \ No newline at end of file diff --git 
a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml index a6611c8ebf..d6365315d6 100755 --- a/ansible/roles/cassandra-backup/tasks/main.yml +++ b/ansible/roles/cassandra-backup/tasks/main.yml @@ -3,11 +3,11 @@ ignore_errors: true - name: Create the directory - become: yes + become: true file: path=/data/cassandra/backup state=directory recurse=yes - name: copy the backup script - become: yes + become: true template: src: ../../../../deploy/cassandra_backup.py dest: /data/cassandra/backup/cassandra_backup.py @@ -17,7 +17,7 @@ cassandra_backup_gzip_file_name: "cassandra-backup-{{ lookup('pipe', 'date +%Y%m%d') }}-{{ ansible_hostname }}-new" - name: run the backup script - become: yes + become: true shell: python3 cassandra_backup.py --snapshotname "{{ cassandra_backup_gzip_file_name }}" --snapshotdirectory "{{ cassandra_backup_gzip_file_name }}" "{{additional_arguments|d('')}}" args: chdir: /data/cassandra/backup @@ -32,21 +32,17 @@ debug: var: doc_data -- name: Ensure azure blob storage container exists - command: az storage container create --name {{ cassandra_backup_azure_container_name }} - ignore_errors: true - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" - -- name: Upload to azure blob storage - command: "azcopy copy {{cassandra_backup_gzip_file_name}} 'https://{{sunbird_management_storage_account_name}}.blob.core.windows.net/{{cassandra_backup_azure_container_name}}{{sunbird_management_storage_account_sas}}' --recursive" - environment: - AZCOPY_CONCURRENT_FILES: 10 # How many files azcopy should read concurrently. 
- args: - chdir: /data/cassandra/backup - async: 10800 - poll: 10 - +- name: upload file to azure storage using azcopy + include_role: + name: azure-cloud-storage + tasks_from: upload-using-azcopy.yml + vars: + blob_container_name: "{{ cassandra_backup_azure_container_name }}" + container_public_access: "off" + local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_gzip_file_name }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_sas_token: "{{ azure_management_storage_account_sas }}" + when: cloud_service_provider == "azure" + - name: clean up backup dir after upload file: path="{{ cassandra_backup_dir }}" state=absent diff --git a/ansible/roles/cassandra-restore/meta/main.yml b/ansible/roles/cassandra-restore/meta/main.yml deleted file mode 100644 index 23b18a800a..0000000000 --- a/ansible/roles/cassandra-restore/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - azure-cli \ No newline at end of file diff --git a/ansible/roles/cassandra-restore/tasks/main.yml b/ansible/roles/cassandra-restore/tasks/main.yml index 6dcb7d97df..80b8f86863 100755 --- a/ansible/roles/cassandra-restore/tasks/main.yml +++ b/ansible/roles/cassandra-restore/tasks/main.yml @@ -1,52 +1,53 @@ - name: Stop the cassandra - become: yes + become: true service: name=cassandra state=stopped - set_fact: cassandra_restore_gzip_file_path: "{{ cassandra_restore_dir }}/{{ cassandra_restore_gzip_file_name }}" - -- name: Download to azure blob storage - command: az storage blob download --name {{ cassandra_restore_gzip_file_name }} --file {{ cassandra_restore_gzip_file_path }} --container-name {{ cassandra_backup_azure_container_name }} - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" - async: 3600 - poll: 10 - + +- name: download a file from azure storage + become: true + include_role: + name: azure-cloud-storage + tasks_from: 
blob-download.yml + vars: + blob_container_name: "{{ cassandra_backup_azure_container_name }}" + blob_file_name: "{{ cassandra_restore_gzip_file_name }}" + local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" + - name: unarchieve restore artifact - become: yes + become: true unarchive: src={{user_home}}/{{ cassandra_restore_gzip_file_name }} dest={{user_home}}/ copy=no - name: Remove the restroe artefact - become: yes + become: true file: path={{user_home}}/cassandra* state=absent - name: Remove the old data - become: yes + become: true file: path=/var/lib/cassandra/data/sunbird state=absent - name: Replace the new data - become: yes + become: true command: mv {{user_home}}/data/sunbird /var/lib/cassandra/data/ - - name: remove data - become: yes + become: true file: path: "/home/{{ ansible_ssh_user }}/data" state: absent - name: change the permissions - become: yes + become: true file: path=/var/lib/cassandra/data owner=cassandra group=cassandra recurse=yes - name: copy the backup script - become: yes + become: true template: src=nodetool.j2 dest={{user_home}}/nodetool.sh mode=0755 - - name: Start the cassandra - become: yes + become: true service: name=cassandra state=started - - diff --git a/ansible/roles/cert-file-upload/defaults/main.yml b/ansible/roles/cert-file-upload/defaults/main.yml deleted file mode 100644 index b74bef14c9..0000000000 --- a/ansible/roles/cert-file-upload/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -file_name: "certUploadedfile" -cert_template_artifacts_container_name: "e-credentials" diff --git a/ansible/roles/cert-file-upload/tasks/main.yml b/ansible/roles/cert-file-upload/tasks/main.yml deleted file mode 100644 index 9fcaf6a679..0000000000 --- a/ansible/roles/cert-file-upload/tasks/main.yml +++ /dev/null @@ -1,7 +0,0 @@ -- name: rename the 
file to the correct name as required in blobstore - command: "mv /tmp/certUploadedfile /tmp/{{file_name}}" - -- name: Upload to azure blob storage - command: "az storage blob upload -c {{ cert_template_artifacts_container_name }} --name {{ file_name }} -f /tmp/{{file_name}}" - async: 3600 - poll: 10 diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index b41aaef0a8..321a91f139 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -31,21 +31,18 @@ chdir: "{{cert_location}}/cert-templates/certUtilScripts/" when: createPublicKey is defined -- name: Ensure azure blob storage container exists - command: az storage container create --name {{cert_service_container_name}} --public-access off - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_private_storage_account_name}}" - AZURE_STORAGE_KEY: "{{sunbird_private_storage_account_key}}" - -- name: Upload to azure blob storage - command: az storage blob upload-batch --destination {{cert_service_container_name}} --source "out" - args: - chdir: "{{cert_location}}/cert-templates/certUtilScripts/" - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_private_storage_account_name}}" - AZURE_STORAGE_KEY: "{{sunbird_private_storage_account_key}}" - async: 60 - poll: 10 +- name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_name: "{{ cert_service_container_name }}" + container_public_access: "off" + blob_container_folder_path: "" + local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" + storage_account_name: "{{ azure_private_storage_account_name }}" + storage_account_key: "{{ azure_private_storage_account_key }}" + when: cloud_service_provider == "azure" - name: list all the files shell: "ls -lR {{cert_location}}" diff --git a/ansible/roles/desktop-deploy/tasks/main.yml 
b/ansible/roles/desktop-deploy/tasks/main.yml index 0d301fbed0..963189ad4d 100644 --- a/ansible/roles/desktop-deploy/tasks/main.yml +++ b/ansible/roles/desktop-deploy/tasks/main.yml @@ -49,28 +49,28 @@ - name: run the installer script shell: "bash -x {{offline_repo_location}}/build.sh" -- name: Ensure azure blob storage container exists - command: az storage container create --name {{offline_installer_container_name}} --public-access blob - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_public_storage_account_name}}" - AZURE_STORAGE_KEY: "{{sunbird_public_storage_account_key}}" +- name: this block consists of tasks related to azure storage + block: + - name: set common azure variables + set_fact: + blob_container_name: "{{ offline_installer_container_name }}" + container_public_access: "blob" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" -- name: Upload to azure blob storage - command: az storage blob upload-batch --destination {{offline_installer_container_name}} --source "desktop_uploader_assets" - args: - chdir: "{{offline_repo_location}}/" - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_public_storage_account_name}}" - AZURE_STORAGE_KEY: "{{sunbird_public_storage_account_key}}" - async: 60 - poll: 10 - -- name: Upload to latest.json file to blob - command: az storage blob upload-batch --destination "{{offline_installer_container_name}}/latest" --source "{{offline_repo_location}}/desktop_uploader_assets/{{time}}/" - args: - chdir: "{{offline_repo_location}}/" - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_public_storage_account_name}}" - AZURE_STORAGE_KEY: "{{sunbird_public_storage_account_key}}" - async: 60 - poll: 10 + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_folder_path: "" + local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets" + + 
- name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_folder_path: "/latest" + local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets/{{ time }}/" + when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/ansible/roles/es-azure-snapshot/defaults/main.yml b/ansible/roles/es-azure-snapshot/defaults/main.yml index 99787b0f89..9072442956 100644 --- a/ansible/roles/es-azure-snapshot/defaults/main.yml +++ b/ansible/roles/es-azure-snapshot/defaults/main.yml @@ -10,4 +10,4 @@ snapshot_create_request_body: { es_snapshot_host: "localhost" snapshot_base_path: "default" -es_azure_backup_folder_name: "elasticsearch-snapshots" \ No newline at end of file +es_azure_backup_container_name: "elasticsearch-snapshots" \ No newline at end of file diff --git a/ansible/roles/es-azure-snapshot/tasks/main.yml b/ansible/roles/es-azure-snapshot/tasks/main.yml index d2880013d5..2b7f23e576 100644 --- a/ansible/roles/es-azure-snapshot/tasks/main.yml +++ b/ansible/roles/es-azure-snapshot/tasks/main.yml @@ -4,11 +4,15 @@ - set_fact: snapshot_number="snapshot_{{ lookup('pipe','date +%s') }}" -- name: Ensure backup folder exists in azure blob - shell: "az storage container create --name {{ es_azure_backup_folder_name }}" - environment: - AZURE_STORAGE_ACCOUNT: "{{ azure_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ azure_management_storage_account_key }}" +- name: create container in azure storage if it doesn't exist + include_role: + name: azure-cloud-storage + tasks_from: container-create.yml + vars: + blob_container_name: "{{ es_azure_backup_container_name }}" + container_public_access: "off" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" - name: Create Azure Repository uri: diff --git a/ansible/roles/grafana-backup/meta/main.yml 
b/ansible/roles/grafana-backup/meta/main.yml deleted file mode 100644 index a124d4f7cb..0000000000 --- a/ansible/roles/grafana-backup/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - azure-cli diff --git a/ansible/roles/grafana-backup/tasks/main.yml b/ansible/roles/grafana-backup/tasks/main.yml index 62d4c4dfe2..786bd13442 100644 --- a/ansible/roles/grafana-backup/tasks/main.yml +++ b/ansible/roles/grafana-backup/tasks/main.yml @@ -19,14 +19,18 @@ async: 3600 poll: 10 -- name: Ensure azure blob storage container exists - command: az storage container create --name {{ grafana_backup_azure_container_name }} - ignore_errors: true - -- name: Upload to azure blob storage - command: az storage blob upload --name {{ grafana_backup_gzip_file_name }} --file {{ grafana_backup_gzip_file_path }} --container-name {{ grafana_backup_azure_container_name }} - async: 5000 - poll: 10 +- name: upload file to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload.yml + vars: + blob_container_name: "{{ grafana_backup_azure_container_name }}" + container_public_access: "off" + blob_file_name: "{{ grafana_backup_gzip_file_name }}" + local_file_or_folder_path: "{{ grafana_backup_gzip_file_path }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" - name: clean up backup dir after upload file: path="{{ grafana_backup_dir }}" state=absent diff --git a/ansible/roles/jenkins-backup-upload/tasks/main.yml b/ansible/roles/jenkins-backup-upload/tasks/main.yml index f5ec21c7bb..e430e57c1c 100644 --- a/ansible/roles/jenkins-backup-upload/tasks/main.yml +++ b/ansible/roles/jenkins-backup-upload/tasks/main.yml @@ -12,11 +12,15 @@ - name: Create archive of backup directory archive: path="{{ jenkins_backup_base_dir }}/{{ LATEST_BACKUP_DIR.stdout }}" dest="/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" format=zip -- name: Ensure azure blob 
storage container exists - command: az storage container create --name {{ jenkins_backup_azure_container_name }} - -- name: Upload to azure blob storage - command: az storage blob upload -c {{ jenkins_backup_azure_container_name }} --name "{{ LATEST_BACKUP_DIR.stdout }}.zip" -f "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" - async: 3600 - poll: 10 - +- name: upload file to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload.yml + vars: + blob_container_name: "{{ jenkins_backup_azure_container_name }}" + container_public_access: "off" + blob_file_name: "{{ LATEST_BACKUP_DIR.stdout }}.zip" + local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/ansible/roles/mongodb-backup/meta/main.yml b/ansible/roles/mongodb-backup/meta/main.yml deleted file mode 100644 index a124d4f7cb..0000000000 --- a/ansible/roles/mongodb-backup/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - azure-cli diff --git a/ansible/roles/mongodb-backup/tasks/main.yml b/ansible/roles/mongodb-backup/tasks/main.yml index 219ced55ea..4235e52c32 100644 --- a/ansible/roles/mongodb-backup/tasks/main.yml +++ b/ansible/roles/mongodb-backup/tasks/main.yml @@ -1,3 +1,4 @@ +--- - name: Create the directory file: path={{ mongo_backup_dir }} state=directory recurse=yes @@ -13,13 +14,18 @@ - name: Compress the backup file shell: "tar -czf {{ mongo_backup_file_path }}.tar.gz {{ mongo_backup_file_path }}" -- name: upload to azure +- name: upload file to azure storage include_role: - name: artifacts-upload-azure + name: azure-cloud-storage + tasks_from: blob-upload.yml vars: - artifact: "{{ mongo_backup_file_name }}.tar.gz" - artifact_path: "{{ mongo_backup_file_path }}.tar.gz" - artifacts_container: "{{ mongo_backup_azure_container_name }}" + 
blob_container_name: "{{ mongo_backup_azure_container_name }}" + container_public_access: "off" + blob_file_name: "{{ mongo_backup_file_name }}.tar.gz" + local_file_or_folder_path: "{{ mongo_backup_file_path }}.tar.gz" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" - name: clean up backup dir after upload file: path={{ mongo_backup_dir }} state=absent diff --git a/ansible/roles/offline-installer/tasks/main.yml b/ansible/roles/offline-installer/tasks/main.yml index 31bc47f627..e110fd3116 100644 --- a/ansible/roles/offline-installer/tasks/main.yml +++ b/ansible/roles/offline-installer/tasks/main.yml @@ -41,7 +41,7 @@ when: uploadInstaller is not defined - name: upload to azure - include: uploadToAzure.yml + include: upload_to_storage.yml when: uploadInstaller is defined - name: Delete offline installer folder if any issue diff --git a/ansible/roles/offline-installer/tasks/uploadToAzure.yml b/ansible/roles/offline-installer/tasks/upload_to_storage.yml similarity index 67% rename from ansible/roles/offline-installer/tasks/uploadToAzure.yml rename to ansible/roles/offline-installer/tasks/upload_to_storage.yml index 62d4378d10..3c62d6fc03 100644 --- a/ansible/roles/offline-installer/tasks/uploadToAzure.yml +++ b/ansible/roles/offline-installer/tasks/upload_to_storage.yml @@ -1,9 +1,4 @@ -- name: Ensure azure blob storage container exists - command: az storage container create --name {{offline_installer_container_name}} --public-access blob - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_public_storage_account_name}}" - AZURE_STORAGE_KEY: "{{sunbird_public_storage_account_key}}" - +--- - name: Get the environment name for the artifact name shell: "cat {{offline_repo_location}}/offline-installer-repo/src/package.json | jq -r '.name'" register: env_name @@ -56,25 +51,31 @@ - artifacts.sh - metadata.sh -- name: Upload to azure blob storage - 
command: az storage blob upload-batch --destination {{offline_installer_container_name}} --source "offline_artifacts" - args: - chdir: "{{offline_repo_location}}/offline-installer-repo/" - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_public_storage_account_name}}" - AZURE_STORAGE_KEY: "{{sunbird_public_storage_account_key}}" - async: 60 - poll: 10 - -- name: Upload to latest.json file to blob - command: az storage blob upload-batch --destination "{{offline_installer_container_name}}/latest" --source "{{folderName.stdout}}" - args: - chdir: "{{offline_repo_location}}/offline-installer-repo/offline_artifacts/" - environment: - AZURE_STORAGE_ACCOUNT: "{{sunbird_public_storage_account_name}}" - AZURE_STORAGE_KEY: "{{sunbird_public_storage_account_key}}" - async: 60 - poll: 10 +- name: this block consists of tasks related to azure storage + block: + - name: set common azure variables + set_fact: + blob_container_name: "{{ offline_installer_container_name }}" + container_public_access: "blob" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" + + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_folder_path: "" + local_file_or_folder_path: "{{ offline_repo_location }}/offline-installer-repo/offline_artifacts" + + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_folder_path: "/latest" + local_file_or_folder_path: "{{ offline_repo_location }}/offline-installer-repo/offline_artifacts/{{ folderName.stdout }}" + when: cloud_service_provider == "azure" - name: Create a zip of the folder to archieve the artifact archive: diff --git a/ansible/roles/postgres-azure-managed-service-backup/meta/main.yml b/ansible/roles/postgres-azure-managed-service-backup/meta/main.yml deleted file mode 100644 index 
5927f82724..0000000000 --- a/ansible/roles/postgres-azure-managed-service-backup/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - #- azure-cli \ No newline at end of file diff --git a/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml b/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml index cc5ede8e0e..a64f3639af 100644 --- a/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml +++ b/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml @@ -11,7 +11,6 @@ - set_fact: postgresql_backup_gzip_file_path: "{{ postgresql_backup_dir }}/{{ postgresql_backup_gzip_file_name }}.zip" - - name: Dump an existing database to a file postgresql_db: login_user: "{{ sunbird_pg_user }}" @@ -42,20 +41,18 @@ async: 500 poll: 10 -- name: Ensure azure blob storage container exists - command: az storage container create --name {{ postgresql_backup_azure_container_name }} - ignore_errors: true - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" - -- name: Upload to azure blob storage - command: az storage blob upload --name {{ postgresql_backup_gzip_file_name }}.zip --file {{ postgresql_backup_gzip_file_path }} --container-name {{ postgresql_backup_azure_container_name }} - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" - async: 3600 - poll: 10 +- name: upload file to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload.yml + vars: + blob_container_name: "{{ postgresql_backup_azure_container_name }}" + container_public_access: "off" + blob_file_name: "{{ postgresql_backup_gzip_file_name }}.zip" + local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ 
azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" - name: clean up backup dir after upload file: path="{{ postgresql_backup_dir }}" state=absent diff --git a/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml b/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml index ba413e7943..135c29280c 100644 --- a/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml +++ b/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml @@ -9,12 +9,17 @@ - set_fact: postgres_backup_filepath: "{{ postgresql_restore_dir }}/{{ postgres_backup_filename }}" -- name: Download backup from azure - command: az storage blob download -c {{ postgres_backup_azure_container_name }} --name {{ postgres_backup_filename }} -f {{ postgres_backup_filepath }} - args: - chdir: "{{ postgres_restore_dir }}" - async: 100 - poll: 10 +- name: download a file from azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-download.yml + vars: + blob_container_name: "{{ postgres_backup_azure_container_name }}" + blob_file_name: "{{ postgres_backup_filename }}" + local_file_or_folder_path: "{{ postgres_restore_dir }}/{{ postgres_backup_filepath }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" - name: unarchive artifact unarchive: src={{ postgresql_restore_dir }}/{{ postgres_backup_filename }} dest={{ postgresql_restore_dir }}/ copy=no diff --git a/ansible/roles/postgresql-backup/meta/main.yml b/ansible/roles/postgresql-backup/meta/main.yml deleted file mode 100644 index 23b18a800a..0000000000 --- a/ansible/roles/postgresql-backup/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - azure-cli \ No newline at end of file diff --git a/ansible/roles/postgresql-backup/tasks/main.yml b/ansible/roles/postgresql-backup/tasks/main.yml index c71f15510f..2e25619a10 100644 
--- a/ansible/roles/postgresql-backup/tasks/main.yml +++ b/ansible/roles/postgresql-backup/tasks/main.yml @@ -13,20 +13,18 @@ async: 3600 poll: 10 -- name: Ensure azure blob storage container exists - command: az storage container create --name {{ postgresql_backup_azure_container_name }} - ignore_errors: true - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" - -- name: Upload to azure blob storage - command: az storage blob upload --name {{ postgresql_backup_gzip_file_name }} --file {{ postgresql_backup_gzip_file_path }} --container-name {{ postgresql_backup_azure_container_name }} - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" - async: 3600 - poll: 10 +- name: upload file to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload.yml + vars: + blob_container_name: "{{ postgresql_backup_azure_container_name }}" + container_public_access: "off" + blob_file_name: "{{ postgresql_backup_gzip_file_name }}" + local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" - name: clean up backup dir after upload - file: path="{{ postgresql_backup_dir }}" state=absent + file: path="{{ postgresql_backup_dir }}" state=absent \ No newline at end of file diff --git a/ansible/roles/postgresql-restore/meta/main.yml b/ansible/roles/postgresql-restore/meta/main.yml deleted file mode 100644 index 23b18a800a..0000000000 --- a/ansible/roles/postgresql-restore/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - azure-cli \ No newline at end of file diff --git a/ansible/roles/postgresql-restore/tasks/main.yml 
b/ansible/roles/postgresql-restore/tasks/main.yml index 47f9aa0f05..4075baa596 100644 --- a/ansible/roles/postgresql-restore/tasks/main.yml +++ b/ansible/roles/postgresql-restore/tasks/main.yml @@ -4,13 +4,17 @@ - set_fact: postgresql_restore_gzip_file_path: "{{ postgresql_restore_dir }}/{{ postgresql_restore_gzip_file_name }}" -- name: Download restore file from azure - command: az storage blob download --container-name {{ postgresql_restore_azure_container_name }} --name {{ postgresql_restore_gzip_file_name }} --file {{ postgresql_restore_gzip_file_path }} - environment: - AZURE_STORAGE_ACCOUNT: "{{ postgresql_restore_azure_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ postgresql_restore_azure_storage_access_key }}" - async: 3600 - poll: 10 +- name: download a file from azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-download.yml + vars: + blob_container_name: "{{ postgresql_restore_azure_container_name }}" + blob_file_name: "{{ postgresql_restore_gzip_file_name }}" + local_file_or_folder_path: "{{ postgresql_restore_gzip_file_path }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" - name: ensure postgresql service is stopped service: name=postgresql state=stopped diff --git a/ansible/roles/prometheus-backup-v2/tasks/main.yml b/ansible/roles/prometheus-backup-v2/tasks/main.yml index af34edddad..6286f31ebb 100644 --- a/ansible/roles/prometheus-backup-v2/tasks/main.yml +++ b/ansible/roles/prometheus-backup-v2/tasks/main.yml @@ -16,13 +16,18 @@ path: "{{ prometheus_data_dir }}/snapshots/{{ snapshot_name }}" dest: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" -- name: upload to azure +- name: upload file to azure storage include_role: - name: artifacts-upload-azure + name: azure-cloud-storage + tasks_from: blob-upload.yml vars: - artifact: "{{ prometheus_backup_prefix }}_{{ 
snapshot_name }}.tar.gz" - artifact_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" - artifacts_container: "{{ prometheus_backup_azure_container_name }}" + blob_container_name: "{{ prometheus_backup_azure_container_name }}" + container_public_access: "off" + blob_file_name: "{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" + local_file_or_folder_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" - name: Deleting snapshot file: diff --git a/ansible/roles/prometheus-backup/meta/main.yml b/ansible/roles/prometheus-backup/meta/main.yml deleted file mode 100644 index bb605fa878..0000000000 --- a/ansible/roles/prometheus-backup/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - azure-cli diff --git a/ansible/roles/prometheus-backup/tasks/main.yml b/ansible/roles/prometheus-backup/tasks/main.yml index 712dd6faf3..1a71443c28 100644 --- a/ansible/roles/prometheus-backup/tasks/main.yml +++ b/ansible/roles/prometheus-backup/tasks/main.yml @@ -25,20 +25,18 @@ shell: "docker service scale monitor_prometheus=1" delegate_to: "{{groups['swarm-bootstrap-manager'][0]}}" -- name: Ensure azure blob storage container exists - command: az storage container create --name {{ prometheus_backup_azure_container_name }} - ignore_errors: true - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_management_storage_account_key }}" - -- name: Upload to azure blob storage - command: az storage blob upload --name {{ prometheus_backup_gzip_file_name }} --file {{ prometheus_backup_gzip_file_path }} --container-name {{ prometheus_backup_azure_container_name }} - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_management_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ 
sunbird_management_storage_account_key }}" - async: 3600 - poll: 10 +- name: upload file to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload.yml + vars: + blob_container_name: "{{ prometheus_backup_azure_container_name }}" + container_public_access: "off" + blob_file_name: "{{ prometheus_backup_gzip_file_name }}" + local_file_or_folder_path: "{{ prometheus_backup_gzip_file_path }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" - name: clean up backup dir after upload file: path="{{ prometheus_backup_dir }}" state=absent diff --git a/ansible/roles/prometheus-restore/tasks/main.yml b/ansible/roles/prometheus-restore/tasks/main.yml index 686d60e195..9b2a176882 100644 --- a/ansible/roles/prometheus-restore/tasks/main.yml +++ b/ansible/roles/prometheus-restore/tasks/main.yml @@ -1,20 +1,25 @@ - name: ensure backup dir exists file: path="{{ prometheus_backup_dir }}" state=directory -- name: Download backup from azure - command: az storage blob download -c {{ prometheus_backup_azure_container_name }} --name {{ prometheus_backup_filename }} -f {{ prometheus_backup_filepath }} - args: - chdir: "{{ prometheus_backup_dir }}" - async: 100 - poll: 10 +- name: download a file from azure storage + become: true + include_role: + name: azure-cloud-storage + tasks_from: blob-download.yml + vars: + blob_container_name: "{{ prometheus_backup_azure_container_name }}" + blob_file_name: "{{ prometheus_backup_filename }}" + local_file_or_folder_path: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filepath }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: cloud_service_provider == "azure" - name: ensure prometheus is stopped shell: "docker service scale {{prometheus_service_name}}=0 && sleep 10" delegate_to: 
"{{manager_host}}" #variable is passed as extra vars from jenkins - - name: Unarchive backup - become: yes + become: true unarchive: src: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filename }}" dest: "{{prometheus_data_dir}}/" @@ -29,4 +34,3 @@ - name: clean up backup dir file: path="{{ prometheus_backup_dir }}" state=absent - diff --git a/ansible/roles/redis-backup/meta/main.yml b/ansible/roles/redis-backup/meta/main.yml deleted file mode 100644 index a124d4f7cb..0000000000 --- a/ansible/roles/redis-backup/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - azure-cli diff --git a/ansible/roles/redis-backup/tasks/main.yml b/ansible/roles/redis-backup/tasks/main.yml index 3519bb1ea9..fa621b4d6d 100644 --- a/ansible/roles/redis-backup/tasks/main.yml +++ b/ansible/roles/redis-backup/tasks/main.yml @@ -1,6 +1,5 @@ - name: Create the directory file: path={{ redis_backup_dir }} state=directory recurse=yes - - set_fact: redis_backup_file_name: "redis-backup-{{ lookup('pipe', 'date +%Y-%m-%d-%T') }}.rdb" @@ -8,22 +7,25 @@ - set_fact: redis_backup_file_path: "{{ redis_backup_dir }}/{{ redis_backup_file_name }}" - - name: copy dump.rdb file copy: src: /home/learning/redis-stable/dump.rdb dest: "{{ redis_backup_dir }}/{{ redis_backup_file_name }}" remote_src: yes - -- name: upload to azure +- name: upload file to azure storage include_role: - name: artifacts-upload-azure + name: azure-cloud-storage + tasks_from: blob-upload.yml vars: - artifact: "{{ redis_backup_file_name }}" - artifact_path: "{{ redis_backup_file_path }}" - artifacts_container: "{{ nodebb_redis_backup_azure_container_name }}" - + blob_container_name: "{{ nodebb_redis_backup_azure_container_name }}" + container_public_access: "off" + blob_file_name: "{{ redis_backup_file_name }}" + local_file_or_folder_path: "{{ redis_backup_file_path }}" + storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_key: "{{ azure_management_storage_account_key }}" + when: 
cloud_service_provider == "azure" + - name: clean up backup dir after upload file: path={{ redis_backup_dir }} state=absent diff --git a/ansible/roles/upload-batch/tasks/main.yml b/ansible/roles/upload-batch/tasks/main.yml deleted file mode 100644 index 1055bdb7f4..0000000000 --- a/ansible/roles/upload-batch/tasks/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Ensure azure blob storage container exists - command: az storage container create --name {{ container_name }} --public-access container - -- name: Upload to azure blob storage - command: az storage blob upload-batch --destination {{ destination_path }} --source {{ source_path }} - async: 3600 - poll: 10 diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index 1cc8fdbe8f..a4da2d4ede 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -1,47 +1,34 @@ - hosts: localhost - become: yes vars_files: - "{{inventory_dir}}/secrets.yml" - vars: - container_name: "{{ blob_container }}" - destination_path: "{{ blob_container }}" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_public_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_public_storage_account_key }}" tasks: - - name: upload files - include_role: - name: upload-batch - vars: - source_path: "{{ playbook_dir }}/../utils/{{ item }}" - with_items: - - "{{ source_folder.split(',') }}" + - name: template schema files + template: + src: "{{ item }}" + dest: "{{ item }}" + with_fileglob: + - "{{ playbook_dir }}/../utils/sunbird-RC/schema/*.json" tags: - - upload-faqs - + - upload-RC-schema + - hosts: localhost - become: yes vars_files: - "{{inventory_dir}}/secrets.yml" - vars: - container_name: "{{ blob_container }}" - destination_path: "{{ blob_container }}" - environment: - AZURE_STORAGE_ACCOUNT: "{{ sunbird_public_storage_account_name }}" - AZURE_STORAGE_KEY: "{{ sunbird_public_storage_account_key }}" tasks: - - name: template schema files - template: - src: "{{ item }}" - dest: "{{ item }}" - with_fileglob: - - "{{ 
playbook_dir }}/../utils/sunbird-RC/schema/*.json" - - name: upload files - include_role: - name: upload-batch - vars: - source_path: "{{ playbook_dir }}/../utils/{{ item }}" - with_items: - - "{{ source_folder.split(',') }}" + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_name: "{{ blob_container }}" + container_public_access: "container" + blob_container_folder_path: "" + local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" + storage_account_name: "{{ azure_public_storage_account_name }}" + storage_account_key: "{{ azure_public_storage_account_key }}" + with_items: + - "{{ source_folder.split(',') }}" + when: cloud_service_provider == "azure" tags: - - upload-RC-schema + - upload-faqs + - upload-RC-schema diff --git a/pipelines/certs-templates/Jenkinsfile.upload b/pipelines/certs-templates/Jenkinsfile.upload deleted file mode 100644 index 8b0ef0a6bd..0000000000 --- a/pipelines/certs-templates/Jenkinsfile.upload +++ /dev/null @@ -1,61 +0,0 @@ -@Library('deploy-conf') _ -node() { - try { - String ANSI_GREEN = "\u001B[32m" - String ANSI_NORMAL = "\u001B[0m" - String ANSI_BOLD = "\u001B[1m" - String ANSI_RED = "\u001B[31m" - String ANSI_YELLOW = "\u001B[33m" - - stage("upload") { - def inputFile = input message: 'Upload file', parameters: [file(name: 'certUploadedfile')] - new hudson.FilePath(new File("tmp/certUploadedfile")).copyFrom(inputFile) - } - - stage('checkout public repo') { - folder = new File("$WORKSPACE/.git") - if (folder.exists()) - { - println "Found .git folder. Clearing it.." 
- sh'git clean -fxd' - } - checkout scm - } - - ansiColor('xterm') { - stage('deploy'){ - values = [:] - envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() - module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() - jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() - currentWs = sh(returnStdout: true, script: 'pwd').trim() - ansiblePlaybook = "${currentWs}/ansible/cert-file-upload.yml" - ansibleExtraArgs = "--extra-vars \"file_name=${params.file_name}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" - values.put('currentWs', currentWs) - values.put('env', envDir) - values.put('module', module) - values.put('jobName', jobName) - values.put('ansiblePlaybook', ansiblePlaybook) - values.put('ansibleExtraArgs', ansibleExtraArgs) - println values - ansible_playbook_run(values) - currentBuild.result = "SUCCESS" - currentBuild.description = "Artifact: ${values.artifact_version}, Private: ${params.private_branch}, Public: ${params.branch_or_tag}" - } - } - - stage('remove tmp file'){ - sh """ - rm -rf /tmp/certUploadedfile - """ - } - } - catch (err) { - currentBuild.result = "FAILURE" - throw err - } - finally { - slack_notify(currentBuild.result) - email_notify() - } -} diff --git a/pipelines/deploy/org_sunbird_questionunit_quml/Jenkinsfile b/pipelines/deploy/org_sunbird_questionunit_quml/Jenkinsfile index b8173de1a0..9d4521b956 100644 --- a/pipelines/deploy/org_sunbird_questionunit_quml/Jenkinsfile +++ b/pipelines/deploy/org_sunbird_questionunit_quml/Jenkinsfile @@ -34,8 +34,8 @@ node() { chmod a+x content-plugins/az_copy.sh mv content-plugins ansible """ - ansiblePlaybook = "${currentWs}/ansible/plugin.yml" - ansibleExtraArgs = "--tags org_sunbird_questionunit_quml --extra-vars \" plugins_name=${params.plugin_name} source_file=${currentWs}/ansible/content-plugins/${params.plugin_name}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansiblePlaybook = 
"${currentWs}/ansible/plugins.yml" + ansibleExtraArgs = "--extra-vars \" plugins_name=${params.plugin_name} source_file=${currentWs}/ansible/content-plugins/${params.plugin_name}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('ansiblePlaybook', ansiblePlaybook) values.put('ansibleExtraArgs', ansibleExtraArgs) println values diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index e54937985e..fc439777b4 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -18,15 +18,20 @@ mail_server_username: "apikey" # Email provider userna sunbird_mail_server_from_email: "support@myorg.com" # Email ID that should be as from address in mails # List of mail ids to whome the monitoring alerts should be sent. alerts_mailing_list : "devops@myorg.com" # Comma separated mail list for Alerts; eg: user1@mail.com, user2@mail.com -# Note - You can use the same azure account for the below variables or have separate azure accounts -sunbird_public_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing public data (like contents) -sunbird_private_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing private data (like reports, telemetry data) -sunbird_artifact_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing artifacts data (like jenkins build zip files) -sunbird_management_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing backup data (like cassandra backups) + # Define the below if you are using Azure Cloud # Management Storage Account +# Note - You can use the same azure account for the below variables or have separate azure accounts +sunbird_public_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing public data (like contents) 
+sunbird_private_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing private data (like reports, telemetry data) +sunbird_management_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing backup data (like cassandra backups) +sunbird_artifact_storage_account_name: "{{ sunbird_management_storage_account_name }}" # Azure account name for storing artifacts data (like jenkins build zip files) + +azure_public_storage_account_name: "{{ sunbird_public_storage_account_name }}" +azure_private_storage_account_name: "{{ sunbird_private_storage_account_name }}" azure_management_storage_account_name: "{{ sunbird_management_storage_account_name }}" +azure_artifact_storage_account_name: "{{ sunbird_artifact_storage_account_name }}" # Define the below if you are using AWS Cloud # Management Storage Bucket diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index 798aceb7e0..c373fa8c4f 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -8,27 +8,33 @@ core_vault_docker_registry_url: "change.docker.url" # for docker hub "https core_vault_docker_registry_user: "change.docker.username" core_vault_docker_registry_password: "change.docker.password" +# Define the below if you are using Azure Cloud +# Management Storage Account # Run the below command in shell # date +'%Y-%m-%dT%H:%m:%SZ' -d '+1 year' # sas_token=?`az storage account generate-sas --account-name "{{ azure_plugin_storage_account_name }}" --account-key "{{ azure_plugin_storage_account_key }}" --expiry $sas_expire_time --https-only --permissions acdlpruw --resource-types sco --services bfqt | xargs` # generate a sas for the blob for entire storage accout with write and read access -artifact_azure_account_sas: "change.azure.storage.account.sas" # SAS token value generated from above command 
sunbird_artifact_storage_account_sas: "change.azure.storage.account.sas" # SAS token value generated from above command sunbird_public_storage_account_sas: "change.azure.storage.account.sas" # SAS token value generated from above command +sunbird_management_storage_account_sas: "change.azure.storage.account.sas" # SAS token value generated from above command + sunbird_public_storage_account_key: "change.azure.storage.account.key" sunbird_private_storage_account_key: "change.azure.storage.account.key" sunbird_management_storage_account_key: "change.azure.storage.account.key" +sunbird_artifact_storage_account_key: "{{ sunbird_management_storage_account_key }}" -# Define the below if you are using Azure Cloud -# Management Storage Account +azure_public_storage_account_key: "{{ sunbird_public_storage_account_key }}" +azure_private_storage_account_key: "{{ sunbird_private_storage_account_key }}" azure_management_storage_account_key: "{{ sunbird_management_storage_account_key }}" +azure_artifact_storage_account_key: "{{ sunbird_artifact_storage_account_key }}" +azure_public_storage_account_sas: "{{ sunbird_public_storage_account_sas }}" +azure_management_storage_account_sas: "{{ sunbird_management_storage_account_sas }}" # Define the below if you are using AWS Cloud # Management Storage Bucket aws_management_bucket_user_access_key: "" aws_management_bucket_user_secret_key: "" - # Define the below if you are using Google Cloud # Management Storage Bucket gcs_management_bucket_service_account: | From 85f8e5c20c9339cfbb8e0e39d2100cc5763b1807 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 21 Sep 2022 14:36:30 +0530 Subject: [PATCH 004/616] fix: adding mandatore var cloud_service_provider Signed-off-by: Keshav Prasad --- private_repo/ansible/inventory/dev/Core/common.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index fc439777b4..8277399b44 100644 --- 
a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -1,6 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # # Mandatorty variables - DO NOT LEAVE ANYTHING BLANK # # ------------------------------------------------------------------------------------------------------------ # +cloud_service_provider: "" # Your cloud service provider name. Supported values are aws, azure, gcloud domain_name: "" # your domain name like example.com dockerhub: "change.docker.url" # docker hub username or url incase of private registry # This ip should be in the kubenetes subnet range. For example, if your kube cluster is running in `10.0.0.0/24, then it can be 10.0.0.5. Make sure this ip is not allocated to any other things.` From 847f536094008a2e7fef2bf994239a1c2e048ea1 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Fri, 23 Sep 2022 19:20:40 +0530 Subject: [PATCH 005/616] feat: modified vars for generic naming convention Signed-off-by: Keshav Prasad --- ansible/assets-upload.yml | 9 ++++++++- ansible/deploy-plugins.yml | 15 +++++++++++---- ansible/desktop-faq-upload.yml | 9 ++++++++- ansible/dial_upload-schema.yml | 9 ++++++++- ansible/kp_upload-schema.yml | 9 ++++++++- ansible/nodebbui-upload.yml | 11 +++++++++-- ansible/plugins.yml | 7 +++++++ ansible/roles/cassandra-backup/defaults/main.yml | 9 ++++++++- ansible/roles/cassandra-backup/tasks/main.yml | 2 +- ansible/roles/cassandra-restore/defaults/main.yml | 7 +++++++ ansible/roles/cassandra-restore/tasks/main.yml | 2 +- ansible/roles/cert-templates/defaults/main.yml | 7 +++++++ ansible/roles/cert-templates/tasks/main.yml | 2 +- ansible/roles/desktop-deploy/defaults/main.yml | 7 +++++++ ansible/roles/desktop-deploy/tasks/main.yml | 2 +- ansible/roles/es-azure-snapshot/defaults/main.yml | 9 ++++++++- ansible/roles/es-azure-snapshot/tasks/main.yml | 2 +- ansible/roles/es-gcs-snapshot/defaults/main.yml | 
4 ++-- ansible/roles/es-s3-snapshot/defaults/main.yml | 4 ++-- ansible/roles/grafana-backup/defaults/main.yml | 7 +++++++ ansible/roles/grafana-backup/tasks/main.yml | 2 +- .../roles/jenkins-backup-upload/defaults/main.yml | 7 +++++++ .../roles/jenkins-backup-upload/tasks/main.yml | 2 +- ansible/roles/mongodb-backup/defaults/main.yml | 7 +++++++ ansible/roles/mongodb-backup/tasks/main.yml | 2 +- ansible/roles/offline-installer/defaults/main.yml | 7 +++++++ .../offline-installer/tasks/upload_to_storage.yml | 2 +- .../defaults/main.yml | 7 +++++++ .../tasks/main.yml | 2 +- .../defaults/main.yml | 7 +++++++ .../tasks/main.yml | 2 +- ansible/roles/postgresql-backup/defaults/main.yml | 7 ++++++- ansible/roles/postgresql-backup/tasks/main.yml | 2 +- .../roles/postgresql-restore/defaults/main.yml | 11 ++++++----- ansible/roles/postgresql-restore/tasks/main.yml | 2 +- .../roles/prometheus-backup-v2/defaults/main.yml | 9 ++++++++- ansible/roles/prometheus-backup-v2/tasks/main.yml | 2 +- ansible/roles/prometheus-backup/defaults/main.yml | 9 ++++++++- ansible/roles/prometheus-backup/tasks/main.yml | 2 +- .../roles/prometheus-restore/defaults/main.yml | 9 ++++++++- ansible/roles/prometheus-restore/tasks/main.yml | 2 +- ansible/roles/redis-backup/defaults/main.yml | 7 +++++++ ansible/roles/redis-backup/tasks/main.yml | 2 +- ansible/uploadFAQs.yml | 2 +- .../jobs/Kubernetes/jobs/UploadFAQs/config.xml | 2 +- .../Sunbird-RC/jobs/Upload_RC_Schema/config.xml | 2 +- pipelines/upload/faqs/Jenkinsfile | 2 +- 47 files changed, 206 insertions(+), 46 deletions(-) diff --git a/ansible/assets-upload.yml b/ansible/assets-upload.yml index 8bc0ac9123..db14234e4a 100644 --- a/ansible/assets-upload.yml +++ b/ansible/assets-upload.yml @@ -2,10 +2,17 @@ - hosts: localhost vars_files: - ['{{inventory_dir}}/secrets.yml', 'secrets/{{env}}.yml'] + # The vars: section is added for the below reason + # 1. Introduce a common variable for various clouds. 
In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name + # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo + # or other default files and just assign the value to the newly introduced common variable + # 3. After few releases, we will remove the older variables and use only the new variables across the repos + vars: + player_cdn_storage: "{{ player_cdn_container }}" tasks: - name: set common azure variables set_fact: - blob_container_name: "{{ player_cdn_container }}" + blob_container_name: "{{ player_cdn_storage }}" container_public_access: "container" blob_container_folder_path: "" storage_account_name: "{{ azure_public_storage_account_name }}" diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 7c4958a5f5..d1a0be8796 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -3,11 +3,18 @@ gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" + # The vars: section is added for the below reason + # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name + # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo + # or other default files and just assign the value to the newly introduced common variable + # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos + vars: + plugin_storage: "{{ plugin_container_name }}" tasks: - name: rename env_domain in preview_cdn.html for CDN shell: | - echo "{{sunbird_portal_preview_cdn_url}}" - sed -i 's|cdn_url|{{sunbird_portal_preview_cdn_url}}|g' "{{currentws}}"/ansible/preview/preview_cdn.html + echo "{{ sunbird_portal_preview_cdn_url }}" + sed -i 's|cdn_url|{{ sunbird_portal_preview_cdn_url }}|g' "{{ currentws }}"/ansible/preview/preview_cdn.html when: sunbird_portal_preview_cdn_url is defined tags: - preview @@ -16,7 +23,7 @@ block: - name: set common azure variables set_fact: - blob_container_name: "{{ plugin_container_name }}" + blob_container_name: "{{ plugin_storage }}" container_public_access: "container" blob_container_folder_path: "/{{ folder_name }}" storage_account_name: "{{ azure_public_storage_account_name }}" @@ -62,7 +69,7 @@ - block: - name: run the az_copy.sh script - shell: "bash {{ az_file_path }} {{ plugin_container_name }} {{ source_file }}" + shell: "bash {{ az_file_path }} {{ plugin_storage }} {{ source_file }}" async: 3600 poll: 10 environment: diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index 7c7e992039..43d1789b00 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -1,12 +1,19 @@ - hosts: localhost vars_files: - "{{inventory_dir}}/secrets.yml" + # The vars: section is added for the below reason + # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name + # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo + # or other default files and just assign the value to the newly introduced common variable + # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos + vars: + desktop_container: "{{ desktop_container_storage }}" tasks: - name: this block consists of tasks related to azure storage block: - name: set common azure variables set_fact: - blob_container_name: "{{ desktop_container }}" + blob_container_name: "{{ desktop_container_storage }}" blob_file_name: "{{ destination_path }}" blob_container_folder_path: "/{{ destination_path }}" local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" diff --git a/ansible/dial_upload-schema.yml b/ansible/dial_upload-schema.yml index 54b0672ed9..c846ecb95e 100644 --- a/ansible/dial_upload-schema.yml +++ b/ansible/dial_upload-schema.yml @@ -2,6 +2,13 @@ gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" + # The vars: section is added for the below reason + # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name + # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo + # or other default files and just assign the value to the newly introduced common variable + # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos + vars: + dial_plugin_container_name: "{{ dial_plugin_storage }}" tasks: - name: Create directories file: @@ -24,7 +31,7 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ dial_plugin_container_name }}" + blob_container_name: "{{ dial_plugin_storage }}" container_public_access: "blob" blob_container_folder_path: "/schemas/local" local_file_or_folder_path: "dial_schema_template_files" diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index 843abfbd19..c13633e8ab 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -2,13 +2,20 @@ gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" + # The vars: section is added for the below reason + # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name + # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo + # or other default files and just assign the value to the newly introduced common variable + # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos + vars: + plugin_container_name: "{{ plugin_storage }}" tasks: - name: upload batch of files to azure storage include_role: name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ plugin_container_name }}" + blob_container_name: "{{ plugin_storage }}" container_public_access: "container" blob_container_folder_path: "/schemas/local" local_file_or_folder_path: "{{ source_name }}" diff --git a/ansible/nodebbui-upload.yml b/ansible/nodebbui-upload.yml index 48f59dd327..809d67b914 100644 --- a/ansible/nodebbui-upload.yml +++ b/ansible/nodebbui-upload.yml @@ -3,13 +3,20 @@ gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" + # The vars: section is added for the below reason + # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name + # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo + # or other default files and just assign the value to the newly introduced common variable + # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos + vars: + nodebbui_container_name: "{{ nodebbui_storage }}" tasks: - name: delete files and folders from azure storage using azcopy include_role: name: azure-cloud-storage tasks_from: delete-using-azcopy.yml vars: - blob_container_name: "{{ nodebbui_container_name }}" + blob_container_name: "{{ nodebbui_storage }}" blob_container_folder_path: "" storage_account_name: "{{ azure_public_storage_account_name }}" storage_account_sas_token: "{{ azure_public_storage_account_sas }}" @@ -20,7 +27,7 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ nodebbui_container_name }}" + blob_container_name: "{{ nodebbui_storage }}" container_public_access: "container" blob_container_folder_path: "" local_file_or_folder_path: "{{ source_name }}" diff --git a/ansible/plugins.yml b/ansible/plugins.yml index 0245f1801a..487f5c780d 100644 --- a/ansible/plugins.yml +++ b/ansible/plugins.yml @@ -3,6 +3,13 @@ gather_facts: false vars_files: - "{{inventory_dir}}/secrets.yml" + # The vars: section is added for the below reason + # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name + # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo + # or other default files and just assign the value to the newly introduced common variable + # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos + vars: + plugin_container_name: "{{ plugin_storage }}" tasks: - name: this block consists of tasks related to azure storage block: diff --git a/ansible/roles/cassandra-backup/defaults/main.yml b/ansible/roles/cassandra-backup/defaults/main.yml index 148bcf83b2..139fd1d810 100644 --- a/ansible/roles/cassandra-backup/defaults/main.yml +++ b/ansible/roles/cassandra-backup/defaults/main.yml @@ -1,3 +1,10 @@ cassandra_root_dir: '/etc/cassandra' data_dir: '/var/lib/cassandra/data' -cassandra_backup_azure_container_name: core-cassandra \ No newline at end of file +cassandra_backup_azure_container_name: core-cassandra + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +cassandra_backup_storage: "{{ cassandra_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml index d6365315d6..ac0682c58a 100755 --- a/ansible/roles/cassandra-backup/tasks/main.yml +++ b/ansible/roles/cassandra-backup/tasks/main.yml @@ -37,7 +37,7 @@ name: azure-cloud-storage tasks_from: upload-using-azcopy.yml vars: - blob_container_name: "{{ cassandra_backup_azure_container_name }}" + blob_container_name: "{{ cassandra_backup_storage }}" container_public_access: "off" local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_gzip_file_name }}" storage_account_name: "{{ azure_management_storage_account_name }}" diff --git a/ansible/roles/cassandra-restore/defaults/main.yml b/ansible/roles/cassandra-restore/defaults/main.yml index 6353e09287..4a4828144e 100644 --- a/ansible/roles/cassandra-restore/defaults/main.yml +++ b/ansible/roles/cassandra-restore/defaults/main.yml @@ -1 +1,8 @@ user_home: "/home/{{ ansible_ssh_user }}/" + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +cassandra_backup_storage: "{{ cassandra_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/cassandra-restore/tasks/main.yml b/ansible/roles/cassandra-restore/tasks/main.yml index 80b8f86863..717e2fe113 100755 --- a/ansible/roles/cassandra-restore/tasks/main.yml +++ b/ansible/roles/cassandra-restore/tasks/main.yml @@ -11,7 +11,7 @@ name: azure-cloud-storage tasks_from: blob-download.yml vars: - blob_container_name: "{{ cassandra_backup_azure_container_name }}" + blob_container_name: "{{ cassandra_backup_storage }}" blob_file_name: "{{ cassandra_restore_gzip_file_name }}" local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}" storage_account_name: "{{ azure_management_storage_account_name }}" diff --git a/ansible/roles/cert-templates/defaults/main.yml b/ansible/roles/cert-templates/defaults/main.yml index c8710dd9d9..c621d6ddb8 100644 --- a/ansible/roles/cert-templates/defaults/main.yml +++ b/ansible/roles/cert-templates/defaults/main.yml @@ -2,3 +2,10 @@ certs_badge_upload_retry_count: 3 certs_badge_criteria: "" certs_badge_batch_id: "" certs_badge_key_id: "" + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +cert_service_storage: "{{ cert_service_container_name }}" \ No newline at end of file diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index 321a91f139..dcbdeebadc 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -36,7 +36,7 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ cert_service_container_name }}" + blob_container_name: "{{ cert_service_storage }}" container_public_access: "off" blob_container_folder_path: "" local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" diff --git a/ansible/roles/desktop-deploy/defaults/main.yml b/ansible/roles/desktop-deploy/defaults/main.yml index d71509fd05..ad3803dcd1 100644 --- a/ansible/roles/desktop-deploy/defaults/main.yml +++ b/ansible/roles/desktop-deploy/defaults/main.yml @@ -1,2 +1,9 @@ --- time: "YEAR-MONTH-DATE-HOUR-MINUTE-SECOND-INSTALLERTYPE" + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +offline_installer_storage: "{{ offline_installer_container_name }}" \ No newline at end of file diff --git a/ansible/roles/desktop-deploy/tasks/main.yml b/ansible/roles/desktop-deploy/tasks/main.yml index 963189ad4d..e7763604c1 100644 --- a/ansible/roles/desktop-deploy/tasks/main.yml +++ b/ansible/roles/desktop-deploy/tasks/main.yml @@ -53,7 +53,7 @@ block: - name: set common azure variables set_fact: - blob_container_name: "{{ offline_installer_container_name }}" + blob_container_name: "{{ offline_installer_storage }}" container_public_access: "blob" storage_account_name: "{{ azure_public_storage_account_name }}" storage_account_key: "{{ azure_public_storage_account_key }}" diff --git a/ansible/roles/es-azure-snapshot/defaults/main.yml b/ansible/roles/es-azure-snapshot/defaults/main.yml index 9072442956..f527096f18 100644 --- a/ansible/roles/es-azure-snapshot/defaults/main.yml +++ b/ansible/roles/es-azure-snapshot/defaults/main.yml @@ -10,4 +10,11 @@ snapshot_create_request_body: { es_snapshot_host: "localhost" snapshot_base_path: "default" -es_azure_backup_container_name: "elasticsearch-snapshots" \ No newline at end of file +es_azure_backup_container_name: "elasticsearch-snapshots" + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +es_backup_storage: "{{ es_azure_backup_container_name }}" \ No newline at end of file diff --git a/ansible/roles/es-azure-snapshot/tasks/main.yml b/ansible/roles/es-azure-snapshot/tasks/main.yml index 2b7f23e576..e804b4344d 100644 --- a/ansible/roles/es-azure-snapshot/tasks/main.yml +++ b/ansible/roles/es-azure-snapshot/tasks/main.yml @@ -9,7 +9,7 @@ name: azure-cloud-storage tasks_from: container-create.yml vars: - blob_container_name: "{{ es_azure_backup_container_name }}" + blob_container_name: "{{ es_backup_storage }}" container_public_access: "off" storage_account_name: "{{ azure_management_storage_account_name }}" storage_account_key: "{{ azure_management_storage_account_key }}" diff --git a/ansible/roles/es-gcs-snapshot/defaults/main.yml b/ansible/roles/es-gcs-snapshot/defaults/main.yml index 3c2efa9a7f..5e3cbece6f 100644 --- a/ansible/roles/es-gcs-snapshot/defaults/main.yml +++ b/ansible/roles/es-gcs-snapshot/defaults/main.yml @@ -2,11 +2,11 @@ snapshot_create_request_body: { type: gcs, settings: { bucket: "{{ gcs_management_bucket_name }}", - base_path: "{{ es_gcs_backup_folder_name }}/{{ snapshot_base_path }}_{{ base_path_date }}" + base_path: "{{ es_backup_storage }}/{{ snapshot_base_path }}_{{ base_path_date }}" } } # Override these values es_snapshot_host: "localhost" snapshot_base_path: "default" -es_gcs_backup_folder_name: "elasticsearch-snapshots" \ No newline at end of file +es_backup_storage: "elasticsearch-snapshots" \ No newline at end of file diff --git a/ansible/roles/es-s3-snapshot/defaults/main.yml b/ansible/roles/es-s3-snapshot/defaults/main.yml index 2ca18929b4..7ddda6ebd0 100644 --- a/ansible/roles/es-s3-snapshot/defaults/main.yml +++ b/ansible/roles/es-s3-snapshot/defaults/main.yml @@ -2,11 +2,11 @@ snapshot_create_request_body: { type: s3, settings: { bucket: "{{ aws_management_bucket_name }}", - base_path: "{{ 
es_s3_backup_folder_name }}/{{ snapshot_base_path }}_{{ base_path_date }}" + base_path: "{{ es_backup_storage }}/{{ snapshot_base_path }}_{{ base_path_date }}" } } # Override these values es_snapshot_host: "localhost" snapshot_base_path: "default" -es_s3_backup_folder_name: "elasticsearch-snapshots" \ No newline at end of file +es_backup_storage: "elasticsearch-snapshots" \ No newline at end of file diff --git a/ansible/roles/grafana-backup/defaults/main.yml b/ansible/roles/grafana-backup/defaults/main.yml index b32dea6dde..fc62843964 100644 --- a/ansible/roles/grafana-backup/defaults/main.yml +++ b/ansible/roles/grafana-backup/defaults/main.yml @@ -5,3 +5,10 @@ grafana_data_dir: /var/dockerdata/grafana/grafana.db sunbird_management_storage_account_name: sunbird_management_storage_account_key: '' grafana_backup_azure_container_name: grafana-backup + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +grafana_backup_storage: "{{ grafana_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/grafana-backup/tasks/main.yml b/ansible/roles/grafana-backup/tasks/main.yml index 786bd13442..c898ada0d5 100644 --- a/ansible/roles/grafana-backup/tasks/main.yml +++ b/ansible/roles/grafana-backup/tasks/main.yml @@ -24,7 +24,7 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ grafana_backup_azure_container_name }}" + blob_container_name: "{{ grafana_backup_storage }}" container_public_access: "off" blob_file_name: "{{ grafana_backup_gzip_file_name }}" local_file_or_folder_path: "{{ grafana_backup_gzip_file_path }}" diff --git a/ansible/roles/jenkins-backup-upload/defaults/main.yml b/ansible/roles/jenkins-backup-upload/defaults/main.yml index 18ab7e816d..40a231d3d5 100644 --- a/ansible/roles/jenkins-backup-upload/defaults/main.yml +++ b/ansible/roles/jenkins-backup-upload/defaults/main.yml @@ -3,3 +3,10 @@ jenkins_group: jenkins jenkins_backup_base_dir: /var/lib/jenkins/jenkins-backup jenkins_backup_azure_container_name: jenkins-backup jenkins_backup_max_delay_in_days: 1 + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +jenkins_backup_storage: "{{ jenkins_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/jenkins-backup-upload/tasks/main.yml b/ansible/roles/jenkins-backup-upload/tasks/main.yml index e430e57c1c..d003bed89f 100644 --- a/ansible/roles/jenkins-backup-upload/tasks/main.yml +++ b/ansible/roles/jenkins-backup-upload/tasks/main.yml @@ -17,7 +17,7 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ jenkins_backup_azure_container_name }}" + blob_container_name: "{{ jenkins_backup_storage }}" container_public_access: "off" blob_file_name: "{{ LATEST_BACKUP_DIR.stdout }}.zip" local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" diff --git a/ansible/roles/mongodb-backup/defaults/main.yml b/ansible/roles/mongodb-backup/defaults/main.yml index 82a51650a5..d7b56ebefd 100644 --- a/ansible/roles/mongodb-backup/defaults/main.yml +++ b/ansible/roles/mongodb-backup/defaults/main.yml @@ -1,2 +1,9 @@ mongo_backup_dir: '/tmp/mongo-backup' mongo_backup_azure_container_name: "{{ mongo_backup_azure_container_name }}" + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +mongo_backup_storage: "{{ mongo_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/mongodb-backup/tasks/main.yml b/ansible/roles/mongodb-backup/tasks/main.yml index 4235e52c32..1eefe6b077 100644 --- a/ansible/roles/mongodb-backup/tasks/main.yml +++ b/ansible/roles/mongodb-backup/tasks/main.yml @@ -19,7 +19,7 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ mongo_backup_azure_container_name }}" + blob_container_name: "{{ mongo_backup_storage }}" container_public_access: "off" blob_file_name: "{{ mongo_backup_file_name }}.tar.gz" local_file_or_folder_path: "{{ mongo_backup_file_path }}.tar.gz" diff --git a/ansible/roles/offline-installer/defaults/main.yml b/ansible/roles/offline-installer/defaults/main.yml index d71509fd05..ad3803dcd1 100644 --- a/ansible/roles/offline-installer/defaults/main.yml +++ b/ansible/roles/offline-installer/defaults/main.yml @@ -1,2 +1,9 @@ --- time: "YEAR-MONTH-DATE-HOUR-MINUTE-SECOND-INSTALLERTYPE" + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +offline_installer_storage: "{{ offline_installer_container_name }}" \ No newline at end of file diff --git a/ansible/roles/offline-installer/tasks/upload_to_storage.yml b/ansible/roles/offline-installer/tasks/upload_to_storage.yml index 3c62d6fc03..b8a68ba164 100644 --- a/ansible/roles/offline-installer/tasks/upload_to_storage.yml +++ b/ansible/roles/offline-installer/tasks/upload_to_storage.yml @@ -55,7 +55,7 @@ block: - name: set common azure variables set_fact: - blob_container_name: "{{ offline_installer_container_name }}" + blob_container_name: "{{ offline_installer_storage }}" container_public_access: "blob" storage_account_name: "{{ azure_public_storage_account_name }}" storage_account_key: "{{ azure_public_storage_account_key }}" diff --git a/ansible/roles/postgres-azure-managed-service-backup/defaults/main.yml b/ansible/roles/postgres-azure-managed-service-backup/defaults/main.yml index bf43091813..6e637bf3ce 100644 --- a/ansible/roles/postgres-azure-managed-service-backup/defaults/main.yml +++ b/ansible/roles/postgres-azure-managed-service-backup/defaults/main.yml @@ -8,3 +8,10 @@ db_name: postgres_admin_user: "{{sunbird_pg_user}}" postgres_hostname: "{{groups['postgresql-master-1'][0]}}" postgres_password: "{{postgres_password}}" + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +postgresql_backup_storage: "{{ postgresql_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml b/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml index a64f3639af..a8261d91a3 100644 --- a/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml +++ b/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml @@ -46,7 +46,7 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ postgresql_backup_azure_container_name }}" + blob_container_name: "{{ postgresql_backup_storage }}" container_public_access: "off" blob_file_name: "{{ postgresql_backup_gzip_file_name }}.zip" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" diff --git a/ansible/roles/postgres-azure-managed-service-restore/defaults/main.yml b/ansible/roles/postgres-azure-managed-service-restore/defaults/main.yml index 6a634e3bfd..4ac0d62151 100644 --- a/ansible/roles/postgres-azure-managed-service-restore/defaults/main.yml +++ b/ansible/roles/postgres-azure-managed-service-restore/defaults/main.yml @@ -12,3 +12,10 @@ postgres_user: postgres_password: postgres_hostname: postgres_env: + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +postgres_backup_storage: "{{ postgres_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml b/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml index 135c29280c..61b1fe3eca 100644 --- a/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml +++ b/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml @@ -14,7 +14,7 @@ name: azure-cloud-storage tasks_from: blob-download.yml vars: - blob_container_name: "{{ postgres_backup_azure_container_name }}" + blob_container_name: "{{ postgres_backup_storage }}" blob_file_name: "{{ postgres_backup_filename }}" local_file_or_folder_path: "{{ postgres_restore_dir }}/{{ postgres_backup_filepath }}" storage_account_name: "{{ azure_management_storage_account_name }}" diff --git a/ansible/roles/postgresql-backup/defaults/main.yml b/ansible/roles/postgresql-backup/defaults/main.yml index d64be512d8..f358e4f4f3 100644 --- a/ansible/roles/postgresql-backup/defaults/main.yml +++ b/ansible/roles/postgresql-backup/defaults/main.yml @@ -2,4 +2,9 @@ postgresql_backup_dir: /tmp/postgresql-backup postgresql_user: postgres postgresql_backup_azure_container_name: postgresql-backup -# Set these vars per environment as show in example below +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +postgresql_backup_storage: "{{ postgresql_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/postgresql-backup/tasks/main.yml b/ansible/roles/postgresql-backup/tasks/main.yml index 2e25619a10..81ce384afa 100644 --- a/ansible/roles/postgresql-backup/tasks/main.yml +++ b/ansible/roles/postgresql-backup/tasks/main.yml @@ -18,7 +18,7 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ postgresql_backup_azure_container_name }}" + blob_container_name: "{{ postgresql_backup_storage }}" container_public_access: "off" blob_file_name: "{{ postgresql_backup_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" diff --git a/ansible/roles/postgresql-restore/defaults/main.yml b/ansible/roles/postgresql-restore/defaults/main.yml index 0c6b17f851..5f0708ed34 100644 --- a/ansible/roles/postgresql-restore/defaults/main.yml +++ b/ansible/roles/postgresql-restore/defaults/main.yml @@ -5,8 +5,9 @@ postgresql_cluster_version: 9.5 postgresql_cluster_name: main postgresql_restore_azure_container_name: postgresql-backup -# Set these vars per environment as show in example below -# postgresql_restore_azure_storage_account_name: ntpbackupsstaging - -# Pass the parameter -# postgresql_restore_gzip_file_name: +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +postgresql_restore_storage: "{{ postgresql_restore_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/postgresql-restore/tasks/main.yml b/ansible/roles/postgresql-restore/tasks/main.yml index 4075baa596..b95eff5751 100644 --- a/ansible/roles/postgresql-restore/tasks/main.yml +++ b/ansible/roles/postgresql-restore/tasks/main.yml @@ -9,7 +9,7 @@ name: azure-cloud-storage tasks_from: blob-download.yml vars: - blob_container_name: "{{ postgresql_restore_azure_container_name }}" + blob_container_name: "{{ postgresql_restore_storage }}" blob_file_name: "{{ postgresql_restore_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_restore_gzip_file_path }}" storage_account_name: "{{ azure_management_storage_account_name }}" diff --git a/ansible/roles/prometheus-backup-v2/defaults/main.yml b/ansible/roles/prometheus-backup-v2/defaults/main.yml index 0cd66df647..e3752a693f 100644 --- a/ansible/roles/prometheus-backup-v2/defaults/main.yml +++ b/ansible/roles/prometheus-backup-v2/defaults/main.yml @@ -1,3 +1,10 @@ --- # defaults file for ansible/roles/prometheus-backup-v2 -prometheus_backup_azure_container_name: prometheus-backup \ No newline at end of file +prometheus_backup_azure_container_name: prometheus-backup + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/prometheus-backup-v2/tasks/main.yml b/ansible/roles/prometheus-backup-v2/tasks/main.yml index 6286f31ebb..071ed395e1 100644 --- a/ansible/roles/prometheus-backup-v2/tasks/main.yml +++ b/ansible/roles/prometheus-backup-v2/tasks/main.yml @@ -21,7 +21,7 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ prometheus_backup_azure_container_name }}" + blob_container_name: "{{ prometheus_backup_storage }}" container_public_access: "off" blob_file_name: "{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" local_file_or_folder_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" diff --git a/ansible/roles/prometheus-backup/defaults/main.yml b/ansible/roles/prometheus-backup/defaults/main.yml index dd43fbf572..17425092ee 100644 --- a/ansible/roles/prometheus-backup/defaults/main.yml +++ b/ansible/roles/prometheus-backup/defaults/main.yml @@ -4,4 +4,11 @@ prometheus_backup_azure_container_name: prometheus-backup # Set these vars per environment as show in example below # Override these values in group_vars backup_storage_name: backups -backup_storage_key: '' \ No newline at end of file +backup_storage_key: '' + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/prometheus-backup/tasks/main.yml b/ansible/roles/prometheus-backup/tasks/main.yml index 1a71443c28..f9aaa54073 100644 --- a/ansible/roles/prometheus-backup/tasks/main.yml +++ b/ansible/roles/prometheus-backup/tasks/main.yml @@ -30,7 +30,7 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ prometheus_backup_azure_container_name }}" + blob_container_name: "{{ prometheus_backup_storage }}" container_public_access: "off" blob_file_name: "{{ prometheus_backup_gzip_file_name }}" local_file_or_folder_path: "{{ prometheus_backup_gzip_file_path }}" diff --git a/ansible/roles/prometheus-restore/defaults/main.yml b/ansible/roles/prometheus-restore/defaults/main.yml index eba83809ca..bee405457a 100644 --- a/ansible/roles/prometheus-restore/defaults/main.yml +++ b/ansible/roles/prometheus-restore/defaults/main.yml @@ -1,2 +1,9 @@ prometheus_backup_dir: /tmp/prometheus-backup -prometheus_backup_azure_container_name: prometheus-backup \ No newline at end of file +prometheus_backup_azure_container_name: prometheus-backup + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/prometheus-restore/tasks/main.yml b/ansible/roles/prometheus-restore/tasks/main.yml index 9b2a176882..40c9bd9225 100644 --- a/ansible/roles/prometheus-restore/tasks/main.yml +++ b/ansible/roles/prometheus-restore/tasks/main.yml @@ -7,7 +7,7 @@ name: azure-cloud-storage tasks_from: blob-download.yml vars: - blob_container_name: "{{ prometheus_backup_azure_container_name }}" + blob_container_name: "{{ prometheus_backup_storage }}" blob_file_name: "{{ prometheus_backup_filename }}" local_file_or_folder_path: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filepath }}" storage_account_name: "{{ azure_management_storage_account_name }}" diff --git a/ansible/roles/redis-backup/defaults/main.yml b/ansible/roles/redis-backup/defaults/main.yml index e00b84ce4b..9f6055682a 100644 --- a/ansible/roles/redis-backup/defaults/main.yml +++ b/ansible/roles/redis-backup/defaults/main.yml @@ -1,3 +1,10 @@ redis_backup_dir: /tmp/redis-backup nodebb_redis_backup_azure_container_name: nodebb-redis-backup learner_user: learning + +# This variable is added for the below reason - +# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name +# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo +# or other default files and just assign the value to the newly introduced common variable +# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos +nodebb_redis_backup_storage: "{{ nodebb_redis_backup_azure_container_name }}" \ No newline at end of file diff --git a/ansible/roles/redis-backup/tasks/main.yml b/ansible/roles/redis-backup/tasks/main.yml index fa621b4d6d..be66ea5292 100644 --- a/ansible/roles/redis-backup/tasks/main.yml +++ b/ansible/roles/redis-backup/tasks/main.yml @@ -18,7 +18,7 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ nodebb_redis_backup_azure_container_name }}" + blob_container_name: "{{ nodebb_redis_backup_storage }}" container_public_access: "off" blob_file_name: "{{ redis_backup_file_name }}" local_file_or_folder_path: "{{ redis_backup_file_path }}" diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index a4da2d4ede..7109a65f68 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -20,7 +20,7 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ blob_container }}" + blob_container_name: "{{ container_name }}" container_public_access: "container" blob_container_folder_path: "" local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml index c476a3de0f..b4ae2238c9 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml @@ -72,7 +72,7 @@ return """<b>This parameter is not used</b>""" true - blob_container + storage diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml index 997794c6dc..816c4f9f49 100644 --- 
a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml @@ -72,7 +72,7 @@ return """<b>This parameter is not used</b>""" true - blob_container + storage diff --git a/pipelines/upload/faqs/Jenkinsfile b/pipelines/upload/faqs/Jenkinsfile index 00588a8dd7..ee68678781 100644 --- a/pipelines/upload/faqs/Jenkinsfile +++ b/pipelines/upload/faqs/Jenkinsfile @@ -25,7 +25,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/uploadFAQs.yml" - ansibleExtraArgs = "--tags ${params.tag} --extra-vars \"container_name=${params.blob_container} source_folder=${params.source_folder} destination_path=${params.blob_container}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--tags ${params.tag} --extra-vars \"container_name=${params.storage} source_folder=${params.source_folder}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) From e4d9a4a255502a35658d7fa409f596b8848c095d Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Fri, 23 Sep 2022 19:25:41 +0530 Subject: [PATCH 006/616] fix: modified vars to use generic names Signed-off-by: Keshav Prasad --- ansible/uploadFAQs.yml | 2 +- .../Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml | 2 +- .../jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml | 2 +- pipelines/upload/faqs/Jenkinsfile | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index 7109a65f68..8447fe4e47 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -20,7 +20,7 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ container_name }}" + 
blob_container_name: "{{ upload_storage }}" container_public_access: "container" blob_container_folder_path: "" local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml index b4ae2238c9..85b7c81efb 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml @@ -72,7 +72,7 @@ return """<b>This parameter is not used</b>""" true - storage + upload_storage diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml index 816c4f9f49..ea47b8d14e 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml @@ -72,7 +72,7 @@ return """<b>This parameter is not used</b>""" true - storage + upload_storage diff --git a/pipelines/upload/faqs/Jenkinsfile b/pipelines/upload/faqs/Jenkinsfile index ee68678781..4f18801b4e 100644 --- a/pipelines/upload/faqs/Jenkinsfile +++ b/pipelines/upload/faqs/Jenkinsfile @@ -25,7 +25,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/uploadFAQs.yml" - ansibleExtraArgs = "--tags ${params.tag} --extra-vars \"container_name=${params.storage} source_folder=${params.source_folder}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--tags ${params.tag} --extra-vars \"upload_storage=${params.upload_storage} source_folder=${params.source_folder}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) 
values.put('env', envDir) values.put('module', module) From 89877d8c217c9e0b1b05611fcfaf4bbfe8114222 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Sat, 24 Sep 2022 18:17:11 +0530 Subject: [PATCH 007/616] fix: incorrect variable assignments Signed-off-by: Keshav Prasad --- ansible/desktop-faq-upload.yml | 2 +- ansible/dial_upload-schema.yml | 2 +- ansible/kp_upload-schema.yml | 2 +- ansible/nodebbui-upload.yml | 2 +- ansible/plugins.yml | 4 ++-- ansible/roles/es-azure-snapshot/defaults/main.yml | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index 43d1789b00..c17f7e9b9a 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -7,7 +7,7 @@ # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos vars: - desktop_container: "{{ desktop_container_storage }}" + desktop_container_storage: "{{ desktop_container }}" tasks: - name: this block consists of tasks related to azure storage block: diff --git a/ansible/dial_upload-schema.yml b/ansible/dial_upload-schema.yml index c846ecb95e..ba7abf627b 100644 --- a/ansible/dial_upload-schema.yml +++ b/ansible/dial_upload-schema.yml @@ -8,7 +8,7 @@ # or other default files and just assign the value to the newly introduced common variable # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos vars: - dial_plugin_container_name: "{{ dial_plugin_storage }}" + dial_plugin_storage: "{{ dial_plugin_container_name }}" tasks: - name: Create directories file: diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index c13633e8ab..7d7163437b 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -8,7 +8,7 @@ # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos vars: - plugin_container_name: "{{ plugin_storage }}" + plugin_storage: "{{ plugin_container_name }}" tasks: - name: upload batch of files to azure storage include_role: diff --git a/ansible/nodebbui-upload.yml b/ansible/nodebbui-upload.yml index 809d67b914..3c0bf414ae 100644 --- a/ansible/nodebbui-upload.yml +++ b/ansible/nodebbui-upload.yml @@ -9,7 +9,7 @@ # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos vars: - nodebbui_container_name: "{{ nodebbui_storage }}" + nodebbui_storage: "{{ nodebbui_container_name }}" tasks: - name: delete files and folders from azure storage using azcopy include_role: diff --git a/ansible/plugins.yml b/ansible/plugins.yml index 487f5c780d..35e34578d0 100644 --- a/ansible/plugins.yml +++ b/ansible/plugins.yml @@ -9,13 +9,13 @@ # or other default files and just assign the value to the newly introduced common variable # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos vars: - plugin_container_name: "{{ plugin_storage }}" + plugin_storage: "{{ plugin_container_name }}" tasks: - name: this block consists of tasks related to azure storage block: - name: set common azure variables set_fact: - blob_container_name: "{{ plugin_container_name }}" + blob_container_name: "{{ plugin_storage }}" container_public_access: "container" blob_delete_pattern: "content-plugins/{{ plugins_name }}" blob_container_folder_path: "/content-plugins/{{ plugins_name }}" diff --git a/ansible/roles/es-azure-snapshot/defaults/main.yml b/ansible/roles/es-azure-snapshot/defaults/main.yml index f527096f18..396746aa32 100644 --- a/ansible/roles/es-azure-snapshot/defaults/main.yml +++ b/ansible/roles/es-azure-snapshot/defaults/main.yml @@ -1,7 +1,7 @@ snapshot_create_request_body: { type: azure, settings: { - container: "{{ es_azure_backup_folder_name }}", + container: "{{ es_backup_storage }}", base_path: "{{ snapshot_base_path }}_{{ base_path_date }}" } } From 4c373b9d2d05eb3ea7e81b62fa4541333c9c5860 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Sat, 24 Sep 2022 18:41:12 +0530 Subject: [PATCH 008/616] fix: moving comments section Signed-off-by: Keshav Prasad --- private_repo/ansible/inventory/dev/Core/secrets.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index c373fa8c4f..f157d85862 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -8,8 +8,6 @@ core_vault_docker_registry_url: "change.docker.url" # for docker hub "https core_vault_docker_registry_user: "change.docker.username" core_vault_docker_registry_password: "change.docker.password" -# Define the below if you are using Azure Cloud -# Management Storage Account # Run the below command in shell # 
date +'%Y-%m-%dT%H:%m:%SZ' -d '+1 year' # sas_token=?`az storage account generate-sas --account-name "{{ azure_plugin_storage_account_name }}" --account-key "{{ azure_plugin_storage_account_key }}" --expiry $sas_expire_time --https-only --permissions acdlpruw --resource-types sco --services bfqt | xargs` @@ -23,6 +21,8 @@ sunbird_private_storage_account_key: "change.azure.storage.account.key" sunbird_management_storage_account_key: "change.azure.storage.account.key" sunbird_artifact_storage_account_key: "{{ sunbird_management_storage_account_key }}" +# Define the below if you are using Azure Cloud +# Management Storage Account azure_public_storage_account_key: "{{ sunbird_public_storage_account_key }}" azure_private_storage_account_key: "{{ sunbird_private_storage_account_key }}" azure_management_storage_account_key: "{{ sunbird_management_storage_account_key }}" From 516a76f92797a9e3529ebbc0520064862ca29ed0 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Sat, 24 Sep 2022 19:09:40 +0530 Subject: [PATCH 009/616] fix: add tags for set_fact task Signed-off-by: Keshav Prasad --- ansible/deploy-plugins.yml | 4 +++- ansible/desktop-faq-upload.yml | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index d1a0be8796..be7a6054be 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -29,12 +29,14 @@ storage_account_name: "{{ azure_public_storage_account_name }}" storage_account_key: "{{ azure_public_storage_account_key }}" storage_account_sas_token: "{{ azure_public_storage_account_sas }}" + tags: + - always - block: - name: delete files and folders from azure storage using azcopy include_role: name: azure-cloud-storage - tasks_from: delete-using-azcopy.yml + tasks_from: delete-using-azcopy.yml tags: - content-editor - collection-editor diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index c17f7e9b9a..02f29db01d 100644 --- 
a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -17,6 +17,8 @@ blob_file_name: "{{ destination_path }}" blob_container_folder_path: "/{{ destination_path }}" local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" + tags: + - always - block: - name: upload file to azure storage From 99d47256e22674380a93bb9e9a3a753b4044f6a7 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 26 Sep 2022 12:23:38 +0530 Subject: [PATCH 010/616] fix: invoke blob upload role, moved few vars Signed-off-by: Keshav Prasad --- ansible/deploy-plugins.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index be7a6054be..ef3ea0b44a 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -25,7 +25,6 @@ set_fact: blob_container_name: "{{ plugin_storage }}" container_public_access: "container" - blob_container_folder_path: "/{{ folder_name }}" storage_account_name: "{{ azure_public_storage_account_name }}" storage_account_key: "{{ azure_public_storage_account_key }}" storage_account_sas_token: "{{ azure_public_storage_account_sas }}" @@ -37,6 +36,8 @@ include_role: name: azure-cloud-storage tasks_from: delete-using-azcopy.yml + vars: + blob_container_folder_path: "/{{ folder_name }}" tags: - content-editor - collection-editor @@ -49,6 +50,7 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: + blob_container_folder_path: "/{{ folder_name }}" local_file_or_folder_path: "{{ source_name }}" tags: - content-editor @@ -62,7 +64,7 @@ - name: upload file to azure storage include_role: name: azure-cloud-storage - tasks_from: blob-upload-batch.yml + tasks_from: blob-upload.yml vars: blob_file_name: "artefacts/content-player/content-player-{{ player_version_number }}.zip" local_file_or_folder_path: "{{ source_file_name }}" From d7d98695bd236685d24106c3b912d8624a30b660 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 26 Sep 2022 12:48:52 
+0530 Subject: [PATCH 011/616] fix: adding build description Signed-off-by: Keshav Prasad --- pipelines/upload/chatbot/Jenkinsfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pipelines/upload/chatbot/Jenkinsfile b/pipelines/upload/chatbot/Jenkinsfile index 6aaf6d0c39..70910ef261 100644 --- a/pipelines/upload/chatbot/Jenkinsfile +++ b/pipelines/upload/chatbot/Jenkinsfile @@ -47,6 +47,8 @@ node() { values.put('ansibleExtraArgs', ansibleExtraArgs) println values ansible_playbook_run(values) + currentBuild.result = 'SUCCESS' + currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" } } summary() From 498dbf13d73b8bc9e941df0242d94408e7a4495f Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 26 Sep 2022 13:29:57 +0530 Subject: [PATCH 012/616] fix: updated jenkins job param for container path Signed-off-by: Keshav Prasad --- ansible/desktop-faq-upload.yml | 4 ++-- .../dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index 02f29db01d..a2789218c2 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -7,13 +7,13 @@ # or other default files and just assign the value to the newly introduced common variable # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos vars: - desktop_container_storage: "{{ desktop_container }}" + desktop_storage: "{{ desktop_container }}" tasks: - name: this block consists of tasks related to azure storage block: - name: set common azure variables set_fact: - blob_container_name: "{{ desktop_container_storage }}" + blob_container_name: "{{ desktop_storage }}" blob_file_name: "{{ destination_path }}" blob_container_folder_path: "/{{ destination_path }}" local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml index c0289cf30f..ec3dbfe716 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml @@ -102,7 +102,7 @@ return """<b>This parameter is not used</b>""" destination_path - chatbot/router/config + router/config false From 10dec83ed4999e809c7569ce63fd33cd22a11fc2 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 26 Sep 2022 13:53:23 +0530 Subject: [PATCH 013/616] fix: updated jenkins jobs and params Signed-off-by: Keshav Prasad --- .../config.xml | 2 +- pipelines/upload/chatbot/Jenkinsfile | 5 +++++ pipelines/upload/portal-csv/Jenkinsfile | 7 +++++++ 3 files changed, 13 insertions(+), 1 deletion(-) rename deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/{Upload_CollectionHierarchy_CSV => UploadCollectionHierarchyCSV}/config.xml (99%) diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/Upload_CollectionHierarchy_CSV/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml similarity index 99% rename from 
deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/Upload_CollectionHierarchy_CSV/config.xml rename to deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml index 4818cfc99b..314e1bcd74 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/Upload_CollectionHierarchy_CSV/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml @@ -125,7 +125,7 @@ return """<b>This parameter is not used</b>""" - sourcing/collection-hierarchy + collection-hierarchy diff --git a/pipelines/upload/chatbot/Jenkinsfile b/pipelines/upload/chatbot/Jenkinsfile index 70910ef261..764e73c9ba 100644 --- a/pipelines/upload/chatbot/Jenkinsfile +++ b/pipelines/upload/chatbot/Jenkinsfile @@ -54,7 +54,12 @@ node() { summary() } catch (err) { + currentBuild.result = 'FAILURE' throw err + } + finally { + slack_notify(currentBuild.result) + email_notify() } } diff --git a/pipelines/upload/portal-csv/Jenkinsfile b/pipelines/upload/portal-csv/Jenkinsfile index c8194f6694..19a0782e81 100644 --- a/pipelines/upload/portal-csv/Jenkinsfile +++ b/pipelines/upload/portal-csv/Jenkinsfile @@ -36,11 +36,18 @@ node() { values.put('ansibleExtraArgs', ansibleExtraArgs) println values ansible_playbook_run(values) + currentBuild.result = 'SUCCESS' + currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" } } summary() } catch (err) { + currentBuild.result = 'FAILURE' throw err + } + finally { + slack_notify(currentBuild.result) + email_notify() } } From d1afa413d1aaac161c13457d48db7ddb6e738651 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 26 Sep 2022 15:09:06 +0530 Subject: [PATCH 014/616] fix: renamed jenkins parameters, added missing jenkins job Signed-off-by: Keshav Prasad --- ansible/desktop-faq-upload.yml | 9 +- .../UploadCollectionHierarchyCSV/config.xml | 2 +- .../jobs/UploadChatbotConfig/config.xml | 2 +- 
.../UploadCollectionHierarchyCSV/config.xml | 2 +- .../jobs/UploadDiscussionUIDocs/config.xml | 244 ++++++++++++++++++ pipelines/deploy/desktop-faq/Jenkinsfile | 2 +- pipelines/upload/chatbot/Jenkinsfile | 2 +- pipelines/upload/discussion-UI/Jenkinsfile | 2 +- pipelines/upload/portal-csv/Jenkinsfile | 2 +- 9 files changed, 252 insertions(+), 15 deletions(-) create mode 100644 deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index a2789218c2..0cdb89a07d 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -1,19 +1,12 @@ - hosts: localhost vars_files: - "{{inventory_dir}}/secrets.yml" - # The vars: section is added for the below reason - # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name - # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo - # or other default files and just assign the value to the newly introduced common variable - # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos - vars: - desktop_storage: "{{ desktop_container }}" tasks: - name: this block consists of tasks related to azure storage block: - name: set common azure variables set_fact: - blob_container_name: "{{ desktop_storage }}" + blob_container_name: "{{ upload_storage }}" blob_file_name: "{{ destination_path }}" blob_container_folder_path: "/{{ destination_path }}" local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" diff --git a/deploy/jenkins/jobs/Deploy/jobs/Vidyadaan/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml b/deploy/jenkins/jobs/Deploy/jobs/Vidyadaan/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml index 0272d155c6..0236cab0eb 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/Vidyadaan/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/Vidyadaan/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml @@ -103,7 +103,7 @@ return """<b>This parameter is not used</b>""" false - container_name + upload_storage diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml index ec3dbfe716..defc3a0ddd 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml @@ -85,7 +85,7 @@ return """<b>This parameter is not used</b>""" false - container_name + upload_storage diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml index 314e1bcd74..d87aac4ee3 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml +++ 
b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml @@ -103,7 +103,7 @@ return """<b>This parameter is not used</b>""" false - container_name + upload_storage diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml new file mode 100644 index 0000000000..94f6f3a64d --- /dev/null +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml @@ -0,0 +1,244 @@ + + + + false + + + + -1 + 10 + -1 + 2 + + + + + false + false + + + + + absolute_job_path + <font color=dimgray size=2><b>Do not change this value! The metadata.json will be copied from this job.</b></font> + ArtifactUpload/dev/Core/DiscussionsUI + false + + + artifact_source + <font color=dimgray size=2><b> +ArtifactRepo - Download the artifact from azure blob, JenkinsJob - Use the atrifact from Jenkins job.</b></font> + choice-parameter-9600649228560 + 1 + + true + + + + UploadDiscussionUIDocs + Deploy/dev/Kubernetes/UploadDiscussionUIDocs + + + PT_SINGLE_SELECT + false + 1 + + + build_number + + choice-parameter-9600651313765 + 1 + + true + + + + UploadDiscussionUIDocs + Deploy/dev/Kubernetes/UploadDiscussionUIDocs + + artifact_source + ET_FORMATTED_HTML + true + + + artifact_version + + choice-parameter-9600653373369 + 1 + + true + + + + UploadDiscussionUIDocs + Deploy/dev/Kubernetes/UploadDiscussionUIDocs + + artifact_source + ET_FORMATTED_HTML + true + + + private_branch + + choice-parameter-2544395024638227 + 1 + + true + + + + true + + + UploadDiscussionUIDocs + Deploy/dev/Kubernetes/UploadDiscussionUIDocs + + + ET_FORMATTED_HTML + true + + + branch_or_tag + + choice-parameter-2620434998790477 + 1 + + true + + + + true + + + UploadDiscussionUIDocs + Deploy/dev/Kubernetes/UploadDiscussionUIDocs + + + ET_FORMATTED_HTML + true + + + upload_storage + + + + discussion-ui + + + + + 
source_path + + + + documentation + + + + + destination_path + + + + discussion-ui/documentation + + + + + tag + + + + upload-batch + + + + + + + 0 + 0 + + false + project + false + + + + + + + + + + 2 + + + https://github.com/project-sunbird/sunbird-devops.git + + + + + ${branch_or_tag} + + + false + + + + true + false + + 0 + false + + + + pipelines/upload/discussion-UI/Jenkinsfile + false + + + false + diff --git a/pipelines/deploy/desktop-faq/Jenkinsfile b/pipelines/deploy/desktop-faq/Jenkinsfile index 620c5c2f5f..d282ec2884 100644 --- a/pipelines/deploy/desktop-faq/Jenkinsfile +++ b/pipelines/deploy/desktop-faq/Jenkinsfile @@ -25,7 +25,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/desktop-faq-upload.yml" - ansibleExtraArgs = "--extra-vars \" desktop_container=${params.desktop_container} src_file_path=${params.src_file_path} destination_path=${params.destination_path} env_name=$envDir\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" + ansibleExtraArgs = "--extra-vars \" upload_storage=${params.upload_storage} src_file_path=${params.src_file_path} destination_path=${params.destination_path} env_name=$envDir\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) diff --git a/pipelines/upload/chatbot/Jenkinsfile b/pipelines/upload/chatbot/Jenkinsfile index 764e73c9ba..c97597c44c 100644 --- a/pipelines/upload/chatbot/Jenkinsfile +++ b/pipelines/upload/chatbot/Jenkinsfile @@ -38,7 +38,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/desktop-faq-upload.yml" - ansibleExtraArgs = "--extra-vars \" desktop_container=${params.container_name} 
src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" + ansibleExtraArgs = "--extra-vars \" upload_storage=${params.upload_storage} src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) diff --git a/pipelines/upload/discussion-UI/Jenkinsfile b/pipelines/upload/discussion-UI/Jenkinsfile index 4de3383796..c4d794fb3e 100644 --- a/pipelines/upload/discussion-UI/Jenkinsfile +++ b/pipelines/upload/discussion-UI/Jenkinsfile @@ -30,7 +30,7 @@ node() { unzip ${artifact} """ ansiblePlaybook = "${currentWs}/ansible/desktop-faq-upload.yml" - ansibleExtraArgs = "--extra-vars \" desktop_container=${params.container_name} src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" + ansibleExtraArgs = "--extra-vars \" upload_storage=${params.upload_storage} src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" values.put('ansiblePlaybook', ansiblePlaybook) values.put('ansibleExtraArgs', ansibleExtraArgs) println values diff --git a/pipelines/upload/portal-csv/Jenkinsfile b/pipelines/upload/portal-csv/Jenkinsfile index 19a0782e81..6e8453d3e2 100644 --- a/pipelines/upload/portal-csv/Jenkinsfile +++ b/pipelines/upload/portal-csv/Jenkinsfile @@ -27,7 +27,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/desktop-faq-upload.yml" - ansibleExtraArgs = "--extra-vars \" desktop_container=${params.container_name} 
src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" + ansibleExtraArgs = "--extra-vars \" upload_storage=${params.upload_storage} src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) From 9887254ffcdb64601c2f190b408e34021b09525a Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 26 Sep 2022 15:38:00 +0530 Subject: [PATCH 015/616] fix: added missing jobs Signed-off-by: Keshav Prasad --- .../jobs/UploadDiscussionUIDocs/config.xml | 2 +- .../jobs/UploadPortalLabel/config.xml | 189 ++++++++++++++++++ 2 files changed, 190 insertions(+), 1 deletion(-) create mode 100644 deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadPortalLabel/config.xml diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml index 94f6f3a64d..a801645925 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml @@ -182,7 +182,7 @@ return """<b>This parameter is not used</b>""" - discussion-ui/documentation + documentation diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadPortalLabel/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadPortalLabel/config.xml new file mode 100644 index 0000000000..a75d9ee220 --- /dev/null +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadPortalLabel/config.xml @@ -0,0 +1,189 @@ + + + + false + + + + -1 + -1 + -1 + 5 + + + + + false + false + + + + + private_branch + + choice-parameter-2544395024638227 + 
1 + + true + + + + true + + + UploadPortalLabel + Deploy/dev/Kubernetes/UploadPortalLabel + + + ET_FORMATTED_HTML + true + + + branch_or_tag + + choice-parameter-2620434998790477 + 1 + + true + + + + true + + + UploadPortalLabel + Deploy/dev/Kubernetes/UploadPortalLabel + + + ET_FORMATTED_HTML + true + + + upload_storage + + + + label + + + + + destination_path + + + + all_labels_ta.json + all_labels_bn.json + all_labels_en.json + all_labels_hi.json + all_labels_kn.json + all_labels_mr.json + all_labels_te.json + all_labels_ur.json + all_labels_gu.json + all_labels_ml.json + all_labels_as.json + all_labels_or.json + + + + + src_file_path + + + + utils/portal/labels/all_labels_ta.json + utils/portal/labels/all_labels_bn.json + utils/portal/labels/all_labels_en.json + utils/portal/labels/all_labels_hi.json + utils/portal/labels/all_labels_kn.json + utils/portal/labels/all_labels_mr.json + utils/portal/labels/all_labels_te.json + utils/portal/labels/all_labels_ur.json + utils/portal/labels/all_labels_gu.json + utils/portal/labels/all_labels_ml.json + utils/portal/labels/all_labels_as.json + utils/portal/labels/all_labels_or.json + + + + + tag + + + + upload-label + + + + + + + 0 + 0 + + false + project + false + + + + + + + + + + SUCCESS + 0 + BLUE + true + + + + + + + + 2 + + + https://github.com/project-sunbird/sunbird-devops.git + + + + + ${branch_or_tag} + + + false + + + + true + false + + 0 + false + + + + pipelines/deploy/desktop-faq/Jenkinsfile + false + + + false + From 01975c547fdc37826bf51ffee0fb78caf4102ff9 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 26 Sep 2022 15:48:38 +0530 Subject: [PATCH 016/616] fix: updated schema folder name Signed-off-by: Keshav Prasad --- pipelines/upload/schema/dial/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/upload/schema/dial/Jenkinsfile b/pipelines/upload/schema/dial/Jenkinsfile index 1a0216c740..44cf0bca8b 100644 --- a/pipelines/upload/schema/dial/Jenkinsfile +++ 
b/pipelines/upload/schema/dial/Jenkinsfile @@ -29,7 +29,7 @@ node() { git clone https://github.com/project-sunbird/sunbird-dial-service.git -b ${params.dial_branch_or_tag} """ ansiblePlaybook = "${currentWs}/ansible/dial_upload-schema.yml" - ansibleExtraArgs = "--extra-vars \" source_name=${currentWs}/sunbird-dial-service/schemas \" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--extra-vars \" source_name=${currentWs}/sunbird-dial-service/jsonld-schema \" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) From 1406ea07d710241f779bbfe4a5cdbd651c5dfe7d Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 26 Sep 2022 19:36:38 +0530 Subject: [PATCH 017/616] fix: revert source folder changes Signed-off-by: Keshav Prasad --- pipelines/upload/schema/dial/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/upload/schema/dial/Jenkinsfile b/pipelines/upload/schema/dial/Jenkinsfile index 44cf0bca8b..dd74b2f23f 100644 --- a/pipelines/upload/schema/dial/Jenkinsfile +++ b/pipelines/upload/schema/dial/Jenkinsfile @@ -29,7 +29,7 @@ node() { git clone https://github.com/project-sunbird/sunbird-dial-service.git -b ${params.dial_branch_or_tag} """ ansiblePlaybook = "${currentWs}/ansible/dial_upload-schema.yml" - ansibleExtraArgs = "--extra-vars \" source_name=${currentWs}/sunbird-dial-service/jsonld-schema \" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--extra-vars \" source_name=${currentWs}/sunbird-dial-service/schemas\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) From 8c4d0297587c1931bf5c7f83c17bbe3b846a0e65 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Sep 2022 11:19:18 +0530 Subject: [PATCH 018/616] fix: renamed jobs, formatted lines, deleted unsued plays 
Signed-off-by: Keshav Prasad --- ansible/nodebbui-upload.yml | 36 ----------- .../config.xml | 0 pipelines/certs-templates/Jenkinsfile | 59 ++++++++++--------- pipelines/deploy/NodebbUI/Jenkinsfile | 58 ------------------ 4 files changed, 30 insertions(+), 123 deletions(-) delete mode 100644 ansible/nodebbui-upload.yml rename deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/{Upload_RC_Schema => UploadRCSchema}/config.xml (100%) delete mode 100644 pipelines/deploy/NodebbUI/Jenkinsfile diff --git a/ansible/nodebbui-upload.yml b/ansible/nodebbui-upload.yml deleted file mode 100644 index 3c0bf414ae..0000000000 --- a/ansible/nodebbui-upload.yml +++ /dev/null @@ -1,36 +0,0 @@ -- hosts: local - become: yes - gather_facts: no - vars_files: - - "{{inventory_dir}}/secrets.yml" - # The vars: section is added for the below reason - # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name - # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo - # or other default files and just assign the value to the newly introduced common variable - # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos - vars: - nodebbui_storage: "{{ nodebbui_container_name }}" - tasks: - - name: delete files and folders from azure storage using azcopy - include_role: - name: azure-cloud-storage - tasks_from: delete-using-azcopy.yml - vars: - blob_container_name: "{{ nodebbui_storage }}" - blob_container_folder_path: "" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_sas_token: "{{ azure_public_storage_account_sas }}" - when: cloud_service_provider == "azure" - - - name: upload batch of files to azure storage - include_role: - name: azure-cloud-storage - tasks_from: blob-upload-batch.yml - vars: - blob_container_name: "{{ nodebbui_storage }}" - container_public_access: "container" - blob_container_folder_path: "" - local_file_or_folder_path: "{{ source_name }}" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ azure_public_storage_account_key }}" - when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/UploadRCSchema/config.xml similarity index 100% rename from deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/Upload_RC_Schema/config.xml rename to deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/UploadRCSchema/config.xml diff --git a/pipelines/certs-templates/Jenkinsfile b/pipelines/certs-templates/Jenkinsfile index ea4617aa53..74fea3b5e6 100644 --- a/pipelines/certs-templates/Jenkinsfile +++ b/pipelines/certs-templates/Jenkinsfile @@ -10,39 +10,40 @@ node() { stage('checkout utils repo') { cleanWs() checkout scm - sh """ - git clone https://github.com/project-sunbird/sunbird-utils.git -b ${sunbird_util_branch_or_tag} cert-templates - """ + sh "git clone https://github.com/project-sunbird/sunbird-utils.git -b 
${sunbird_util_branch_or_tag} cert-templates" } - ansiColor('xterm') { - stage('inject vars') { - values = [:] - currentWs = sh(returnStdout: true, script: 'pwd').trim() - envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() - module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() - jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() - - ansiblePlaybook = "${currentWs}/ansible/cert-templates.yml --vault-password-file /var/lib/jenkins/secrets/vault-pass " - if (params.badgeType == "createBadge") { - ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createBadge=True\"" - } else if (params.badgeType == "createIssuer") { - ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createIssuer=True\"" - }else if (params.badgeType == "createPublicKey") { - ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createPublicKey=True\"" - }else { - ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs\"" - } - + ansiColor('xterm') { + stage('inject vars') { + values = [:] + currentWs = sh(returnStdout: true, script: 'pwd').trim() + envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() + module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() + jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() + ansiblePlaybook = "${currentWs}/ansible/cert-templates.yml --vault-password-file /var/lib/jenkins/secrets/vault-pass" + if (params.badgeType == "createBadge") { + ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createBadge=True\"" + } + else if (params.badgeType == "createIssuer") { + ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createIssuer=True\"" + } + else if (params.badgeType == "createPublicKey") { + ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createPublicKey=True\"" + } + else { + ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs\"" + 
} values.put('currentWs', currentWs) values.put('env', envDir) - values.put('module', module) - values.put('jobName', jobName) - values.put('ansiblePlaybook', ansiblePlaybook) - values.put('ansibleExtraArgs', ansibleExtraArgs) - println values - ansible_playbook_run(values) - } + values.put('module', module) + values.put('jobName', jobName) + values.put('ansiblePlaybook', ansiblePlaybook) + values.put('ansibleExtraArgs', ansibleExtraArgs) + println values + ansible_playbook_run(values) + currentBuild.result = 'SUCCESS' + currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" } + } } catch (err) { currentBuild.result = "FAILURE" diff --git a/pipelines/deploy/NodebbUI/Jenkinsfile b/pipelines/deploy/NodebbUI/Jenkinsfile deleted file mode 100644 index 150a57f442..0000000000 --- a/pipelines/deploy/NodebbUI/Jenkinsfile +++ /dev/null @@ -1,58 +0,0 @@ -@Library('deploy-conf') _ -node() { - try { - String ANSI_GREEN = "\u001B[32m" - String ANSI_NORMAL = "\u001B[0m" - String ANSI_BOLD = "\u001B[1m" - String ANSI_RED = "\u001B[31m" - String ANSI_YELLOW = "\u001B[33m" - - stage('checkout public repo') { - folder = new File("$WORKSPACE/.git") - if (folder.exists()) - { - println "Found .git folder. Clearing it.." 
- sh'git clean -fxd' - } - checkout scm - } - - ansiColor('xterm') { - values = lp_dp_params() - values.put('module', 'Core') - stage('get artifact') { - currentWs = sh(returnStdout: true, script: 'pwd').trim() - artifact = values.artifact_name + ":" + values.artifact_version - values.put('currentWs', currentWs) - values.put('artifact', artifact) - artifact_download(values) - } - stage('deploy artifact'){ - sh """ - unzip -o ${artifact} -d discussion-ui - """ - - ansiblePlaybook = "${currentWs}/ansible/nodebbui-upload.yml" - ansibleExtraArgs = "--extra-vars \"source_name=${currentWs}/discussion-ui\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" - values.put('ansiblePlaybook', ansiblePlaybook) - values.put('ansibleExtraArgs', ansibleExtraArgs) - println values - ansible_playbook_run(values) - - currentBuild.result = 'SUCCESS' - archiveArtifacts artifacts: "${artifact}", fingerprint: true, onlyIfSuccessful: true - archiveArtifacts artifacts: 'metadata.json', onlyIfSuccessful: true - currentBuild.description = "Artifact: ${values.artifact_version}, Private: ${params.private_branch}, Public: ${params.branch_or_tag}" - } - } - summary() - } - catch (err) { - currentBuild.result = 'FAILURE' - throw err - } - finally { - slack_notify(currentBuild.result) - email_notify() - } -} From 95de617943d898651349109f64fdf7741444fa2f Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Sep 2022 14:59:33 +0530 Subject: [PATCH 019/616] fix: remove debug statements Signed-off-by: Keshav Prasad --- ansible/roles/cert-templates/tasks/main.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index dcbdeebadc..93619c9394 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -44,13 +44,6 @@ storage_account_key: "{{ azure_private_storage_account_key }}" when: cloud_service_provider == "azure" -- name: list all the files - 
shell: "ls -lR {{cert_location}}" - register: allfiles - -- debug: - var: allfiles - - name: Remove unwanted files file: path: "{{cert_location}}/cert-templates" From 4c20bc6d3837ae31b3e5c4b44fb9719688fac094 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Sep 2022 15:15:36 +0530 Subject: [PATCH 020/616] fix: formatting, don't remove files to make debug easy Signed-off-by: Keshav Prasad --- ansible/roles/cert-templates/tasks/main.yml | 7 +------ pipelines/certs-templates/Jenkinsfile | 8 ++++---- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index 93619c9394..ee05f2adb3 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -42,9 +42,4 @@ local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" storage_account_name: "{{ azure_private_storage_account_name }}" storage_account_key: "{{ azure_private_storage_account_key }}" - when: cloud_service_provider == "azure" - -- name: Remove unwanted files - file: - path: "{{cert_location}}/cert-templates" - state: absent + when: cloud_service_provider == "azure" \ No newline at end of file diff --git a/pipelines/certs-templates/Jenkinsfile b/pipelines/certs-templates/Jenkinsfile index 74fea3b5e6..eebc455109 100644 --- a/pipelines/certs-templates/Jenkinsfile +++ b/pipelines/certs-templates/Jenkinsfile @@ -21,16 +21,16 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() ansiblePlaybook = "${currentWs}/ansible/cert-templates.yml --vault-password-file /var/lib/jenkins/secrets/vault-pass" if (params.badgeType == "createBadge") { - ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createBadge=True\"" + ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createBadge=True\"" } else if (params.badgeType == "createIssuer") { - ansibleExtraArgs = " --extra-vars 
\"cert_location=$currentWs createIssuer=True\"" + ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createIssuer=True\"" } else if (params.badgeType == "createPublicKey") { - ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createPublicKey=True\"" + ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs createPublicKey=True\"" } else { - ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs\"" + ansibleExtraArgs = " --extra-vars \"cert_location=$currentWs\"" } values.put('currentWs', currentWs) values.put('env', envDir) From 16db27a09088927388f024c82266542dafa6a9ac Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Sep 2022 16:22:27 +0530 Subject: [PATCH 021/616] fix: added missing jobs, clean up unused files Signed-off-by: Keshav Prasad --- .../Core/jobs/OfflineInstaller/config.xml | 108 ++++++++ .../jobs/OfflineInstaller/config.xml | 235 ++++++++++++++++++ pipelines/offlineinstaller/Jenkinsfile.Deploy | 80 ------ 3 files changed, 343 insertions(+), 80 deletions(-) create mode 100644 deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/Core/jobs/OfflineInstaller/config.xml create mode 100644 deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/OfflineInstaller/config.xml delete mode 100644 pipelines/offlineinstaller/Jenkinsfile.Deploy diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/Core/jobs/OfflineInstaller/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/Core/jobs/OfflineInstaller/config.xml new file mode 100644 index 0000000000..cbff43d2d0 --- /dev/null +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/Core/jobs/OfflineInstaller/config.xml @@ -0,0 +1,108 @@ + + + + + hudson.model.ParametersDefinitionProperty + com.sonyericsson.rebuild.RebuildSettings + + + + + false + + + + -1 + -1 + -1 + 5 + + + + + false + false + + + + + absolute_job_path + <font color=dimgray size=2><b>Do not change this value! 
The metadata.json will be copied from this job.</b></font> + Build/Core/OfflineInstaller + false + + + build_number + <font color=darkgreen size=2><b>OPTIONAL: Specify the build job number to upload / copy the artifact built in that job.</b></font> + lastSuccessfulBuild + false + + + artifact_source + <font color=dimgray size=2><b> +ArtifactRepo - Upload and keep a copy of the artifact in Jenkins, JenkinsJob - Just keep a copy of the artifact in Jenkins.</b></font> + + + ArtifactRepo + JenkinsJob + + + + + + + 0 + 0 + + false + project + false + + + + + + + + Build/Core/OfflineInstaller + + SUCCESS + 0 + BLUE + true + + + + + + + + 2 + + + https://github.com/project-sunbird/sunbird-devops.git + + + + + ${public_repo_branch} + + + false + + + + true + false + + 0 + false + + + + pipelines/upload/artifacts/Jenkinsfile + false + + + false + diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/OfflineInstaller/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/OfflineInstaller/config.xml new file mode 100644 index 0000000000..0544e9948d --- /dev/null +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/OfflineInstaller/config.xml @@ -0,0 +1,235 @@ + + + + false + + + + -1 + -1 + -1 + 5 + + + + + + ArtifactUpload/dev/Core/OfflineInstaller + + + + false + false + + + + + absolute_job_path + <font color=dimgray size=2><b>Do not change this value! 
The metadata.json will be copied from this job.</b></font> + ArtifactUpload/dev/Core/OfflineInstaller + false + + + private_branch + + choice-parameter-2544395024638227 + 1 + + true + + + + true + + + OfflineInstaller + Deploy/dev/Kubernetes/OfflineInstaller + + + ET_FORMATTED_HTML + true + + + branch_or_tag + + choice-parameter-2620434998790477 + 1 + + true + + + + true + + + OfflineInstaller + Deploy/dev/Kubernetes/OfflineInstaller + + + ET_FORMATTED_HTML + true + + + offline_installer_type + <font color=dimgray size=2><b>Choose the type of installer you wanted to build</b></font> + + + windows64bit + linux64bit + windows32bit + + + + + artifact_source + <font color=dimgray size=2><b> +ArtifactRepo - Download the artifact from azure blob, JenkinsJob - Use the atrifact from Jenkins job.</b></font> + choice-parameter-1754928650096303 + 1 + + true + + + + OfflineInstaller + Deploy/dev/Kubernetes/OfflineInstaller + + + PT_SINGLE_SELECT + false + 1 + + + build_number + + choice-parameter-1754928651800681 + 1 + + true + + + + OfflineInstaller + Deploy/dev/Kubernetes/OfflineInstaller + + artifact_source + ET_FORMATTED_HTML + true + + + artifact_version + + choice-parameter-1754928653885653 + 1 + + true + + + + OfflineInstaller + Deploy/dev/Kubernetes/OfflineInstaller + + artifact_source + ET_FORMATTED_HTML + true + + + + + 0 + 0 + + false + project + false + + + + + + + + ArtifactUpload/dev/Core/OfflineInstaller + + SUCCESS + 0 + BLUE + true + + + + + + + + 2 + + + https://github.com/project-sunbird/sunbird-devops.git + + + + + ${branch_or_tag} + + + false + + + + true + false + + 1 + false + + + + pipelines/offlineinstaller/Jenkinsfile + false + + + false + diff --git a/pipelines/offlineinstaller/Jenkinsfile.Deploy b/pipelines/offlineinstaller/Jenkinsfile.Deploy deleted file mode 100644 index 710cfebc48..0000000000 --- a/pipelines/offlineinstaller/Jenkinsfile.Deploy +++ /dev/null @@ -1,80 +0,0 @@ -@Library('deploy-conf') _ -node() { - try { - String ANSI_GREEN = 
"\u001B[32m" - String ANSI_NORMAL = "\u001B[0m" - String ANSI_BOLD = "\u001B[1m" - String ANSI_RED = "\u001B[31m" - String ANSI_YELLOW = "\u001B[33m" - - stage('checkout public repo') { - cleanWs() - checkout scm - } - ansiColor('xterm') { - values = lp_dp_params() - stage('get artifact') { - currentWs = sh(returnStdout: true, script: 'pwd').trim() - artifact = values.artifact_name + ":" + values.artifact_version - values.put('currentWs', currentWs) - values.put('artifact', artifact) - artifact_download(values) - } - stage('deploy artifact') { - sh """ - unzip ${artifact} - mkdir offline-installer-repo - tar -xvzf src.tar.gz -C offline-installer-repo/ - """ - - ansiblePlaybook = "${currentWs}/ansible/offline-installer.yml" - ansibleExtraArgs = "--extra-vars \"offline_repo_location=$currentWs offline_installer_type=${params.offline_installer_type}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" - values.put('ansiblePlaybook', ansiblePlaybook) - values.put('ansibleExtraArgs', ansibleExtraArgs) - println values - ansible_playbook_run(values) - currentBuild.result = "SUCCESS" - currentBuild.description = "Artifact: ${values.artifact_version}, Private: ${params.private_branch}, Public: ${params.branch_or_tag}" - archiveArtifacts artifacts: "${artifact}", fingerprint: true, onlyIfSuccessful: true - archiveArtifacts artifacts: 'metadata.json', onlyIfSuccessful: true - } - - try { - stage('Build Installer, create and upload it to azure') { - dir('offline-installer-repo') { - sh """ - bash -x build.sh - """ - } - ansiblePlaybook = "${currentWs}/ansible/offline-installer.yml --vault-password-file /var/lib/jenkins/secrets/vault-pass " - ansibleExtraArgs = " --extra-vars \"offline_repo_location=$currentWs uploadInstaller=True offline_installer_type=${offline_installer_type}\"" - values.put('currentWs', currentWs) - values.put('ansiblePlaybook', ansiblePlaybook) - values.put('ansibleExtraArgs', ansibleExtraArgs) - println values - ansible_playbook_run(values) - 
archiveArtifacts artifacts: '*.zip', onlyIfSuccessful: true - archiveArtifacts artifacts: 'latest.json', onlyIfSuccessful: true - } - } - catch (err) { - ansiblePlaybook = "${currentWs}/ansible/offline-installer.yml --vault-password-file /var/lib/jenkins/secrets/vault-pass " - ansibleExtraArgs = " --extra-vars \"offline_repo_location=$currentWs removeOfflineInstallerFolder=True offline_installer_type=${offline_installer_type}\"" - values.put('currentWs', currentWs) - values.put('ansiblePlaybook', ansiblePlaybook) - values.put('ansibleExtraArgs', ansibleExtraArgs) - println values - ansible_playbook_run(values) - currentBuild.result = 'SUCCESS' - } - } - } - catch (err) { - currentBuild.result = "FAILURE" - throw err - } - finally { - slack_notify(currentBuild.result) - email_notify() - } -} From ea59c10aae333b9bea41fc97d38595327b4831a5 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Sep 2022 16:28:16 +0530 Subject: [PATCH 022/616] fix: adding default offline store value Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 2 -- ansible/roles/desktop-deploy/defaults/main.yml | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 1f7bc11430..deec3a4a2a 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -502,8 +502,6 @@ content_import_remove_props: '["downloadUrl","variants","previewUrl","streamingU sunbird_portal_updateLoginTimeEnabled: false # Desktop app vars -#sunbird_offline_azure_storage_account: "" #added this var for adopter usecase -offline_installer_container_name: "" #added this var for adopter usecase cloud_storage_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net" # Search-service diff --git a/ansible/roles/desktop-deploy/defaults/main.yml b/ansible/roles/desktop-deploy/defaults/main.yml index ad3803dcd1..3010db2349 100644 --- 
a/ansible/roles/desktop-deploy/defaults/main.yml +++ b/ansible/roles/desktop-deploy/defaults/main.yml @@ -1,5 +1,6 @@ --- time: "YEAR-MONTH-DATE-HOUR-MINUTE-SECOND-INSTALLERTYPE" +offline_installer_container_name: "{{env}}-offlineinstaller" # This variable is added for the below reason - # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name From 306923f43f422ab4f5716c35fb2301b808fbd7ec Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Sep 2022 17:01:49 +0530 Subject: [PATCH 023/616] fix: clean up unused roles Signed-off-by: Keshav Prasad --- .../roles/offline-installer/defaults/main.yml | 9 -- .../roles/offline-installer/tasks/main.yml | 50 --------- .../roles/offline-installer/tasks/remove.yml | 8 -- .../tasks/upload_to_storage.yml | 103 ------------------ .../templates/32-bit-prerequisite.sh.j2 | 7 -- .../offline-installer/templates/Dockerfile.j2 | 13 --- .../templates/artifacts.sh.j2 | 15 --- .../offline-installer/templates/build.sh.j2 | 18 --- .../offline-installer/templates/env.json.j2 | 10 -- .../offline-installer/templates/envfile.j2 | 3 - .../templates/metadata.sh.j2 | 8 -- .../templates/setupOfflineInstaller.sh.j2 | 25 ----- pipelines/offlineinstaller/Jenkinsfile | 2 +- 13 files changed, 1 insertion(+), 270 deletions(-) delete mode 100644 ansible/roles/offline-installer/defaults/main.yml delete mode 100644 ansible/roles/offline-installer/tasks/main.yml delete mode 100644 ansible/roles/offline-installer/tasks/remove.yml delete mode 100644 ansible/roles/offline-installer/tasks/upload_to_storage.yml delete mode 100644 ansible/roles/offline-installer/templates/32-bit-prerequisite.sh.j2 delete mode 100644 ansible/roles/offline-installer/templates/Dockerfile.j2 delete mode 100644 ansible/roles/offline-installer/templates/artifacts.sh.j2 delete mode 100644 ansible/roles/offline-installer/templates/build.sh.j2 delete mode 100644 
ansible/roles/offline-installer/templates/env.json.j2 delete mode 100644 ansible/roles/offline-installer/templates/envfile.j2 delete mode 100644 ansible/roles/offline-installer/templates/metadata.sh.j2 delete mode 100644 ansible/roles/offline-installer/templates/setupOfflineInstaller.sh.j2 diff --git a/ansible/roles/offline-installer/defaults/main.yml b/ansible/roles/offline-installer/defaults/main.yml deleted file mode 100644 index ad3803dcd1..0000000000 --- a/ansible/roles/offline-installer/defaults/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -time: "YEAR-MONTH-DATE-HOUR-MINUTE-SECOND-INSTALLERTYPE" - -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -offline_installer_storage: "{{ offline_installer_container_name }}" \ No newline at end of file diff --git a/ansible/roles/offline-installer/tasks/main.yml b/ansible/roles/offline-installer/tasks/main.yml deleted file mode 100644 index e110fd3116..0000000000 --- a/ansible/roles/offline-installer/tasks/main.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -- name: get the date and time for the artifact - set_fact: - time: "{{ lookup('pipe', 'date +\"%Y-%b-%d-%H-%M-%S\"') }}-{{offline_installer_type}}" - when: uploadInstaller is not defined - -- name: copy the env.json file to the repo - template: - src: "{{item}}.j2" - dest: "{{offline_repo_location}}/offline-installer-repo/src/{{item}}" - mode: '0755' - with_items: - - env.json - when: uploadInstaller is not defined - -- name: copy the installer script file and build script for building offline installer - template: - src: "{{item}}.j2" - dest: "{{offline_repo_location}}/offline-installer-repo/{{item}}" - mode: '0755' - with_items: - - build.sh - - envfile - - 32-bit-prerequisite.sh - when: uploadInstaller is not defined - -- name: create a directory to store artifacts - file: - path: "{{offline_repo_location}}/offline-installer-repo/offline_artifacts/{{time}}" - state: directory - recurse: yes - when: uploadInstaller is not defined - -- name: copy the installer script file and build script for building offline installer - template: - src: "{{item}}.j2" - dest: "{{offline_repo_location}}/offline-installer-repo/{{item}}" - mode: '0755' - with_items: - - setupOfflineInstaller.sh - when: uploadInstaller is not defined - -- name: upload to azure - include: upload_to_storage.yml - when: uploadInstaller is defined - -- name: Delete offline installer folder if any issue - include: remove.yml - when: removeOfflineInstallerFolder is defined - diff --git a/ansible/roles/offline-installer/tasks/remove.yml 
b/ansible/roles/offline-installer/tasks/remove.yml deleted file mode 100644 index da1512de90..0000000000 --- a/ansible/roles/offline-installer/tasks/remove.yml +++ /dev/null @@ -1,8 +0,0 @@ -- name: Delete offline installer repo - file: - path: "{{offline_repo_location}}/offline-installer-repo/" - state: absent - -- name: Notify build failure - fail: - msg: "Please check the build script, it had been failed" diff --git a/ansible/roles/offline-installer/tasks/upload_to_storage.yml b/ansible/roles/offline-installer/tasks/upload_to_storage.yml deleted file mode 100644 index b8a68ba164..0000000000 --- a/ansible/roles/offline-installer/tasks/upload_to_storage.yml +++ /dev/null @@ -1,103 +0,0 @@ ---- -- name: Get the environment name for the artifact name - shell: "cat {{offline_repo_location}}/offline-installer-repo/src/package.json | jq -r '.name'" - register: env_name - -- name: Display the environment name of the installer - debug: - msg: "{{env_name.stdout}}" - -- name: Create a variable to inject environment name to upload to azure blob - set_fact: - environment_name: "{{ env_name.stdout }}" - -- name: Get the version from the package.json file - shell: "cat {{offline_repo_location}}/offline-installer-repo/src/package.json | jq -r '.version'" - register: version - -- name: Display the version number of the installer - debug: - msg: "{{version.stdout}}" - -- name: Create a variable to inject version in the template - set_fact: - installer_version: "{{ version.stdout }}" - -- name: get the directory name - shell: "ls {{offline_repo_location}}/offline-installer-repo/offline_artifacts/" - register: folderName - -- debug: - msg: "{{folderName.stdout}}" - -- name: set the folder name to copy the artifacts - set_fact: - time: "{{folderName.stdout}}" - -- name: copy the installer artifacts and metadata files to upload it to azure blob and generate latest.json file - template: - src: "{{item}}.j2" - dest: "{{offline_repo_location}}/offline-installer-repo/{{item}}" - mode: 
'0755' - with_items: - - artifacts.sh - - metadata.sh - -- name: copy the artifacts and generate the metadata file - shell: "bash {{offline_repo_location}}/offline-installer-repo/{{item}}" - args: - chdir: "{{offline_repo_location}}/offline-installer-repo/" - with_items: - - artifacts.sh - - metadata.sh - -- name: this block consists of tasks related to azure storage - block: - - name: set common azure variables - set_fact: - blob_container_name: "{{ offline_installer_storage }}" - container_public_access: "blob" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ azure_public_storage_account_key }}" - - - name: upload batch of files to azure storage - include_role: - name: azure-cloud-storage - tasks_from: blob-upload-batch.yml - vars: - blob_container_folder_path: "" - local_file_or_folder_path: "{{ offline_repo_location }}/offline-installer-repo/offline_artifacts" - - - name: upload batch of files to azure storage - include_role: - name: azure-cloud-storage - tasks_from: blob-upload-batch.yml - vars: - blob_container_folder_path: "/latest" - local_file_or_folder_path: "{{ offline_repo_location }}/offline-installer-repo/offline_artifacts/{{ folderName.stdout }}" - when: cloud_service_provider == "azure" - -- name: Create a zip of the folder to archieve the artifact - archive: - path: - - "{{offline_repo_location}}/offline-installer-repo/offline_artifacts/{{folderName.stdout}}" - dest: "{{offline_repo_location}}/{{offline_installer_type}}.zip" - owner: jenkins - group: jenkins - format: zip - -- name: copy latest.json file to archieve it in jenkins - copy: - src: "{{offline_repo_location}}/offline-installer-repo/offline_artifacts/{{folderName.stdout}}/latest.json" - dest: "{{offline_repo_location}}/latest.json" - owner: jenkins - group: jenkins - remote_src: yes - -- name: change the ownership of the directory to jenkins user - file: - path: "{{offline_repo_location}}" - state: directory - recurse: yes - owner: jenkins - 
group: jenkins \ No newline at end of file diff --git a/ansible/roles/offline-installer/templates/32-bit-prerequisite.sh.j2 b/ansible/roles/offline-installer/templates/32-bit-prerequisite.sh.j2 deleted file mode 100644 index cbfa755b0e..0000000000 --- a/ansible/roles/offline-installer/templates/32-bit-prerequisite.sh.j2 +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -#Build the offline installer -cd /private/src/ -rm -rf node_modules -npm install leveldown --verbose -npm run dist diff --git a/ansible/roles/offline-installer/templates/Dockerfile.j2 b/ansible/roles/offline-installer/templates/Dockerfile.j2 deleted file mode 100644 index 348c4c6e0a..0000000000 --- a/ansible/roles/offline-installer/templates/Dockerfile.j2 +++ /dev/null @@ -1,13 +0,0 @@ -#FROM electronuserland/builder:wine -#MAINTAINER "S M Y ALTAMASH" "" -#ENV ELECTRON_CACHE="/root/.cache/electron" -#ENV ELECTRON_BUILDER_CACHE="/root/.cache/electron-builder" -#ENV GITHUB_ACCESS_TOKEN={{offline_git_access_token}} -#ENV GITHUB_PRIVATE_REPO={{offline_git_private_repo}} -#ENV TARGET_ENVIRONMENT={{offline_target_env}} -#WORKDIR /private/ -#ADD . 
/private/ -#WORKDIR /private/src/ -#CMD npm install && npm run dist -#CMD npm run dist-win64 -#CMD npm run dist-linux diff --git a/ansible/roles/offline-installer/templates/artifacts.sh.j2 b/ansible/roles/offline-installer/templates/artifacts.sh.j2 deleted file mode 100644 index ea5db269de..0000000000 --- a/ansible/roles/offline-installer/templates/artifacts.sh.j2 +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - - -if [ "{{offline_installer_type}}" == "windows32bit" ]; -then - cp '{{offline_repo_location}}/offline-installer-repo/src/dist/{{installer_version}}/win/ia32/{{environment_name}} Setup {{installer_version}}.exe' offline_artifacts/{{time}}/{{environment_name}}_{{installer_version}}_windows32bit.exe -elif [ "{{offline_installer_type}}" == "windows64bit" ]; -then - cp '{{offline_repo_location}}/offline-installer-repo/src/dist/{{installer_version}}/win/x64/{{environment_name}} Setup {{installer_version}}.exe' offline_artifacts/{{time}}/{{environment_name}}_{{installer_version}}_windows64bit.exe -elif [ "{{offline_installer_type}}" == "linux64bit" ]; -then - cp '{{offline_repo_location}}/offline-installer-repo/src/dist/{{installer_version}}/linux/x64/{{environment_name}}_{{installer_version}}_amd64.deb' offline_artifacts/{{time}}/{{environment_name}}_{{installer_version}}_linux64bit.deb -fi - - diff --git a/ansible/roles/offline-installer/templates/build.sh.j2 b/ansible/roles/offline-installer/templates/build.sh.j2 deleted file mode 100644 index 720b64b8c6..0000000000 --- a/ansible/roles/offline-installer/templates/build.sh.j2 +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -echo "Offline Installer for Sunbird" - -# Build script -set -eo pipefail - -if [ "{{offline_installer_type}}" == "windows32bit" ]; -then - - docker run --rm -v ${PWD}:/private/ i386/node:8.16.2-stretch bash -x /private/32-bit-prerequisite.sh - -fi - -#chmod +x setupOfflineInstaller.sh -docker run --rm --env-file envfile --env ELECTRON_CACHE="/root/.cache/electron" --env 
ELECTRON_BUILDER_CACHE="/root/.cache/electron-builder" -v ${PWD}:/project electronuserland/builder:wine bash -x setupOfflineInstaller.sh - -echo "Build the installer succesfully" diff --git a/ansible/roles/offline-installer/templates/env.json.j2 b/ansible/roles/offline-installer/templates/env.json.j2 deleted file mode 100644 index 8705f96ab9..0000000000 --- a/ansible/roles/offline-installer/templates/env.json.j2 +++ /dev/null @@ -1,10 +0,0 @@ -{ - "APP_BASE_URL": "https://sunbird.org", - "CHANNEL": "sunbird", - "TELEMETRY_SYNC_INTERVAL_IN_SECS": 30, - "APP_ID": "local.sunbird.desktop", - "TELEMETRY_PACKET_SIZE": 200, - "APP_BASE_URL_TOKEN": "{{offline_app_base_url_token}}", - "APP_NAME": "SUNBIRD", - "MODE": "standalone" -} diff --git a/ansible/roles/offline-installer/templates/envfile.j2 b/ansible/roles/offline-installer/templates/envfile.j2 deleted file mode 100644 index 9b98165e0b..0000000000 --- a/ansible/roles/offline-installer/templates/envfile.j2 +++ /dev/null @@ -1,3 +0,0 @@ -GITHUB_ACCESS_TOKEN={{offline_git_access_token}} -GITHUB_PRIVATE_REPO={{offline_git_private_repo}} -TARGET_ENVIRONMENT={{offline_target_env}} diff --git a/ansible/roles/offline-installer/templates/metadata.sh.j2 b/ansible/roles/offline-installer/templates/metadata.sh.j2 deleted file mode 100644 index 9d7e7a0dd7..0000000000 --- a/ansible/roles/offline-installer/templates/metadata.sh.j2 +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -env_name={{environment_name}} -version={{installer_version}} -artifactFolder=$(find offline_artifacts/* -type d) - -# constructing the latest.json file -echo "{\"version\":\"${version}\",\"windows\":{\"32bit\":\"${env_name}_${version}_windows32bit.exe\",\"64bit\":\"${env_name}_${version}_windows64bit.exe\"},\"linux\":{\"64bit\":\"${env_name}_${version}_linux64bit.deb\"}}" | jq '.' 
| tee -a {{offline_repo_location}}/offline-installer-repo/${artifactFolder}/latest.json diff --git a/ansible/roles/offline-installer/templates/setupOfflineInstaller.sh.j2 b/ansible/roles/offline-installer/templates/setupOfflineInstaller.sh.j2 deleted file mode 100644 index 64b5a019b8..0000000000 --- a/ansible/roles/offline-installer/templates/setupOfflineInstaller.sh.j2 +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# Downgrade the node version -npm install -g n -n 8.16 -cd /project/src - -if [ "{{offline_installer_type}}" != "windows32bit" ]; -then -#Build the offline installer - npm install - npm run dist -fi - -if [ "{{offline_installer_type}}" == "windows32bit" ]; -then -# npm run dist - npm run dist-win32 -elif [ "{{offline_installer_type}}" == "windows64bit" ]; -then - npm run dist-win64 -elif [ "{{offline_installer_type}}" == "linux64bit" ]; -then - npm run dist-linux -fi diff --git a/pipelines/offlineinstaller/Jenkinsfile b/pipelines/offlineinstaller/Jenkinsfile index a4e6a8f610..c97c01a9bd 100644 --- a/pipelines/offlineinstaller/Jenkinsfile +++ b/pipelines/offlineinstaller/Jenkinsfile @@ -31,7 +31,7 @@ node() { } stage('Install the offline desktop Application') { ansiblePlaybook = "${currentWs}/ansible/offline-installer.yml --vault-password-file /var/lib/jenkins/secrets/vault-pass" - ansibleExtraArgs = "--extra-vars \"offline_repo_location=$currentWs offline_installer_type=${offline_installer_type}\" -v" + ansibleExtraArgs = "--extra-vars \"offline_repo_location=$currentWs offline_installer_type=${offline_installer_type}\"" values.put('ansiblePlaybook', ansiblePlaybook) values.put('ansibleExtraArgs', ansibleExtraArgs) println values From 6ffe54280dc385838ade36df168fa6b83d008afd Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 28 Sep 2022 11:57:28 +0530 Subject: [PATCH 024/616] fix: removed unused files, typos Signed-off-by: Keshav Prasad --- .../roles/deploy-player/tasks/main.yml | 2 +- .../ansible/roles/helm-deploy/tasks/main.yml | 10 ++-- 
.../roles/sunbird-deploy/tasks/main.yml | 2 +- .../backup/jenkins-backup-upload/Jenkinsfile | 50 ------------------- 4 files changed, 7 insertions(+), 57 deletions(-) delete mode 100644 pipelines/backup/jenkins-backup-upload/Jenkinsfile diff --git a/kubernetes/ansible/roles/deploy-player/tasks/main.yml b/kubernetes/ansible/roles/deploy-player/tasks/main.yml index 5abdc85449..52500df2e3 100644 --- a/kubernetes/ansible/roles/deploy-player/tasks/main.yml +++ b/kubernetes/ansible/roles/deploy-player/tasks/main.yml @@ -48,7 +48,7 @@ loop_control: loop_var: outer_item -- name: Create the token pubic key file +- name: Create the token public key file copy: dest: "{{ chart_path }}/keys/{{ adminutil_refresh_token_public_key_kid }}" content: "{{ core_vault_sunbird_sso_publickey }}" diff --git a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml index bd40bcfb82..a57c847b97 100644 --- a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml @@ -38,13 +38,13 @@ loop_control: loop_var: outer_item -- name: Create the token pubic key file +- name: Create the token public key file copy: dest: "{{ chart_path }}/keys/{{ adminutil_refresh_token_public_key_prefix }}" content: "{{ core_vault_sunbird_sso_publickey }}" when: release_name == "adminutils" -- name: Create the token pubic key file for ML Services +- name: Create the token public key file for ML Services copy: dest: "{{ chart_path }}/keys/{{ adminutil_refresh_token_public_key_kid }}" content: "{{ core_vault_sunbird_sso_publickey }}" @@ -107,7 +107,7 @@ args: executable: /bin/bash register: deployment_result - ignore_errors: yes + ignore_errors: true - name: Get deployed image name - deployments shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[1]' @@ -121,7 +121,7 @@ args: executable: /bin/bash register: daemonset_result - 
ignore_errors: yes + ignore_errors: true - name: Get deployed image name - daemonsets shell: "kubectl get daemonsets {{ release_name }} -o json -n {{ namespace }} | jq '.spec.template.spec.containers | .[].image' -r | awk -F/ '{print $2}'" @@ -135,7 +135,7 @@ args: executable: /bin/bash register: statefulset_result - ignore_errors: yes + ignore_errors: true - name: Get deployed image name - statefulsets shell: "kubectl get statefulsets {{ release_name }} -o json -n {{ namespace }} | jq '.spec.template.spec.containers | .[].image' -r | awk -F/ '{print $2}'" diff --git a/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml b/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml index 33fba6fb42..09e96cf25e 100644 --- a/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml @@ -39,7 +39,7 @@ loop_control: loop_var: outer_item -- name: Create the token pubic key file +- name: Create the token public key file copy: dest: "{{ chart_path }}/keys/{{ adminutil_refresh_token_public_key_kid }}" content: "{{ core_vault_sunbird_sso_publickey }}" diff --git a/pipelines/backup/jenkins-backup-upload/Jenkinsfile b/pipelines/backup/jenkins-backup-upload/Jenkinsfile deleted file mode 100644 index cd880c9390..0000000000 --- a/pipelines/backup/jenkins-backup-upload/Jenkinsfile +++ /dev/null @@ -1,50 +0,0 @@ -@Library('deploy-conf') _ -node() { - try { - String ANSI_GREEN = "\u001B[32m" - String ANSI_NORMAL = "\u001B[0m" - String ANSI_BOLD = "\u001B[1m" - String ANSI_RED = "\u001B[31m" - String ANSI_YELLOW = "\u001B[33m" - - stage('checkout public repo') { - folder = new File("$WORKSPACE/.git") - if (folder.exists()) - { - println "Found .git folder. Clearing it.." 
- sh'git clean -fxd' - } - checkout scm - } - - ansiColor('xterm') { - stage('deploy'){ - values = [:] - currentWs = sh(returnStdout: true, script: 'pwd').trim() - envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() - module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() - jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() - ansiblePlaybook = "${currentWs}/ansible/jenkins-backup.yml" - ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass" - values.put('currentWs', currentWs) - values.put('env', envDir) - values.put('module', module) - values.put('jobName', jobName) - values.put('ansiblePlaybook', ansiblePlaybook) - values.put('ansibleExtraArgs', ansibleExtraArgs) - println values - ansible_playbook_run(values) - currentBuild.result = 'SUCCESS' - currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" - } - } - } - catch (err) { - currentBuild.result = "FAILURE" - throw err - } - finally { - slack_notify(currentBuild.result) - email_notify() - } -} From e635d07e7640e3aabccb8e06a85dadb0d593634f Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 28 Sep 2022 12:05:16 +0530 Subject: [PATCH 025/616] fix: remove verbosity Signed-off-by: Keshav Prasad --- pipelines/backup/jenkins-backup/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/backup/jenkins-backup/Jenkinsfile b/pipelines/backup/jenkins-backup/Jenkinsfile index 27570e2a87..787a44fe17 100644 --- a/pipelines/backup/jenkins-backup/Jenkinsfile +++ b/pipelines/backup/jenkins-backup/Jenkinsfile @@ -25,7 +25,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/jenkins-backup.yml" - ansibleExtraArgs = "-v --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = 
"--vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) From 4d1b0a5222f3b7dd4203676415363ad6ca495313 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 28 Sep 2022 14:31:57 +0530 Subject: [PATCH 026/616] fix: adding default container name Signed-off-by: Keshav Prasad --- ansible/mongodb-backup.yml | 3 ++- ansible/roles/mongodb-backup/defaults/main.yml | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/ansible/mongodb-backup.yml b/ansible/mongodb-backup.yml index 2ab4091fc4..4db8d263bd 100644 --- a/ansible/mongodb-backup.yml +++ b/ansible/mongodb-backup.yml @@ -1,5 +1,6 @@ +--- - hosts: "{{ host }}" - become: yes + become: true vars_files: - ['{{inventory_dir}}/secrets.yml'] roles: diff --git a/ansible/roles/mongodb-backup/defaults/main.yml b/ansible/roles/mongodb-backup/defaults/main.yml index d7b56ebefd..da5a0f710f 100644 --- a/ansible/roles/mongodb-backup/defaults/main.yml +++ b/ansible/roles/mongodb-backup/defaults/main.yml @@ -1,5 +1,5 @@ mongo_backup_dir: '/tmp/mongo-backup' -mongo_backup_azure_container_name: "{{ mongo_backup_azure_container_name }}" +mongo_backup_azure_container_name: "mongodb-backup" # This variable is added for the below reason - # 1. Introduce a common variable for various clouds. 
In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name From 19b876d2c6c217bd3273425222be8e77551b6be5 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 28 Sep 2022 15:25:46 +0530 Subject: [PATCH 027/616] feat: adding new mongo backup jobs Signed-off-by: Keshav Prasad --- .../Core/jobs/GraylogMongoDbBackup/config.xml | 130 ++++++++++++++++++ .../jobs/Core/jobs/MongoDbBackup/config.xml | 130 ++++++++++++++++++ 2 files changed, 260 insertions(+) create mode 100644 deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/GraylogMongoDbBackup/config.xml create mode 100644 deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/MongoDbBackup/config.xml diff --git a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/GraylogMongoDbBackup/config.xml b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/GraylogMongoDbBackup/config.xml new file mode 100644 index 0000000000..f1e05c88a9 --- /dev/null +++ b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/GraylogMongoDbBackup/config.xml @@ -0,0 +1,130 @@ + + + + false + + + + -1 + 10 + -1 + 2 + + + + + false + false + + + + + private_branch + + choice-parameter-189743214208409 + 1 + + true + + + + true + + + GraylogMongoDbBackup + OpsAdministration/dev/Core/GraylogMongoDbBackup + + + ET_FORMATTED_HTML + true + + + branch_or_tag + + choice-parameter-189743216959018 + 1 + + true + + + + true + + + GraylogMongoDbBackup + OpsAdministration/dev/Core/GraylogMongoDbBackup + + + ET_FORMATTED_HTML + true + + + host + + graylog + false + + + + + 0 + 0 + + false + project + false + + + + + + + 00 4 * * * + + + + + + + 2 + + + https://github.com/keshavprasadms/sunbird-devops.git + + + + + ${branch_or_tag} + + + false + + + + true + false + + 0 + false + + + + pipelines/backup/mongodb-backup/Jenkinsfile + false + + + false + diff --git a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/MongoDbBackup/config.xml 
b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/MongoDbBackup/config.xml new file mode 100644 index 0000000000..ff3d4bd8f3 --- /dev/null +++ b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/MongoDbBackup/config.xml @@ -0,0 +1,130 @@ + + + + false + + + + -1 + 10 + -1 + 2 + + + + + false + false + + + + + private_branch + + choice-parameter-189743214208409 + 1 + + true + + + + true + + + MongoDbBackup + OpsAdministration/dev/Core/MongoDbBackup + + + ET_FORMATTED_HTML + true + + + branch_or_tag + + choice-parameter-189743216959018 + 1 + + true + + + + true + + + MongoDbBackup + OpsAdministration/dev/Core/MongoDbBackup + + + ET_FORMATTED_HTML + true + + + host + + mongo_master + false + + + + + 0 + 0 + + false + project + false + + + + + + + 30 3 * * * + + + + + + + 2 + + + https://github.com/keshavprasadms/sunbird-devops.git + + + + + ${branch_or_tag} + + + false + + + + true + false + + 0 + false + + + + pipelines/backup/mongodb-backup/Jenkinsfile + false + + + false + From 332c7d70b3b37991aebcb75c0a021076780fb95c Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 28 Sep 2022 15:34:49 +0530 Subject: [PATCH 028/616] fix: delegate container creation to local Signed-off-by: Keshav Prasad --- ansible/roles/es-azure-snapshot/tasks/main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ansible/roles/es-azure-snapshot/tasks/main.yml b/ansible/roles/es-azure-snapshot/tasks/main.yml index e804b4344d..8ce0fcd267 100644 --- a/ansible/roles/es-azure-snapshot/tasks/main.yml +++ b/ansible/roles/es-azure-snapshot/tasks/main.yml @@ -8,6 +8,8 @@ include_role: name: azure-cloud-storage tasks_from: container-create.yml + apply: + delegate_to: localhost vars: blob_container_name: "{{ es_backup_storage }}" container_public_access: "off" From 765c15c25bf08de64b2ca46450f3f6507bc5851c Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 29 Sep 2022 06:41:34 +0530 Subject: [PATCH 029/616] fix: updated pip package name Signed-off-by: Keshav Prasad 
--- ansible/bootstrap.yml | 2 +- pipelines/backup/es-backup/Jenkinsfile | 2 +- private_repo/ansible/inventory/dev/Core/common.yml | 3 --- private_repo/ansible/inventory/dev/Core/secrets.yml | 3 --- 4 files changed, 2 insertions(+), 8 deletions(-) diff --git a/ansible/bootstrap.yml b/ansible/bootstrap.yml index aba26fbbd4..d8bf9fa494 100644 --- a/ansible/bootstrap.yml +++ b/ansible/bootstrap.yml @@ -8,7 +8,7 @@ - name: Installing other packages apt: state: present - name: ['python-pkg-resources', 'python2-pip'] + name: ['python-pkg-resources', 'python-pip'] when: ansible_distribution_version | float < 18 - name: Installing other packages apt: diff --git a/pipelines/backup/es-backup/Jenkinsfile b/pipelines/backup/es-backup/Jenkinsfile index 81d46a2a2b..a941380cb9 100644 --- a/pipelines/backup/es-backup/Jenkinsfile +++ b/pipelines/backup/es-backup/Jenkinsfile @@ -25,7 +25,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/es.yml" - ansibleExtraArgs = "--tags \"es_backup\" -v --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--tags \"es_backup\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index 8277399b44..bd034f9bb3 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -22,7 +22,6 @@ alerts_mailing_list : "devops@myorg.com" # Comma separat # Define the below if you are using Azure Cloud -# Management Storage Account # Note - You can use the same azure account for the below variables or have separate azure accounts sunbird_public_storage_account_name: "change.azure.storage.account.name" # Azure account name for 
storing public data (like contents) sunbird_private_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing private data (like reports, telemetry data) @@ -35,11 +34,9 @@ azure_management_storage_account_name: "{{ sunbird_management_storage_account_na azure_artifact_storage_account_name: "{{ sunbird_artifact_storage_account_name }}" # Define the below if you are using AWS Cloud -# Management Storage Bucket aws_management_bucket_name: "" # Define the below if you are using Google Cloud -# Management Storage Bucket gcs_management_bucket_name: "" # ------------------------------------------------------------------------------------------------------------ # diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index f157d85862..cf76c3d66e 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -22,7 +22,6 @@ sunbird_management_storage_account_key: "change.azure.storage.account.key" sunbird_artifact_storage_account_key: "{{ sunbird_management_storage_account_key }}" # Define the below if you are using Azure Cloud -# Management Storage Account azure_public_storage_account_key: "{{ sunbird_public_storage_account_key }}" azure_private_storage_account_key: "{{ sunbird_private_storage_account_key }}" azure_management_storage_account_key: "{{ sunbird_management_storage_account_key }}" @@ -31,12 +30,10 @@ azure_public_storage_account_sas: "{{ sunbird_public_storage_account_sas }}" azure_management_storage_account_sas: "{{ sunbird_management_storage_account_sas }}" # Define the below if you are using AWS Cloud -# Management Storage Bucket aws_management_bucket_user_access_key: "" aws_management_bucket_user_secret_key: "" # Define the below if you are using Google Cloud -# Management Storage Bucket gcs_management_bucket_service_account: | From 769de28fb936a02ab4f881674fa521ec28a13d9d Mon Sep 17 00:00:00 2001 
From: Keshav Prasad Date: Thu, 29 Sep 2022 06:50:28 +0530 Subject: [PATCH 030/616] fix: remove unnessary all tags Signed-off-by: Keshav Prasad --- ansible/bootstrap.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ansible/bootstrap.yml b/ansible/bootstrap.yml index d8bf9fa494..30d57ca52f 100644 --- a/ansible/bootstrap.yml +++ b/ansible/bootstrap.yml @@ -19,9 +19,8 @@ - bootstrap_any tags: - bootstrap_any - - all -- hosts: "{{hosts}}" +- hosts: "{{ hosts }}" become: yes ignore_unreachable: yes vars_files: @@ -40,5 +39,4 @@ roles: - vm-agents-nodeexporter tags: - - node_exporter - - all + - node_exporter \ No newline at end of file From beeabe072c2705b52a580c23ce87153113d41642 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 29 Sep 2022 14:03:28 +0530 Subject: [PATCH 031/616] fix: renamed to cassandra backup folder (#3560) Signed-off-by: Keshav Prasad --- ansible/roles/cassandra-backup/tasks/main.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml index ac0682c58a..bbc7246c48 100755 --- a/ansible/roles/cassandra-backup/tasks/main.yml +++ b/ansible/roles/cassandra-backup/tasks/main.yml @@ -14,11 +14,11 @@ mode: 0755 - set_fact: - cassandra_backup_gzip_file_name: "cassandra-backup-{{ lookup('pipe', 'date +%Y%m%d') }}-{{ ansible_hostname }}-new" + cassandra_backup_folder_name: "cassandra-backup-{{ lookup('pipe', 'date +%Y%m%d') }}-{{ ansible_hostname }}-new" - name: run the backup script become: true - shell: python3 cassandra_backup.py --snapshotname "{{ cassandra_backup_gzip_file_name }}" --snapshotdirectory "{{ cassandra_backup_gzip_file_name }}" "{{additional_arguments|d('')}}" + shell: python3 cassandra_backup.py --snapshotname "{{ cassandra_backup_folder_name }}" --snapshotdirectory "{{ cassandra_backup_folder_name }}" "{{additional_arguments|d('')}}" args: chdir: /data/cassandra/backup async: 14400 @@ -39,7 +39,8 
@@ vars: blob_container_name: "{{ cassandra_backup_storage }}" container_public_access: "off" - local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_gzip_file_name }}" + blob_container_folder_path: "" + local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}" storage_account_name: "{{ azure_management_storage_account_name }}" storage_account_sas_token: "{{ azure_management_storage_account_sas }}" when: cloud_service_provider == "azure" From 29dc554718d918fc76027b6427c32bce9b6d99db Mon Sep 17 00:00:00 2001 From: Akhil <30873558+saiakhil46@users.noreply.github.com> Date: Mon, 17 Oct 2022 16:44:45 +0530 Subject: [PATCH 032/616] Updated post-install script (#3564) --- .../tasks/knowledge_platform_tasks.yaml | 12 +++---- ansible/roles/post-install/tasks/main.yml | 6 ++-- .../roles/post-install/tasks/user_org.yaml | 32 +------------------ kubernetes/pipelines/post-install/Jenkinsfile | 2 +- 4 files changed, 11 insertions(+), 41 deletions(-) diff --git a/ansible/roles/post-install/tasks/knowledge_platform_tasks.yaml b/ansible/roles/post-install/tasks/knowledge_platform_tasks.yaml index f7a788d417..d88878755d 100644 --- a/ansible/roles/post-install/tasks/knowledge_platform_tasks.yaml +++ b/ansible/roles/post-install/tasks/knowledge_platform_tasks.yaml @@ -11,7 +11,7 @@ } } with_items: "{{ master_category }}" - delegate_to: "{{ learning_vm_ip }}" + delegate_to: "{{ learningservice_ip }}" - name: Create Object category uri: @@ -73,7 +73,7 @@ } } } - delegate_to: "{{ learning_vm_ip }}" + delegate_to: "{{ learningservice_ip }}" - name: Create framework category uri: @@ -89,7 +89,7 @@ } } with_items: "{{ framework_category }}" - delegate_to: "{{ learning_vm_ip }}" + delegate_to: "{{ learningservice_ip }}" - name: Create framework terms uri: @@ -105,7 +105,7 @@ } } with_items: "{{ framework_terms }}" - delegate_to: "{{ learning_vm_ip }}" + delegate_to: "{{ learningservice_ip }}" - name: Publish Framework uri: @@ -116,7 +116,7 @@ 
X-Channel-Id: "{{ sunbird_custodian_org_id }}" body: |- {} - delegate_to: "{{ learning_vm_ip }}" + delegate_to: "{{ learningservice_ip }}" - name: Assosiating framework with channel uri: @@ -153,7 +153,7 @@ "fields": ["name","identifier","code","description"] } } - delegate_to: "{{ learning_vm_ip }}" + delegate_to: "{{ learningservice_ip }}" register: created_framework - name: Printing the output debug: diff --git a/ansible/roles/post-install/tasks/main.yml b/ansible/roles/post-install/tasks/main.yml index 121e4d0653..84513e9d84 100644 --- a/ansible/roles/post-install/tasks/main.yml +++ b/ansible/roles/post-install/tasks/main.yml @@ -65,6 +65,6 @@ } } -# - import_tasks: knowledge_platform_tasks.yaml -# - import_tasks: user_org.yaml -# - import_tasks: forms.yaml +- import_tasks: knowledge_platform_tasks.yaml +- import_tasks: user_org.yaml +- import_tasks: forms.yaml diff --git a/ansible/roles/post-install/tasks/user_org.yaml b/ansible/roles/post-install/tasks/user_org.yaml index caee5ebaa7..e06f28ad2b 100644 --- a/ansible/roles/post-install/tasks/user_org.yaml +++ b/ansible/roles/post-install/tasks/user_org.yaml @@ -72,11 +72,6 @@ "operation":"add", "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] }, - { - "role": "OFFICIAL_TEXTBOOK_BADGE_ISSUER", - "operation":"add", - "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] - }, { "role": "PUBLIC", "operation":"add", @@ -87,11 +82,6 @@ "operation":"add", "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] }, - { - "role": "TEACHER_BADGE_ISSUER", - "operation":"add", - "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] - }, { "role": "CONTENT_CREATOR", "operation":"add", @@ -103,12 +93,7 @@ "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] }, { - "role": "SYSTEM_ADMINISTRATION", - "operation":"add", - "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] - }, - { - "role": "ANNOUNCEMENT_SENDER", + "role": "SYSTEM_ADMINISTRATION", 
"operation":"add", "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] }, @@ -127,26 +112,11 @@ "operation":"add", "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] }, - { - "role": "CONTENT_REVIEW", - "operation":"add", - "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] - }, - { - "role": "CONTENT_CREATION", - "operation":"add", - "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] - }, { "role": "MEMBERSHIP_MANAGEMENT", "operation":"add", "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] }, - { - "role": "COURSE_CREATOR", - "operation":"add", - "scope": [{ "organisationId": "{{ sunbird_custodian_org_id }}" }] - }, { "role": "BOOK_CREATOR", "operation":"add", diff --git a/kubernetes/pipelines/post-install/Jenkinsfile b/kubernetes/pipelines/post-install/Jenkinsfile index deda3ff045..bab10fb0fa 100644 --- a/kubernetes/pipelines/post-install/Jenkinsfile +++ b/kubernetes/pipelines/post-install/Jenkinsfile @@ -24,7 +24,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim().toLowerCase() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "$currentWs/ansible/post-install.yaml" - ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag} -v" + ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass -v" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) From a1ab95bf3c2b49c74523a86e92c827ec8581ca87 Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Mon, 17 Oct 2022 19:24:22 +0530 Subject: [PATCH 033/616] certificates api added --- ansible/roles/kong-api/defaults/main.yml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/ansible/roles/kong-api/defaults/main.yml b/ansible/roles/kong-api/defaults/main.yml index 907e250460..edd37b3470 100644 --- a/ansible/roles/kong-api/defaults/main.yml +++ 
b/ansible/roles/kong-api/defaults/main.yml @@ -9748,3 +9748,23 @@ kong_apis: - name: opa-checks config.required: true config.enabled: true +- name: getDetailsOfProjectsWithCertificate + uris: "{{ userProjects_service_prefix }}/mlprojects/v1/certificates" + upstream_url: "{{ ml_project_service_url }}/v1/userProjects/certificates" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - projectAccess + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: opa-checks + config.required: false + config.enabled: false From e37ca7291abf51ec385d9c464a3852f32b5724f1 Mon Sep 17 00:00:00 2001 From: Jayaprakash8887 Date: Wed, 19 Oct 2022 14:07:45 +0530 Subject: [PATCH 034/616] Issue #KN-9 feat: Content Publish API refactor. --- .../stack-sunbird/templates/content-service_application.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible/roles/stack-sunbird/templates/content-service_application.conf b/ansible/roles/stack-sunbird/templates/content-service_application.conf index d33dbecf6f..b45d85dee1 100644 --- a/ansible/roles/stack-sunbird/templates/content-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/content-service_application.conf @@ -493,6 +493,7 @@ kafka { urls : "{{ kafka_urls }}" topic.send.enable : true topics.instruction : "{{ env_name }}.learning.job.request" + publish.request.topic : "{{ env_name }}.publish.job.request" } # DIAL Link Config From cde575b2de6bfa1e30fdc04fa999cdeae37c4bbb Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 20 Oct 2022 16:49:51 +0530 Subject: [PATCH 035/616] Updated the ingestion specs --- ansible/roles/ml-analytics-service/defaults/main.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml 
b/ansible/roles/ml-analytics-service/defaults/main.yml index 915992714e..abd8aae3b6 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -52,10 +52,10 @@ ml_analytics_api_access_token: "{{ml_api_access_token | default('ml_core_interna ml_analytics_druid_observation_status_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": "azure","uris": ["azure://telemetry-data-store/observation/status/sl_observation_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-observation-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"auto"},"dimensionsSpec":{"dimensions":["status","entity_externalId","entity_id","entity_type","solution_id","solution_externalId","submission_id","entity_name","solution_name","role_title","school_name","school_code","school_externalId","state_name","state_code","state_externalId","district_name","district_code","district_externalId","block_name","block_code","block_externalId","cluster_name","cluster_code","cluster_externalId","completedDate","channel","parent_channel","program_id","program_externalId","program_name","app_name","user_id","private_program","solution_type","organisation_name","ecm_marked_na","board_name","updatedAt","organisation_id","user_type","observed_school_name","observed_school_id","observed_school_code","observed_state_name","observed_state_id","observed_state_code","observed_district_name","observed_district_id","observed_district_code","observed_block_name","observed_block_id","observed_block_code","observed_cluster_name","observed_cluster_id","observed_cluster_code"]},"metricsSpec":[]}}}' ml_analytics_druid_project_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": 
"azure","uris": ["azure://telemetry-data-store/projects/sl_projects.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-project","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"project_updated_date","format":"auto"},"dimensionsSpec":{"dimensions":[]},"metricsSpec":[]}}}' ml_analytics_azure_sas_token: "{{ sunbird_private_storage_account_key }}" -ml_analytics_druid_distinctCnt_obs_injestion_spec: '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount/ml_observation_distinctCount_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"status"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' -ml_analytics_druid_distinctCnt_obs_domain_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain/ml_observation_distinctCount_domain.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' -ml_analytics_druid_distinctCnt_obs_domain_criteria_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain_criteria/ml_observation_distinctCount_domain_criteria.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain-criteria","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"string","name":"criteria_name"},{"type":"string","name":"criteria_score"},{"type":"string","name":"criteria_id"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' -ml_analytics_druid_distinctCnt_projects_status_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/distinctCount/ml_projects_distinctCount.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"project_title"},{"type":"string","name":"solution_id"},{"type":"string","name":"status_of_project"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"private_program"},{"type":"string","name":"project_created_type"},{"type":"string","name":"parent_channel"},{"type":"long","name":"unique_projects"},{"type":"long","name":"unique_users"},{"type":"long","name":"no_of_imp_with_evidence"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' +ml_analytics_druid_distinctCnt_obs_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount/ml_observation_distinctCount_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"status"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' +ml_analytics_druid_distinctCnt_obs_domain_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain/ml_observation_distinctCount_domain.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' +ml_analytics_druid_distinctCnt_obs_domain_criteria_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain_criteria/ml_observation_distinctCount_domain_criteria.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain-criteria","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"string","name":"criteria_name"},{"type":"string","name":"criteria_score"},{"type":"string","name":"criteria_id"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' +ml_analytics_druid_distinctCnt_projects_status_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/distinctCount/ml_projects_distinctCount.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"project_title"},{"type":"string","name":"solution_id"},{"type":"string","name":"status_of_project"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"private_program"},{"type":"string","name":"project_created_type"},{"type":"string","name":"parent_channel"},{"type":"long","name":"unique_projects"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"long","name":"no_of_imp_with_evidence"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' ml_analytics_obs_distinctCnt_azure_blob_path: "observation/distinctCount/" ml_analytics_obs_distinctCnt_domain_azure_blob_path: "observation/distinctCount_domain/" ml_analytics_obs_distinctCnt_domain_criteria_azure_blob_path: "observation/distinctCount_domain_criteria/" From f2dacabae324e635d74e42ca451f9dcf080b8add Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Tue, 25 Oct 2022 11:57:37 +0530 Subject: [PATCH 036/616] project certificate schema jsons added --- .../registry/schemas/ProjectCertificate.json | 75 ++++++ .../final_project_credential_template.json | 
28 +++ .../schema/final_project_sunbird_context.json | 91 +++++++ .../schema/final_project_v1_context.json | 237 ++++++++++++++++++ 4 files changed, 431 insertions(+) create mode 100644 kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json create mode 100644 utils/sunbird-RC/schema/final_project_credential_template.json create mode 100644 utils/sunbird-RC/schema/final_project_sunbird_context.json create mode 100644 utils/sunbird-RC/schema/final_project_v1_context.json diff --git a/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json new file mode 100644 index 0000000000..c8555839d8 --- /dev/null +++ b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json @@ -0,0 +1,75 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema", + "type": "object", + "properties": { + "ProjectCertificate": { + "$ref": "#/definitions/ProjectCertificate" + } + }, + "required": [ + "ProjectCertificate" + ], + "title": "ProjectCertificate", + "definitions": { + "ProjectCertificate": { + "$id": "#/properties/ProjectCertificate", + "type": "object", + "title": "The ProjectCertificate Schema", + "required": [ + "recipient" + ], + "properties": { + "status": { + "type": "string", + "enum": ["ACTIVE", "REVOKED", "DELETED"] + }, + "recipient":{ + "$id": "#/properties/recipient", + "$ref": "Recipient.json#/definitions/Recipient" + }, + "templateUrl": { + "type": "string" + }, + "issuer":{ + "$id": "#/properties/issuer", + "$ref": "Issuer.json#/definitions/Issuer" + }, + "projectName":{ + "type": "string" + }, + "projectId":{ + "type": "string" + }, + "solutionId":{ + "type": "string" + }, + "solutionName":{ + "type": "string" + }, + "programId":{ + "type": "string" + }, + "programName":{ + "type": "string" + }, + "completedDate": { + "type": "string" + } + + } + } + }, + "_osConfig": { + "uniqueIndexFields": [ + ], + "ownershipAttributes": [], + "roles": 
[ + ], + "inviteRoles": [ + "anonymous" + ], + "systemFields": ["osCreatedAt", "osUpdatedAt", "osCreatedBy", "osUpdatedBy"], + "enableLogin": false, + "credentialTemplate": "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/final_project_credential_template.json" + } +} \ No newline at end of file diff --git a/utils/sunbird-RC/schema/final_project_credential_template.json b/utils/sunbird-RC/schema/final_project_credential_template.json new file mode 100644 index 0000000000..2ff3508e79 --- /dev/null +++ b/utils/sunbird-RC/schema/final_project_credential_template.json @@ -0,0 +1,28 @@ +{ + "@context": [ + "https://www.w3.org/2018/credentials/v1", + "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/final_project_v1_context.json", + "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/final_project_sunbird_context.json" + ], + "type": [ + "VerifiableCredential" + ], + "id":"did:sunbird:{{osid}}", + "issuanceDate": "{{osCreatedAt}}", + "credentialSubject": { + "type":"{{certificateLabel}}", + "recipientName": "{{recipient.name}}", + "projectName": "{{projectName}}", + "projectId": "{{projectId}}", + "solutionId": "{{solutionId}}", + "solutionName": "{{solutionName}}" + }, + "issuer":{ + "id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#Issuer", + "type":[ + "Issuer" + ], + "name":"{{issuer.name}}", + "publicKey":["{{issuer.kid}}"] + } + } \ No newline at end of file diff --git a/utils/sunbird-RC/schema/final_project_sunbird_context.json b/utils/sunbird-RC/schema/final_project_sunbird_context.json new file mode 100644 index 0000000000..bbc31a4010 --- /dev/null +++ b/utils/sunbird-RC/schema/final_project_sunbird_context.json @@ -0,0 +1,91 @@ +{ + "@context": { + 
"@version": 1.1, + "@protected": true, + "ProjectCertificate": { + "@id": "https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#ProjectCertificate", + "@context": { + "id": "@id", + "@version": 1.1, + "@protected": true, + "ProjectCertificate": "schema:Text" + } + }, + "projectName":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#projectName", + "@context": { + "name":"schema:Text" + } + }, + "projectId":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#projectId", + "@context": { + "name":"schema:Text" + } + }, + "solutionName":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#solutionName", + "@context": { + "name":"schema:Text" + } + }, + "solutionId":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#solutionId", + "@context": { + "name":"schema:Text" + } + }, + "recipientName":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#recipientName", + "@context": { + "name":"schema:Text" + } + }, + "name":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#name", + "@context": { + "name":"schema:Text" + } + }, + "publicKey":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#publicKey", + "@context": { + "name":"schema:Text" + } + }, + "url":{ + 
"@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#url", + "@context": { + "name":"schema:Text" + } + }, + "designation":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#designation", + "@context": { + "name":"schema:Text" + } + }, + "image":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#image", + "@context": { + "name":"schema:Text" + } + }, + "identity":{ + "@id":"https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#identity", + "@context": { + "name":"schema:Text" + } + }, + "signatory": { + "@id":"https://github.com/sunbird-specs/vc-specs#signatory", + "@container": "@list" + }, + "templateUrl": { + "@id": "https://raw.githubusercontent.com/project-sunbird/sunbird-devops/release-4.8.0/kubernetes/helm_charts/sunbird-RC/registry/templates/READ.md#templateUrl", + "@context": { + "name": "schema:Text" + } + } + } +} \ No newline at end of file diff --git a/utils/sunbird-RC/schema/final_project_v1_context.json b/utils/sunbird-RC/schema/final_project_v1_context.json new file mode 100644 index 0000000000..d028ec2a3f --- /dev/null +++ b/utils/sunbird-RC/schema/final_project_v1_context.json @@ -0,0 +1,237 @@ +{ + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "VerifiableCredential": { + "@id": "https://www.w3.org/2018/credentials#VerifiableCredential", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "cred": "https://www.w3.org/2018/credentials#", + "sec": "https://w3id.org/security#", + "xsd": "http://www.w3.org/2001/XMLSchema#", + + "credentialSchema": { + "@id": "cred:credentialSchema", + 
"@type": "@id", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "cred": "https://www.w3.org/2018/credentials#", + + "JsonSchemaValidator2018": "cred:JsonSchemaValidator2018" + } + }, + "credentialStatus": {"@id": "cred:credentialStatus", "@type": "@id"}, + "credentialSubject": {"@id": "cred:credentialSubject", "@type": "@id"}, + "evidence": {"@id": "cred:evidence", "@type": "@id"}, + "expirationDate": {"@id": "cred:expirationDate", "@type": "xsd:dateTime"}, + "holder": {"@id": "cred:holder", "@type": "@id"}, + "issued": {"@id": "cred:issued", "@type": "xsd:dateTime"}, + "issuer": {"@id": "cred:issuer", "@type": "@id"}, + "issuanceDate": {"@id": "cred:issuanceDate", "@type": "xsd:dateTime"}, + "proof": {"@id": "sec:proof", "@type": "@id", "@container": "@graph"}, + "refreshService": { + "@id": "cred:refreshService", + "@type": "@id", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "cred": "https://www.w3.org/2018/credentials#", + + "ManualRefreshService2018": "cred:ManualRefreshService2018" + } + }, + "termsOfUse": {"@id": "cred:termsOfUse", "@type": "@id"}, + "validFrom": {"@id": "cred:validFrom", "@type": "xsd:dateTime"}, + "validUntil": {"@id": "cred:validUntil", "@type": "xsd:dateTime"} + } + }, + + "VerifiablePresentation": { + "@id": "https://www.w3.org/2018/credentials#VerifiablePresentation", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "cred": "https://www.w3.org/2018/credentials#", + "sec": "https://w3id.org/security#", + + "holder": {"@id": "cred:holder", "@type": "@id"}, + "proof": {"@id": "sec:proof", "@type": "@id", "@container": "@graph"}, + "verifiableCredential": {"@id": "cred:verifiableCredential", "@type": "@id", "@container": "@graph"} + } + }, + + "EcdsaSecp256k1Signature2019": { + "@id": "https://w3id.org/security#EcdsaSecp256k1Signature2019", + "@context": { + "@version": 1.1, + "@protected": 
true, + + "id": "@id", + "type": "@type", + + "sec": "https://w3id.org/security#", + "xsd": "http://www.w3.org/2001/XMLSchema#", + + "challenge": "sec:challenge", + "created": {"@id": "http://purl.org/dc/terms/created", "@type": "xsd:dateTime"}, + "domain": "sec:domain", + "expires": {"@id": "sec:expiration", "@type": "xsd:dateTime"}, + "jws": "sec:jws", + "nonce": "sec:nonce", + "proofPurpose": { + "@id": "sec:proofPurpose", + "@type": "@vocab", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "sec": "https://w3id.org/security#", + + "assertionMethod": {"@id": "sec:assertionMethod", "@type": "@id", "@container": "@set"}, + "authentication": {"@id": "sec:authenticationMethod", "@type": "@id", "@container": "@set"} + } + }, + "proofValue": "sec:proofValue", + "verificationMethod": {"@id": "sec:verificationMethod", "@type": "@id"} + } + }, + + "EcdsaSecp256r1Signature2019": { + "@id": "https://w3id.org/security#EcdsaSecp256r1Signature2019", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "sec": "https://w3id.org/security#", + "xsd": "http://www.w3.org/2001/XMLSchema#", + + "challenge": "sec:challenge", + "created": {"@id": "http://purl.org/dc/terms/created", "@type": "xsd:dateTime"}, + "domain": "sec:domain", + "expires": {"@id": "sec:expiration", "@type": "xsd:dateTime"}, + "jws": "sec:jws", + "nonce": "sec:nonce", + "proofPurpose": { + "@id": "sec:proofPurpose", + "@type": "@vocab", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "sec": "https://w3id.org/security#", + + "assertionMethod": {"@id": "sec:assertionMethod", "@type": "@id", "@container": "@set"}, + "authentication": {"@id": "sec:authenticationMethod", "@type": "@id", "@container": "@set"} + } + }, + "proofValue": "sec:proofValue", + "verificationMethod": {"@id": "sec:verificationMethod", "@type": "@id"} + } + }, + + "Ed25519Signature2018": { + "@id": 
"https://w3id.org/security#Ed25519Signature2018", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "sec": "https://w3id.org/security#", + "xsd": "http://www.w3.org/2001/XMLSchema#", + + "challenge": "sec:challenge", + "created": {"@id": "http://purl.org/dc/terms/created", "@type": "xsd:dateTime"}, + "domain": "sec:domain", + "expires": {"@id": "sec:expiration", "@type": "xsd:dateTime"}, + "jws": "sec:jws", + "nonce": "sec:nonce", + "proofPurpose": { + "@id": "sec:proofPurpose", + "@type": "@vocab", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "sec": "https://w3id.org/security#", + + "assertionMethod": {"@id": "sec:assertionMethod", "@type": "@id", "@container": "@set"}, + "authentication": {"@id": "sec:authenticationMethod", "@type": "@id", "@container": "@set"} + } + }, + "proofValue": "sec:proofValue", + "verificationMethod": {"@id": "sec:verificationMethod", "@type": "@id"} + } + }, + + "RsaSignature2018": { + "@id": "https://w3id.org/security#RsaSignature2018", + "@context": { + "@version": 1.1, + "@protected": true, + + "challenge": "sec:challenge", + "created": {"@id": "http://purl.org/dc/terms/created", "@type": "xsd:dateTime"}, + "domain": "sec:domain", + "expires": {"@id": "sec:expiration", "@type": "xsd:dateTime"}, + "jws": "sec:jws", + "nonce": "sec:nonce", + "proofPurpose": { + "@id": "sec:proofPurpose", + "@type": "@vocab", + "@context": { + "@version": 1.1, + "@protected": true, + + "id": "@id", + "type": "@type", + + "sec": "https://w3id.org/security#", + + "assertionMethod": {"@id": "sec:assertionMethod", "@type": "@id", "@container": "@set"}, + "authentication": {"@id": "sec:authenticationMethod", "@type": "@id", "@container": "@set"} + } + }, + "proofValue": "sec:proofValue", + "verificationMethod": {"@id": "sec:verificationMethod", "@type": "@id"} + } + }, + + "proof": {"@id": "https://w3id.org/security#proof", "@type": "@id", "@container": 
"@graph"} + } + } \ No newline at end of file From d32a99d8194a51e78f7a0c59a161cd52282528b9 Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Tue, 25 Oct 2022 17:29:21 +0530 Subject: [PATCH 037/616] certicificate schema filename changes and project certificate api added --- ansible/roles/kong-api/defaults/main.yml | 25 ++++++++++- .../registry/schemas/ProjectCertificate.json | 44 +++++++++---------- ....json => project_credential_template.json} | 4 +- ...text.json => project_sunbird_context.json} | 0 ...1_context.json => project_v1_context.json} | 0 5 files changed, 48 insertions(+), 25 deletions(-) rename utils/sunbird-RC/schema/{final_project_credential_template.json => project_credential_template.json} (82%) rename utils/sunbird-RC/schema/{final_project_sunbird_context.json => project_sunbird_context.json} (100%) rename utils/sunbird-RC/schema/{final_project_v1_context.json => project_v1_context.json} (100%) diff --git a/ansible/roles/kong-api/defaults/main.yml b/ansible/roles/kong-api/defaults/main.yml index edd37b3470..2673650119 100644 --- a/ansible/roles/kong-api/defaults/main.yml +++ b/ansible/roles/kong-api/defaults/main.yml @@ -9748,7 +9748,8 @@ kong_apis: - name: opa-checks config.required: true config.enabled: true -- name: getDetailsOfProjectsWithCertificate + +- name: projectCertificateList uris: "{{ userProjects_service_prefix }}/mlprojects/v1/certificates" upstream_url: "{{ ml_project_service_url }}/v1/userProjects/certificates" strip_uri: true @@ -9768,3 +9769,25 @@ kong_apis: - name: opa-checks config.required: false config.enabled: false +- name: createRCProjectCertificate + uris: "{{ registry_service_prefix }}/certificate/v1/create" + upstream_url: "{{ registry_service_url }}/api/v1/ProjectCertificate" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - certificateCreate + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + 
config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: request-transformer + config.remove.headers: Authorization + - name: opa-checks + config.required: true + config.enabled: true diff --git a/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json index c8555839d8..31257ac8b1 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json +++ b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json @@ -34,27 +34,27 @@ "$id": "#/properties/issuer", "$ref": "Issuer.json#/definitions/Issuer" }, - "projectName":{ - "type": "string" - }, - "projectId":{ - "type": "string" - }, - "solutionId":{ - "type": "string" - }, - "solutionName":{ - "type": "string" - }, - "programId":{ - "type": "string" - }, - "programName":{ - "type": "string" - }, - "completedDate": { - "type": "string" - } + "projectName":{ + "type": "string" + }, + "projectId":{ + "type": "string" + }, + "solutionId":{ + "type": "string" + }, + "solutionName":{ + "type": "string" + }, + "programId":{ + "type": "string" + }, + "programName":{ + "type": "string" + }, + "completedDate": { + "type": "string" + } } } @@ -70,6 +70,6 @@ ], "systemFields": ["osCreatedAt", "osUpdatedAt", "osCreatedBy", "osUpdatedBy"], "enableLogin": false, - "credentialTemplate": "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/final_project_credential_template.json" + "credentialTemplate": "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_credential_template.json" } } \ No newline at end of file diff --git a/utils/sunbird-RC/schema/final_project_credential_template.json b/utils/sunbird-RC/schema/project_credential_template.json similarity index 82% rename from 
utils/sunbird-RC/schema/final_project_credential_template.json rename to utils/sunbird-RC/schema/project_credential_template.json index 2ff3508e79..230fdccce9 100644 --- a/utils/sunbird-RC/schema/final_project_credential_template.json +++ b/utils/sunbird-RC/schema/project_credential_template.json @@ -1,8 +1,8 @@ { "@context": [ "https://www.w3.org/2018/credentials/v1", - "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/final_project_v1_context.json", - "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/final_project_sunbird_context.json" + "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_v1_context.json", + "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_sunbird_context.json" ], "type": [ "VerifiableCredential" diff --git a/utils/sunbird-RC/schema/final_project_sunbird_context.json b/utils/sunbird-RC/schema/project_sunbird_context.json similarity index 100% rename from utils/sunbird-RC/schema/final_project_sunbird_context.json rename to utils/sunbird-RC/schema/project_sunbird_context.json diff --git a/utils/sunbird-RC/schema/final_project_v1_context.json b/utils/sunbird-RC/schema/project_v1_context.json similarity index 100% rename from utils/sunbird-RC/schema/final_project_v1_context.json rename to utils/sunbird-RC/schema/project_v1_context.json From 179885117785d858a1071ca54718bf659ce1e3f1 Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Tue, 25 Oct 2022 17:39:48 +0530 Subject: [PATCH 038/616] format changes --- ansible/roles/kong-api/defaults/main.yml | 1 + .../registry/schemas/ProjectCertificate.json | 54 +++++++++---------- 2 files changed, 28 insertions(+), 27 deletions(-) diff --git a/ansible/roles/kong-api/defaults/main.yml 
b/ansible/roles/kong-api/defaults/main.yml index 2673650119..f9ae07e61c 100644 --- a/ansible/roles/kong-api/defaults/main.yml +++ b/ansible/roles/kong-api/defaults/main.yml @@ -9769,6 +9769,7 @@ kong_apis: - name: opa-checks config.required: false config.enabled: false + - name: createRCProjectCertificate uris: "{{ registry_service_prefix }}/certificate/v1/create" upstream_url: "{{ registry_service_url }}/api/v1/ProjectCertificate" diff --git a/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json index 31257ac8b1..98f3bb91e9 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json +++ b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json @@ -11,29 +11,29 @@ ], "title": "ProjectCertificate", "definitions": { - "ProjectCertificate": { - "$id": "#/properties/ProjectCertificate", - "type": "object", - "title": "The ProjectCertificate Schema", - "required": [ - "recipient" - ], - "properties": { - "status": { - "type": "string", - "enum": ["ACTIVE", "REVOKED", "DELETED"] - }, - "recipient":{ - "$id": "#/properties/recipient", - "$ref": "Recipient.json#/definitions/Recipient" - }, - "templateUrl": { - "type": "string" - }, - "issuer":{ - "$id": "#/properties/issuer", - "$ref": "Issuer.json#/definitions/Issuer" - }, + "ProjectCertificate": { + "$id": "#/properties/ProjectCertificate", + "type": "object", + "title": "The ProjectCertificate Schema", + "required": [ + "recipient" + ], + "properties": { + "status": { + "type": "string", + "enum": ["ACTIVE", "REVOKED", "DELETED"] + }, + "recipient":{ + "$id": "#/properties/recipient", + "$ref": "Recipient.json#/definitions/Recipient" + }, + "templateUrl": { + "type": "string" + }, + "issuer":{ + "$id": "#/properties/issuer", + "$ref": "Issuer.json#/definitions/Issuer" + }, "projectName":{ "type": "string" }, @@ -55,10 +55,10 @@ "completedDate": { "type": "string" } - - } - } - 
}, + + } + } + }, "_osConfig": { "uniqueIndexFields": [ ], From 2b7747c2e0636f53ff529cf97de0369c80d756f7 Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Fri, 28 Oct 2022 18:56:55 +0530 Subject: [PATCH 039/616] project certificate download new api added --- ansible/roles/kong-api/defaults/main.yml | 25 +++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/ansible/roles/kong-api/defaults/main.yml b/ansible/roles/kong-api/defaults/main.yml index f9ae07e61c..ff4323914e 100644 --- a/ansible/roles/kong-api/defaults/main.yml +++ b/ansible/roles/kong-api/defaults/main.yml @@ -9769,7 +9769,7 @@ kong_apis: - name: opa-checks config.required: false config.enabled: false - + - name: createRCProjectCertificate uris: "{{ registry_service_prefix }}/certificate/v1/create" upstream_url: "{{ registry_service_url }}/api/v1/ProjectCertificate" @@ -9792,3 +9792,26 @@ kong_apis: - name: opa-checks config.required: true config.enabled: true + +- name: getProjectRCCertificate + uris: "{{ registry_service_prefix }}/projetCertificate/v1/download" + upstream_url: "{{ registry_service_url }}/api/v1/ProjectCertificate" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - anonymousCertificateAccess + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: request-transformer + config.remove.headers: Authorization + - name: opa-checks + config.required: true + config.enabled: true From 97c0afc24d3fc9810364545022352b5bcae8eb34 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Sun, 30 Oct 2022 09:03:04 +0530 Subject: [PATCH 040/616] Add gcloud role and related tasks (#3566) --- ansible/roles/cassandra-backup/tasks/main.yml | 12 +++++ 
ansible/roles/gcloud-cli/tasks/main.yml | 19 +++++++ .../roles/gcp-cloud-storage/defaults/main.yml | 49 +++++++++++++++++++ .../gcp-cloud-storage/tasks/delete-batch.yml | 11 +++++ .../gcp-cloud-storage/tasks/download.yml | 11 +++++ .../gcp-cloud-storage/tasks/gcloud-auth.yml | 14 ++++++ .../gcp-cloud-storage/tasks/gcloud-revoke.yml | 8 +++ .../roles/gcp-cloud-storage/tasks/main.yml | 20 ++++++++ .../gcp-cloud-storage/tasks/upload-batch.yml | 11 +++++ .../roles/gcp-cloud-storage/tasks/upload.yml | 11 +++++ .../roles/postgresql-backup/defaults/main.yml | 2 +- .../roles/postgresql-backup/tasks/main.yml | 11 +++++ .../roles/postgresql-restore/tasks/main.yml | 20 ++++---- .../ansible/inventory/dev/Core/common.yml | 7 ++- .../ansible/inventory/dev/Core/secrets.yml | 3 +- 15 files changed, 197 insertions(+), 12 deletions(-) create mode 100644 ansible/roles/gcloud-cli/tasks/main.yml create mode 100644 ansible/roles/gcp-cloud-storage/defaults/main.yml create mode 100644 ansible/roles/gcp-cloud-storage/tasks/delete-batch.yml create mode 100644 ansible/roles/gcp-cloud-storage/tasks/download.yml create mode 100644 ansible/roles/gcp-cloud-storage/tasks/gcloud-auth.yml create mode 100644 ansible/roles/gcp-cloud-storage/tasks/gcloud-revoke.yml create mode 100644 ansible/roles/gcp-cloud-storage/tasks/main.yml create mode 100644 ansible/roles/gcp-cloud-storage/tasks/upload-batch.yml create mode 100644 ansible/roles/gcp-cloud-storage/tasks/upload.yml diff --git a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml index bbc7246c48..fc662bcea5 100755 --- a/ansible/roles/cassandra-backup/tasks/main.yml +++ b/ansible/roles/cassandra-backup/tasks/main.yml @@ -44,6 +44,18 @@ storage_account_name: "{{ azure_management_storage_account_name }}" storage_account_sas_token: "{{ azure_management_storage_account_sas }}" when: cloud_service_provider == "azure" + +- name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: 
upload-batch.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ cassandra_backup_storage }}" + dest_folder_path: "" + local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}" + when: cloud_service_provider == "gcloud" - name: clean up backup dir after upload file: path="{{ cassandra_backup_dir }}" state=absent + diff --git a/ansible/roles/gcloud-cli/tasks/main.yml b/ansible/roles/gcloud-cli/tasks/main.yml new file mode 100644 index 0000000000..4e39b7ceaf --- /dev/null +++ b/ansible/roles/gcloud-cli/tasks/main.yml @@ -0,0 +1,19 @@ +--- +- name: Add gcloud signing key + apt_key: + url: https://packages.cloud.google.com/apt/doc/apt-key.gpg + state: present + +- name: Add gcloud repository into sources list + apt_repository: + repo: "deb https://packages.cloud.google.com/apt cloud-sdk main" + state: present + +- name: Install google cloud cli with specific version and dependent packages + apt: + pkg: + - ca-certificates + - curl + - apt-transport-https + - gnupg + - google-cloud-cli=406.0.0-0 diff --git a/ansible/roles/gcp-cloud-storage/defaults/main.yml b/ansible/roles/gcp-cloud-storage/defaults/main.yml new file mode 100644 index 0000000000..086cf9c50d --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/defaults/main.yml @@ -0,0 +1,49 @@ +# GCP bucket name +# Example - +# bucket_name: "sunbird-dev-public" +gcp_bucket_name: "" + +# The service account key file +# Example - +# gcp_storage_key_file: "/tmp/gcp.json" +gcp_storage_key_file: "" + +# Folder name in GCP bucket +# Example - +# dest_folder_name: "my-destination-folder" +dest_folder_name: "" + +# The delete pattern to delete files and folder +# Example - +# file_delete_pattern: "my-drectory/*" +# file_delete_pattern: "my-drectory/another-directory/*" +# file_delete_pattern: "*" +file_delete_pattern: "" + +# The path to local file which has to be uploaded to gcloud storage +# The local path to store the file after downloading from gcloud 
storage +# Example - +# local_file_or_folder_path: "/workspace/my-folder/myfile.json" +# local_file_or_folder_path: "/workspace/my-folder" +local_file_or_folder_path: "" + +# The name of the file in gcloud storage after uploading from local path +# The name of the file in gcloud storage that has to be downloaded +# Example - +# dest_file_name: "/myfile-blob.json" +dest_file_name: "" + + +# The folder path in gcloud storage to upload the files starting from the root of the bucket +# This path should start with / if we provide a value for this variable since we are going to append this path as below +# {{ bucket_name }}{{ dest_folder_name }} +# The above translates to "my-bucket/my-folder-path" +# Example - +# dest_folder_path: "/my-folder/json-files-folder" +# This variable can also be empty as shown below, which means we will upload directly at the root path of the bucket +dest_folder_path: "" + +# The local folder path which has to be uploaded to gcloud storage +# Example - +# local_source_folder: "/workspace/my-folder/json-files-folder" +local_source_folder: "" diff --git a/ansible/roles/gcp-cloud-storage/tasks/delete-batch.yml b/ansible/roles/gcp-cloud-storage/tasks/delete-batch.yml new file mode 100644 index 0000000000..ad0e4449d6 --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/tasks/delete-batch.yml @@ -0,0 +1,11 @@ +--- +- name: Authenticate to gcloud + include_tasks: gcloud-auth.yml + +- name: Delete folder recursively in gcp storage + shell: gsutil rm -r "gs://{{ gcp_bucket_name }}/{{ file_delete_pattern }" + async: 3600 + poll: 10 + +- name: Revoke gcloud access + include_tasks: gcloud-revoke.yml diff --git a/ansible/roles/gcp-cloud-storage/tasks/download.yml b/ansible/roles/gcp-cloud-storage/tasks/download.yml new file mode 100644 index 0000000000..c8c6e956ad --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/tasks/download.yml @@ -0,0 +1,11 @@ +--- +- name: Authenticate to gcloud + include_tasks: gcloud-auth.yml + +- name: Download from gcloud 
storage + shell: gsutil cp "gs://{{ gcp_bucket_name }}/{{ dest_folder_name }}/{{ dest_file_name }}" "{{ local_file_or_folder_path }}" + async: 3600 + poll: 10 + +- name: Revoke gcloud access + include_tasks: gcloud-revoke.yml \ No newline at end of file diff --git a/ansible/roles/gcp-cloud-storage/tasks/gcloud-auth.yml b/ansible/roles/gcp-cloud-storage/tasks/gcloud-auth.yml new file mode 100644 index 0000000000..a480bdc275 --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/tasks/gcloud-auth.yml @@ -0,0 +1,14 @@ +--- +- name: create tmp gcp service key file + tempfile: + state: file + suffix: gcp + register: config_key + +- name: Copy service account key file + copy: + content: "{{ gcp_storage_key_file }}" + dest: "{{ config_key.path }}" + +- name: Configure gcloud service account + shell: gcloud auth activate-service-account "{{ gcp_storage_service_account_name }}" --key-file="{{ config_key.path }}" diff --git a/ansible/roles/gcp-cloud-storage/tasks/gcloud-revoke.yml b/ansible/roles/gcp-cloud-storage/tasks/gcloud-revoke.yml new file mode 100644 index 0000000000..8c26cd0ef0 --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/tasks/gcloud-revoke.yml @@ -0,0 +1,8 @@ +- name: Revoke gcloud service account access + shell: gcloud auth revoke "{{ gcp_storage_service_account_name }}" + +- name: Remove key file + file: + path: "{{ config_key.path }}" + state: absent + when: config_key.path is defined diff --git a/ansible/roles/gcp-cloud-storage/tasks/main.yml b/ansible/roles/gcp-cloud-storage/tasks/main.yml new file mode 100644 index 0000000000..aa41c090ed --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: upload file to gcloud storage + include: upload.yml + tags: + - file-upload + +- name: upload batch of files to gcloud storage + include: upload-batch.yml + tags: + - upload-batch + +- name: delete batch of files from gcloud storage + include: delete-batch.yml + tags: + - delete-batch + +- name: download a file from gcloud 
storage + include: download.yml + tags: + - file-download \ No newline at end of file diff --git a/ansible/roles/gcp-cloud-storage/tasks/upload-batch.yml b/ansible/roles/gcp-cloud-storage/tasks/upload-batch.yml new file mode 100644 index 0000000000..49abd5b822 --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/tasks/upload-batch.yml @@ -0,0 +1,11 @@ +--- +- name: Authenticate to gcloud + include_tasks: gcloud-auth.yml + +- name: Upload files from a local directory gcp storage + shell: gsutil -m cp -r "{{ local_file_or_folder_path }}" "gs://{{ gcp_bucket_name }}/{{ dest_folder_name }}/{{ dest_folder_path }}" + async: 3600 + poll: 10 + +- name: Revoke gcloud access + include_tasks: gcloud-revoke.yml diff --git a/ansible/roles/gcp-cloud-storage/tasks/upload.yml b/ansible/roles/gcp-cloud-storage/tasks/upload.yml new file mode 100644 index 0000000000..2f88d9407f --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/tasks/upload.yml @@ -0,0 +1,11 @@ +--- +- name: Authenticate to gcloud + include_tasks: gcloud-auth.yml + +- name: Upload to gcloud storage + shell: gsutil cp "{{ local_file_or_folder_path }}" "gs://{{ gcp_bucket_name }}/{{ dest_folder_name }}/{{ dest_file_name }}" + async: 3600 + poll: 10 + +- name: Revoke gcloud access + include_tasks: gcloud-revoke.yml diff --git a/ansible/roles/postgresql-backup/defaults/main.yml b/ansible/roles/postgresql-backup/defaults/main.yml index f358e4f4f3..0b6a9bca4a 100644 --- a/ansible/roles/postgresql-backup/defaults/main.yml +++ b/ansible/roles/postgresql-backup/defaults/main.yml @@ -7,4 +7,4 @@ postgresql_backup_azure_container_name: postgresql-backup # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -postgresql_backup_storage: "{{ postgresql_backup_azure_container_name }}" \ No newline at end of file +postgresql_backup_storage: "{{ postgresql_backup_azure_container_name }}" diff --git a/ansible/roles/postgresql-backup/tasks/main.yml b/ansible/roles/postgresql-backup/tasks/main.yml index 81ce384afa..0704d4847f 100644 --- a/ansible/roles/postgresql-backup/tasks/main.yml +++ b/ansible/roles/postgresql-backup/tasks/main.yml @@ -26,5 +26,16 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ postgresql_backup_storage }}" + dest_file_name: "{{ postgresql_backup_gzip_file_name }}" + local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" + when: cloud_service_provider == "gcloud" + - name: clean up backup dir after upload file: path="{{ postgresql_backup_dir }}" state=absent \ No newline at end of file diff --git a/ansible/roles/postgresql-restore/tasks/main.yml b/ansible/roles/postgresql-restore/tasks/main.yml index b95eff5751..ec6a40494d 100644 --- a/ansible/roles/postgresql-restore/tasks/main.yml +++ b/ansible/roles/postgresql-restore/tasks/main.yml @@ -16,21 +16,23 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: download file from gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: download.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ postgresql_restore_storage }}" + dest_file_name: "{{ postgresql_restore_gzip_file_name }}" + local_file_or_folder_path: "{{ postgresql_restore_gzip_file_path }}" + when: cloud_service_provider == "gcloud" + - name: ensure postgresql 
service is stopped service: name=postgresql state=stopped - name: wait for postgresql to be stopped wait_for: port={{ postgresql_port }} state=stopped -- name: drop cluster - command: pg_dropcluster {{ postgresql_cluster_version }} {{ postgresql_cluster_name }} - become_user: "{{ postgresql_user }}" - ignore_errors: true - -- name: create cluster - command: pg_createcluster {{ postgresql_cluster_version }} {{ postgresql_cluster_name }} - become_user: "{{ postgresql_user }}" - - name: ensure postgresql service is started service: name=postgresql state=started diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index bd034f9bb3..d314ecf925 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -37,7 +37,12 @@ azure_artifact_storage_account_name: "{{ sunbird_artifact_storage_account_name } aws_management_bucket_name: "" # Define the below if you are using Google Cloud -gcs_management_bucket_name: "" +gcloud_private_bucket_name: "" +gcloud_public_bucket_name: "" +gcloud_artifact_bucket_name: "" +gcloud_management_bucket_name: "" + +gcloud_private_bucket_projectId: "" # ------------------------------------------------------------------------------------------------------------ # # Cloud / Infra Specific values - Check these and update accordingly diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index cf76c3d66e..bbb1a526b1 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -34,7 +34,8 @@ aws_management_bucket_user_access_key: "" aws_management_bucket_user_secret_key: "" # Define the below if you are using Google Cloud -gcs_management_bucket_service_account: | +gcp_storage_service_account_name: "" +gcp_storage_key_file: "" # gcloud service account key - refer: 
https://cloud.google.com/iam/docs/creating-managing-service-account-keys # The proxy key and crt values should be padded to the right by a couple of spaces From 2bf8f187e388b5923f613898d5e28edb7218abd8 Mon Sep 17 00:00:00 2001 From: VISHNUDAS <95604247+VISHNUDAS-tunerlabs@users.noreply.github.com> Date: Mon, 31 Oct 2022 11:15:29 +0530 Subject: [PATCH 041/616] Update project_credential_template.json --- utils/sunbird-RC/schema/project_credential_template.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/sunbird-RC/schema/project_credential_template.json b/utils/sunbird-RC/schema/project_credential_template.json index 230fdccce9..4366d2f82f 100644 --- a/utils/sunbird-RC/schema/project_credential_template.json +++ b/utils/sunbird-RC/schema/project_credential_template.json @@ -10,7 +10,7 @@ "id":"did:sunbird:{{osid}}", "issuanceDate": "{{osCreatedAt}}", "credentialSubject": { - "type":"{{certificateLabel}}", + "type":"project", "recipientName": "{{recipient.name}}", "projectName": "{{projectName}}", "projectId": "{{projectId}}", @@ -25,4 +25,4 @@ "name":"{{issuer.name}}", "publicKey":["{{issuer.kid}}"] } - } \ No newline at end of file + } From bab91717d6ed441afc765707a46a9455a675559b Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Mon, 31 Oct 2022 13:58:47 +0530 Subject: [PATCH 042/616] issuenceDate change --- utils/sunbird-RC/schema/project_credential_template.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/sunbird-RC/schema/project_credential_template.json b/utils/sunbird-RC/schema/project_credential_template.json index 230fdccce9..3c7fdf3e31 100644 --- a/utils/sunbird-RC/schema/project_credential_template.json +++ b/utils/sunbird-RC/schema/project_credential_template.json @@ -8,9 +8,9 @@ "VerifiableCredential" ], "id":"did:sunbird:{{osid}}", - "issuanceDate": "{{osCreatedAt}}", + "issuanceDate": "{{completedDate}}", "credentialSubject": { - "type":"{{certificateLabel}}", + "type":"project", 
"recipientName": "{{recipient.name}}", "projectName": "{{projectName}}", "projectId": "{{projectId}}", From d8f60500923be72cf92acb322f95d6531422a3d8 Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Mon, 31 Oct 2022 15:12:42 +0530 Subject: [PATCH 043/616] project certificate flag env --- ansible/roles/stack-sunbird/templates/ml-core-service.env | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ansible/roles/stack-sunbird/templates/ml-core-service.env b/ansible/roles/stack-sunbird/templates/ml-core-service.env index 6bf2405d86..d092fbd49b 100755 --- a/ansible/roles/stack-sunbird/templates/ml-core-service.env +++ b/ansible/roles/stack-sunbird/templates/ml-core-service.env @@ -82,3 +82,6 @@ USER_SERVICE_URL={{ml_core_user_service_URL | default("http://learner-service:90 ## portal url of env APP_PORTAL_BASE_URL={{ proto }}://{{ domain_name }} + +# Project certificate enable or disable flag E.g. ON/OFF +PROJECT_CERTIFICATE_ON_OFF={{ml_core_project_certificate_on_off | default("ON")}} From 37b7fa647138ab2ed6d6909abf273f0ff665f64c Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Wed, 2 Nov 2022 09:05:22 +0530 Subject: [PATCH 044/616] kid env of certificate issuer added --- ansible/roles/stack-sunbird/templates/ml-core-service.env | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/ml-core-service.env b/ansible/roles/stack-sunbird/templates/ml-core-service.env index d092fbd49b..6a813f8cb6 100755 --- a/ansible/roles/stack-sunbird/templates/ml-core-service.env +++ b/ansible/roles/stack-sunbird/templates/ml-core-service.env @@ -83,5 +83,8 @@ USER_SERVICE_URL={{ml_core_user_service_URL | default("http://learner-service:90 ## portal url of env APP_PORTAL_BASE_URL={{ proto }}://{{ domain_name }} -# Project certificate enable or disable flag E.g. ON/OFF +# Project certificate enable or disable flag E.g. 
ON/OFF PROJECT_CERTIFICATE_ON_OFF={{ml_core_project_certificate_on_off | default("ON")}} + +# certificate issuer KID value +CERTIFICATE_ISSUER_KID=d50937e1-9359-4451-a66a-ebee45d1d605 \ No newline at end of file From d78c40fbfcb1de9b87c4e6e37422ad0e6da5bac7 Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Wed, 2 Nov 2022 14:05:32 +0530 Subject: [PATCH 045/616] credential change --- utils/sunbird-RC/schema/project_credential_template.json | 2 ++ 1 file changed, 2 insertions(+) diff --git a/utils/sunbird-RC/schema/project_credential_template.json b/utils/sunbird-RC/schema/project_credential_template.json index 9f1272cfaf..3807365bb4 100644 --- a/utils/sunbird-RC/schema/project_credential_template.json +++ b/utils/sunbird-RC/schema/project_credential_template.json @@ -7,6 +7,7 @@ "type": [ "VerifiableCredential" ], +{% raw %} "id":"did:sunbird:{{osid}}", "issuanceDate": "{{completedDate}}", "credentialSubject": { @@ -26,3 +27,4 @@ "publicKey":["{{issuer.kid}}"] } } + {% endraw %} From 370735b55c3d1b03e1e0f7f12b53b9dde8202782 Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Wed, 2 Nov 2022 14:12:23 +0530 Subject: [PATCH 046/616] formating change on credential file --- .../schema/project_credential_template.json | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/utils/sunbird-RC/schema/project_credential_template.json b/utils/sunbird-RC/schema/project_credential_template.json index 3807365bb4..d9a520d5da 100644 --- a/utils/sunbird-RC/schema/project_credential_template.json +++ b/utils/sunbird-RC/schema/project_credential_template.json @@ -1,12 +1,12 @@ { - "@context": [ - "https://www.w3.org/2018/credentials/v1", - "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_v1_context.json", - "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_sunbird_context.json" + 
"@context": [ + "https://www.w3.org/2018/credentials/v1", + "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_v1_context.json", + "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_sunbird_context.json" ], - "type": [ - "VerifiableCredential" - ], + "type": [ + "VerifiableCredential" + ], {% raw %} "id":"did:sunbird:{{osid}}", "issuanceDate": "{{completedDate}}", @@ -27,4 +27,4 @@ "publicKey":["{{issuer.kid}}"] } } - {% endraw %} +{% endraw %} From ab685251e3fe0bcfa502ec8c312a6d8c0be8b85c Mon Sep 17 00:00:00 2001 From: VISHNUDAS-tunerlabse Date: Wed, 2 Nov 2022 14:41:23 +0530 Subject: [PATCH 047/616] kid env variable change --- ansible/roles/stack-sunbird/templates/ml-core-service.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/ml-core-service.env b/ansible/roles/stack-sunbird/templates/ml-core-service.env index 6a813f8cb6..7b1da9c931 100755 --- a/ansible/roles/stack-sunbird/templates/ml-core-service.env +++ b/ansible/roles/stack-sunbird/templates/ml-core-service.env @@ -87,4 +87,4 @@ APP_PORTAL_BASE_URL={{ proto }}://{{ domain_name }} PROJECT_CERTIFICATE_ON_OFF={{ml_core_project_certificate_on_off | default("ON")}} # certificate issuer KID value -CERTIFICATE_ISSUER_KID=d50937e1-9359-4451-a66a-ebee45d1d605 \ No newline at end of file +CERTIFICATE_ISSUER_KID={{certificate_issuer_kid | default("")}} \ No newline at end of file From 8f5f0daa16a251e32f399a61fed8ec492c5fd6fd Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Fri, 4 Nov 2022 11:47:35 +0530 Subject: [PATCH 048/616] Release 5.1.0 - gcp related changes (#3578) --- .gitignore | 2 +- ansible/artifacts-download.yml | 13 ++++- ansible/artifacts-upload.yml | 13 ++++- ansible/assets-upload.yml | 32 +++++++++-- ansible/bootstrap.yml | 13 ++++- 
ansible/deploy-plugins.yml | 55 ++++++++++++++++++- ansible/postgres-managed-service-backup.yml | 4 +- ansible/postgresql-restore.yml | 2 +- .../roles/cassandra-restore/tasks/main.yml | 13 ++++- .../gcp-cloud-storage/tasks/delete-batch.yml | 2 +- ansible/roles/grafana-backup/tasks/main.yml | 11 ++++ .../jenkins-backup-upload/tasks/main.yml | 14 ++++- ansible/roles/mongodb-backup/tasks/main.yml | 11 ++++ .../defaults/main.yml | 0 .../tasks/main.yml | 11 ++++ .../defaults/main.yml | 0 .../tasks/main.yml | 11 ++++ .../roles/prometheus-backup-v2/tasks/main.yml | 11 ++++ .../roles/prometheus-backup/tasks/main.yml | 11 ++++ .../roles/prometheus-restore/tasks/main.yml | 11 ++++ ansible/roles/redis-backup/tasks/main.yml | 11 ++++ .../dev/jobs/Core/jobs/Bootstrap/config.xml | 1 + .../DataPipeline/jobs/Bootstrap/config.xml | 1 + .../jobs/Bootstrap/config.xml | 1 + .../managed-postgres-backup/Jenkinsfile | 2 +- 25 files changed, 239 insertions(+), 17 deletions(-) rename ansible/roles/{postgres-azure-managed-service-backup => postgres-managed-service-backup}/defaults/main.yml (100%) rename ansible/roles/{postgres-azure-managed-service-backup => postgres-managed-service-backup}/tasks/main.yml (82%) rename ansible/roles/{postgres-azure-managed-service-restore => postgres-managed-service-restore}/defaults/main.yml (100%) rename ansible/roles/{postgres-azure-managed-service-restore => postgres-managed-service-restore}/tasks/main.yml (83%) diff --git a/.gitignore b/.gitignore index 9cbd220cb1..97b376da44 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ .DS_Store *.retry *.pyc -.idea \ No newline at end of file +.idea diff --git a/ansible/artifacts-download.yml b/ansible/artifacts-download.yml index 2872fa1013..cb8230d44b 100644 --- a/ansible/artifacts-download.yml +++ b/ansible/artifacts-download.yml @@ -14,4 +14,15 @@ local_file_or_folder_path: "{{ artifact_path }}" storage_account_name: "{{ azure_artifact_storage_account_name }}" storage_account_key: "{{ 
azure_artifact_storage_account_key }}" - when: cloud_service_provider == "azure" \ No newline at end of file + when: cloud_service_provider == "azure" + + - name: download artifact from gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: download.yml + vars: + gcp_bucket_name: "{{ gcloud_artifact_bucket_name }}" + dest_folder_name: "{{ artifacts_container }}" + dest_file_name: "{{ artifact }}" + local_file_or_folder_path: "{{ artifact_path }}" + when: cloud_service_provider == "gcloud" diff --git a/ansible/artifacts-upload.yml b/ansible/artifacts-upload.yml index 642a9aa111..52e67448c7 100644 --- a/ansible/artifacts-upload.yml +++ b/ansible/artifacts-upload.yml @@ -15,4 +15,15 @@ local_file_or_folder_path: "{{ artifact_path }}" storage_account_name: "{{ azure_artifact_storage_account_name }}" storage_account_key: "{{ azure_artifact_storage_account_key }}" - when: cloud_service_provider == "azure" \ No newline at end of file + when: cloud_service_provider == "azure" + + - name: upload artifact to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_artifact_bucket_name }}" + dest_folder_name: "{{ artifacts_container }}" + dest_file_name: "{{ artifact }}" + local_file_or_folder_path: "{{ artifact_path }}" + when: cloud_service_provider == "gcloud" diff --git a/ansible/assets-upload.yml b/ansible/assets-upload.yml index db14234e4a..3809c63722 100644 --- a/ansible/assets-upload.yml +++ b/ansible/assets-upload.yml @@ -9,7 +9,10 @@ # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos vars: player_cdn_storage: "{{ player_cdn_container }}" + # Azure tasks: + - name: this block consists of tasks related to azure storage + block: - name: set common azure variables set_fact: blob_container_name: "{{ player_cdn_storage }}" @@ -18,13 +21,11 @@ storage_account_name: "{{ azure_public_storage_account_name }}" storage_account_key: "{{ azure_public_storage_account_key }}" storage_account_sas_token: "{{ azure_public_storage_account_sas }}" - when: cloud_service_provider == "azure" - + - name: delete files and folders from azure storage using azcopy include_role: name: azure-cloud-storage tasks_from: delete-using-azcopy.yml - when: cloud_service_provider == "azure" - name: upload batch of files to azure storage include_role: @@ -32,4 +33,27 @@ tasks_from: blob-upload-batch.yml vars: local_file_or_folder_path: "{{ assets }}" - when: cloud_service_provider == "azure" + when: cloud_service_provider == "azure" + + #GCP + - name: this block consists of tasks related to azure storage + block: + - name: set common gcloud variables + set_fact: + dest_folder_name: "{{ player_cdn_storage }}" + dest_folder_path: "" + file_delete_pattern: "{{ player_cdn_storage }}/" + gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + + - name: delete files and folders from gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: delete-batch.yml + + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + local_file_or_folder_path: "{{ assets }}/*" + when: cloud_service_provider == "gcloud" diff --git a/ansible/bootstrap.yml b/ansible/bootstrap.yml index 30d57ca52f..36d9d7b0d0 100644 --- a/ansible/bootstrap.yml +++ b/ansible/bootstrap.yml @@ -30,6 +30,16 @@ tags: - azure_cli +- hosts: "{{ hosts }}" + become: yes + ignore_unreachable: yes + vars_files: + - "{{inventory_dir}}/secrets.yml" + roles: 
+ - role: gcloud-cli + tags: + - gcloud_cli + - hosts: "{{ hosts| default('all') }}" become: yes gather_facts: no @@ -39,4 +49,5 @@ roles: - vm-agents-nodeexporter tags: - - node_exporter \ No newline at end of file + - node_exporter + diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index ef3ea0b44a..bf876b3f66 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -30,7 +30,7 @@ storage_account_sas_token: "{{ azure_public_storage_account_sas }}" tags: - always - + - block: - name: delete files and folders from azure storage using azcopy include_role: @@ -43,7 +43,7 @@ - collection-editor - generic-editor - preview - + - block: - name: upload batch of files to azure storage include_role: @@ -82,3 +82,54 @@ tags: - plugins when: cloud_service_provider == "azure" + + - name: this block consists of tasks related to gcloud storage + block: + - name: set common gcloud variables + set_fact: + dest_folder_name: "{{ plugin_storage }}" + gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + tags: + - always + + - block: + - name: delete files and folders from gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: delete-batch.yml + vars: + file_delete_pattern: "{{ dest_folder_name }}/{{ folder_name }}" + tags: + - content-editor + - collection-editor + - generic-editor + - preview + + - block: + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + dest_folder_path: "{{ folder_name }}" + local_file_or_folder_path: "{{ source_name }}" + tags: + - content-editor + - collection-editor + - generic-editor + - preview + - editor + - core-plugins + + - block: + - name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + dest_file_name: "artefacts/content-player/content-player-{{ player_version_number }}.zip" + local_file_or_folder_path: "{{ source_file_name }}" + tags: + - preview + when: 
cloud_service_provider == "gcloud" + diff --git a/ansible/postgres-managed-service-backup.yml b/ansible/postgres-managed-service-backup.yml index 1a92efb09d..05abaf41c0 100644 --- a/ansible/postgres-managed-service-backup.yml +++ b/ansible/postgres-managed-service-backup.yml @@ -3,6 +3,6 @@ vars_files: - ['{{inventory_dir}}/secrets.yml'] roles: - - postgres-azure-managed-service-backup + - postgres-managed-service-backup tags: - - postgres-azure-managed-service + - postgres-managed-service diff --git a/ansible/postgresql-restore.yml b/ansible/postgresql-restore.yml index e2d80770d1..bcec7447d6 100644 --- a/ansible/postgresql-restore.yml +++ b/ansible/postgresql-restore.yml @@ -3,6 +3,6 @@ vars_files: - ['{{inventory_dir}}/secrets.yml'] roles: - - postgres-azure-managed-service-restore + - postgres-managed-service-restore tags: - postgresql-restore diff --git a/ansible/roles/cassandra-restore/tasks/main.yml b/ansible/roles/cassandra-restore/tasks/main.yml index 717e2fe113..3b2fc3ae9b 100755 --- a/ansible/roles/cassandra-restore/tasks/main.yml +++ b/ansible/roles/cassandra-restore/tasks/main.yml @@ -17,7 +17,18 @@ storage_account_name: "{{ azure_management_storage_account_name }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" - + +- name: download file from gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: download.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ cassandra_backup_storage }}" + dest_file_name: "{{ cassandra_restore_gzip_file_name }}" + local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}" + when: cloud_service_provider == "gcloud" + - name: unarchieve restore artifact become: true unarchive: src={{user_home}}/{{ cassandra_restore_gzip_file_name }} dest={{user_home}}/ copy=no diff --git a/ansible/roles/gcp-cloud-storage/tasks/delete-batch.yml b/ansible/roles/gcp-cloud-storage/tasks/delete-batch.yml index 
ad0e4449d6..17fe952b16 100644 --- a/ansible/roles/gcp-cloud-storage/tasks/delete-batch.yml +++ b/ansible/roles/gcp-cloud-storage/tasks/delete-batch.yml @@ -3,7 +3,7 @@ include_tasks: gcloud-auth.yml - name: Delete folder recursively in gcp storage - shell: gsutil rm -r "gs://{{ gcp_bucket_name }}/{{ file_delete_pattern }" + shell: gsutil rm -r "gs://{{ gcp_bucket_name }}/{{ file_delete_pattern }}" async: 3600 poll: 10 diff --git a/ansible/roles/grafana-backup/tasks/main.yml b/ansible/roles/grafana-backup/tasks/main.yml index c898ada0d5..0f0a44a2b2 100644 --- a/ansible/roles/grafana-backup/tasks/main.yml +++ b/ansible/roles/grafana-backup/tasks/main.yml @@ -32,5 +32,16 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ grafana_backup_storage }}" + dest_file_name: "{{ grafana_backup_gzip_file_name }}" + local_file_or_folder_path: "{{ grafana_backup_gzip_file_path }}" + when: cloud_service_provider == "gcloud" + - name: clean up backup dir after upload file: path="{{ grafana_backup_dir }}" state=absent diff --git a/ansible/roles/jenkins-backup-upload/tasks/main.yml b/ansible/roles/jenkins-backup-upload/tasks/main.yml index d003bed89f..32be77b7a7 100644 --- a/ansible/roles/jenkins-backup-upload/tasks/main.yml +++ b/ansible/roles/jenkins-backup-upload/tasks/main.yml @@ -23,4 +23,16 @@ local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" storage_account_name: "{{ azure_management_storage_account_name }}" storage_account_key: "{{ azure_management_storage_account_key }}" - when: cloud_service_provider == "azure" \ No newline at end of file + when: cloud_service_provider == "azure" + +- name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + 
gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ jenkins_backup_storage }}" + dest_file_name: "{{ LATEST_BACKUP_DIR.stdout }}.zip" + local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" + when: cloud_service_provider == "gcloud" + diff --git a/ansible/roles/mongodb-backup/tasks/main.yml b/ansible/roles/mongodb-backup/tasks/main.yml index 1eefe6b077..4ae40ecd2b 100644 --- a/ansible/roles/mongodb-backup/tasks/main.yml +++ b/ansible/roles/mongodb-backup/tasks/main.yml @@ -26,6 +26,17 @@ storage_account_name: "{{ azure_management_storage_account_name }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" + +- name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ mongo_backup_storage }}" + dest_file_name: "{{ mongo_backup_file_name }}.tar.gz" + local_file_or_folder_path: "{{ mongo_backup_file_path }}.tar.gz" + when: cloud_service_provider == "gcloud" - name: clean up backup dir after upload file: path={{ mongo_backup_dir }} state=absent diff --git a/ansible/roles/postgres-azure-managed-service-backup/defaults/main.yml b/ansible/roles/postgres-managed-service-backup/defaults/main.yml similarity index 100% rename from ansible/roles/postgres-azure-managed-service-backup/defaults/main.yml rename to ansible/roles/postgres-managed-service-backup/defaults/main.yml diff --git a/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml b/ansible/roles/postgres-managed-service-backup/tasks/main.yml similarity index 82% rename from ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml rename to ansible/roles/postgres-managed-service-backup/tasks/main.yml index a8261d91a3..686f4c42f6 100644 --- a/ansible/roles/postgres-azure-managed-service-backup/tasks/main.yml +++ 
b/ansible/roles/postgres-managed-service-backup/tasks/main.yml @@ -54,5 +54,16 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ postgresql_backup_storage }}" + dest_file_name: "{{ postgresql_backup_gzip_file_name }}.zip" + local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" + when: cloud_service_provider == "gcloud" + - name: clean up backup dir after upload file: path="{{ postgresql_backup_dir }}" state=absent diff --git a/ansible/roles/postgres-azure-managed-service-restore/defaults/main.yml b/ansible/roles/postgres-managed-service-restore/defaults/main.yml similarity index 100% rename from ansible/roles/postgres-azure-managed-service-restore/defaults/main.yml rename to ansible/roles/postgres-managed-service-restore/defaults/main.yml diff --git a/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml b/ansible/roles/postgres-managed-service-restore/tasks/main.yml similarity index 83% rename from ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml rename to ansible/roles/postgres-managed-service-restore/tasks/main.yml index 61b1fe3eca..7df51e26b4 100644 --- a/ansible/roles/postgres-azure-managed-service-restore/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-restore/tasks/main.yml @@ -21,6 +21,17 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: download file from gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: download.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ postgres_backup_storage }}" + dest_file_name: "{{ postgres_backup_filename }}" + local_file_or_folder_path: "{{ postgres_restore_dir }}/{{ 
postgres_backup_filepath }}" + when: cloud_service_provider == "gcloud" + - name: unarchive artifact unarchive: src={{ postgresql_restore_dir }}/{{ postgres_backup_filename }} dest={{ postgresql_restore_dir }}/ copy=no diff --git a/ansible/roles/prometheus-backup-v2/tasks/main.yml b/ansible/roles/prometheus-backup-v2/tasks/main.yml index 071ed395e1..0cafacb627 100644 --- a/ansible/roles/prometheus-backup-v2/tasks/main.yml +++ b/ansible/roles/prometheus-backup-v2/tasks/main.yml @@ -29,6 +29,17 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ prometheus_backup_storage }}" + dest_file_name: "{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" + local_file_or_folder_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" + when: cloud_service_provider == "gcloud" + - name: Deleting snapshot file: path: "{{ prometheus_data_dir }}/snapshots/{{ snapshot_name }}" diff --git a/ansible/roles/prometheus-backup/tasks/main.yml b/ansible/roles/prometheus-backup/tasks/main.yml index f9aaa54073..32cffa6e5c 100644 --- a/ansible/roles/prometheus-backup/tasks/main.yml +++ b/ansible/roles/prometheus-backup/tasks/main.yml @@ -38,5 +38,16 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ prometheus_backup_storage }}" + dest_file_name: "{{ prometheus_backup_gzip_file_name }}" + local_file_or_folder_path: "{{ prometheus_backup_gzip_file_path }}" + when: cloud_service_provider == "gcloud" + - name: clean up backup dir after upload file: path="{{ prometheus_backup_dir 
}}" state=absent diff --git a/ansible/roles/prometheus-restore/tasks/main.yml b/ansible/roles/prometheus-restore/tasks/main.yml index 40c9bd9225..843ebe4598 100644 --- a/ansible/roles/prometheus-restore/tasks/main.yml +++ b/ansible/roles/prometheus-restore/tasks/main.yml @@ -14,6 +14,17 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: download file from gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: download.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ prometheus_backup_storage }}" + dest_file_name: "{{ prometheus_backup_filename }}" + local_file_or_folder_path: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filepath }}" + when: cloud_service_provider == "gcloud" + - name: ensure prometheus is stopped shell: "docker service scale {{prometheus_service_name}}=0 && sleep 10" delegate_to: "{{manager_host}}" #variable is passed as extra vars from jenkins diff --git a/ansible/roles/redis-backup/tasks/main.yml b/ansible/roles/redis-backup/tasks/main.yml index be66ea5292..51f7ab63ff 100644 --- a/ansible/roles/redis-backup/tasks/main.yml +++ b/ansible/roles/redis-backup/tasks/main.yml @@ -26,6 +26,17 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_management_bucket_name }}" + dest_folder_name: "{{ nodebb_redis_backup_storage }}" + dest_file_name: "{{ redis_backup_file_name }}" + local_file_or_folder_path: "{{ redis_backup_file_path }}" + when: cloud_service_provider == "gcloud" + - name: clean up backup dir after upload file: path={{ redis_backup_dir }} state=absent diff --git a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml 
b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml index 7663c96890..20d7006b52 100644 --- a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml +++ b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml @@ -94,6 +94,7 @@ return """<b>This parameter is not used</b>""" true diff --git a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/DataPipeline/jobs/Bootstrap/config.xml b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/DataPipeline/jobs/Bootstrap/config.xml index 39884cf71a..8c82b404e9 100644 --- a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/DataPipeline/jobs/Bootstrap/config.xml +++ b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/DataPipeline/jobs/Bootstrap/config.xml @@ -94,6 +94,7 @@ return """<b>This parameter is not used</b>""" true diff --git a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/KnowledgePlatform/jobs/Bootstrap/config.xml b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/KnowledgePlatform/jobs/Bootstrap/config.xml index 9f26dbf9d9..b3132dfd16 100644 --- a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/KnowledgePlatform/jobs/Bootstrap/config.xml +++ b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/KnowledgePlatform/jobs/Bootstrap/config.xml @@ -94,6 +94,7 @@ return """<b>This parameter is not used</b>""" true diff --git a/pipelines/backup/managed-postgres-backup/Jenkinsfile b/pipelines/backup/managed-postgres-backup/Jenkinsfile index 66acf4baee..f27e665bd5 100644 --- a/pipelines/backup/managed-postgres-backup/Jenkinsfile +++ b/pipelines/backup/managed-postgres-backup/Jenkinsfile @@ -25,7 +25,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/postgres-managed-service-backup.yml" - ansibleExtraArgs = "--tags postgres-azure-managed-service --extra-vars 
\"postgres_env=${params.postgres_env}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--tags postgres-managed-service --extra-vars \"postgres_env=${params.postgres_env}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) From 4174d6cf925e09a27683da0fe02e84003d14a2ce Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Sun, 6 Nov 2022 23:11:36 +0530 Subject: [PATCH 049/616] Release 5.1.0 gcp related changes (#3580) --- ansible/desktop-faq-upload.yml | 45 ++++++++++++++++++++- ansible/dial_upload-schema.yml | 14 ++++++- ansible/kp_upload-schema.yml | 13 +++++- ansible/plugins.yml | 21 ++++++++++ ansible/roles/cert-templates/tasks/main.yml | 13 +++++- ansible/roles/desktop-deploy/tasks/main.yml | 26 +++++++++++- ansible/uploadFAQs.yml | 13 ++++++ 7 files changed, 140 insertions(+), 5 deletions(-) diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index 0cdb89a07d..f20f0d7eeb 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -49,4 +49,47 @@ tags: - upload-chatbot-config - upload-batch - when: cloud_service_provider == "azure" \ No newline at end of file + when: cloud_service_provider == "azure" + + - name: this block consists of tasks related to gcloud storage + block: + - name: set common gcloud variables + set_fact: + dest_folder_name: "{{ upload_storage }}" + dest_file_name: "{{ destination_path }}" + dest_folder_path: "{{ destination_path }}" + local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" + tags: + - always + + - block: + - name: upload file to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + tags: + - upload-desktop-faq + + - block: + - name: upload file to gcloud storage + include_role: + name: 
gcp-cloud-storage + tasks_from: upload.yml + vars: + gcp_bucket_name: "{{ gcloud_private_bucket_name }}" + tags: + - upload-label + + - block: + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + tags: + - upload-chatbot-config + - upload-batch + when: cloud_service_provider == "gcloud" diff --git a/ansible/dial_upload-schema.yml b/ansible/dial_upload-schema.yml index ba7abf627b..a93a900263 100644 --- a/ansible/dial_upload-schema.yml +++ b/ansible/dial_upload-schema.yml @@ -37,4 +37,16 @@ local_file_or_folder_path: "dial_schema_template_files" storage_account_name: "{{ azure_public_storage_account_name }}" storage_account_key: "{{ azure_public_storage_account_key }}" - when: cloud_service_provider == "azure" \ No newline at end of file + when: cloud_service_provider == "azure" + + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + dest_folder_name: "{{ dial_plugin_storage }}" + dest_folder_path: "schemas/local" + local_file_or_folder_path: "dial_schema_template_files" + gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + when: cloud_service_provider == "gcloud" + diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index 7d7163437b..a4f6bda83a 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -21,4 +21,15 @@ local_file_or_folder_path: "{{ source_name }}" storage_account_name: "{{ azure_public_storage_account_name }}" storage_account_key: "{{ azure_public_storage_account_key }}" - when: cloud_service_provider == "azure" \ No newline at end of file + when: cloud_service_provider == "azure" + + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + dest_folder_name: "{{ plugin_storage }}" + dest_folder_path: "schemas/local" + 
local_file_or_folder_path: "{{ source_name }}" + gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + when: cloud_service_provider == "gcloud" diff --git a/ansible/plugins.yml b/ansible/plugins.yml index 35e34578d0..ab32d9f756 100644 --- a/ansible/plugins.yml +++ b/ansible/plugins.yml @@ -33,3 +33,24 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml when: cloud_service_provider == "azure" + + - name: this block consists of tasks related to gcloud storage + block: + - name: set common gcloud variables + set_fact: + dest_folder_name: "{{ plugin_storage }}" + gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + file_delete_pattern: "content-plugins/{{ plugins_name }}" + dest_folder_path: "/content-plugins/{{ plugins_name }}" + local_file_or_folder_path: "{{ source_file }}" + + - name: delete files and folders from gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: delete-batch.yml + + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index ee05f2adb3..acecc4d6f4 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -42,4 +42,15 @@ local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" storage_account_name: "{{ azure_private_storage_account_name }}" storage_account_key: "{{ azure_private_storage_account_key }}" - when: cloud_service_provider == "azure" \ No newline at end of file + when: cloud_service_provider == "azure" + +- name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + dest_folder_name: "{{ cert_service_storage }}" + dest_folder_path: "" + local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" + gcp_bucket_name: "{{ 
gcloud_private_bucket_name }}" + when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/desktop-deploy/tasks/main.yml b/ansible/roles/desktop-deploy/tasks/main.yml index e7763604c1..4ce4da3fb6 100644 --- a/ansible/roles/desktop-deploy/tasks/main.yml +++ b/ansible/roles/desktop-deploy/tasks/main.yml @@ -73,4 +73,28 @@ vars: blob_container_folder_path: "/latest" local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets/{{ time }}/" - when: cloud_service_provider == "azure" \ No newline at end of file + when: cloud_service_provider == "azure" + +- name: this block consists of tasks related to gcloud storage + block: + - name: set common gcloud variables + set_fact: + dest_folder_name: "{{ offline_installer_storage }}" + gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + dest_folder_path: "" + local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets" + + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + dest_folder_path: "latest" + local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets/{{ time }}/" + when: cloud_service_provider == "gcloud" diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index 8447fe4e47..52923e1bf4 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -29,6 +29,19 @@ with_items: - "{{ source_folder.split(',') }}" when: cloud_service_provider == "azure" + + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + dest_folder_name: "{{ upload_storage }}" + dest_folder_path: "" + local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" + gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + with_items: + - "{{ source_folder.split(',') }}" + when: 
cloud_service_provider == "gcloud" tags: - upload-faqs - upload-RC-schema From 41445d96423b1de1530a1c65bc0a33f83cae393f Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Mon, 14 Nov 2022 12:03:58 +0530 Subject: [PATCH 050/616] Add GCP related vars for KP and DP (#3586) --- .../ansible/inventory/dev/DataPipeline/common.yml | 9 +++++++++ .../ansible/inventory/dev/DataPipeline/secrets.yml | 4 ++++ .../ansible/inventory/dev/KnowledgePlatform/common.yml | 9 +++++++++ .../ansible/inventory/dev/KnowledgePlatform/secrets.yml | 4 ++++ 4 files changed, 26 insertions(+) diff --git a/private_repo/ansible/inventory/dev/DataPipeline/common.yml b/private_repo/ansible/inventory/dev/DataPipeline/common.yml index 348c74dc6b..ef8432539b 100644 --- a/private_repo/ansible/inventory/dev/DataPipeline/common.yml +++ b/private_repo/ansible/inventory/dev/DataPipeline/common.yml @@ -1,5 +1,6 @@ # ------------------------------------------------------------------------------------------------------------ # # Mandatorty variables - DO NOT LEAVE ANYTHING BLANK # +cloud_service_provider: "" # Your cloud service provider name. 
Supported values are aws, azure, gcloud domain_name: "" # your domain name like example.com # docker hub details dockerhub: "change.docker.url" # docker hub username or url incase of private registry @@ -168,3 +169,11 @@ processing_kafka_overriden_topics: - name: ml.observation.druid retention_time: 86400000 replication_factor: 1 + +# Define the below if you are using Google Cloud +gcloud_private_bucket_name: "" +gcloud_public_bucket_name: "" +gcloud_artifact_bucket_name: "" +gcloud_management_bucket_name: "" + +gcloud_private_bucket_projectId: "" diff --git a/private_repo/ansible/inventory/dev/DataPipeline/secrets.yml b/private_repo/ansible/inventory/dev/DataPipeline/secrets.yml index d18a6d1e0e..c37b74d8fe 100644 --- a/private_repo/ansible/inventory/dev/DataPipeline/secrets.yml +++ b/private_repo/ansible/inventory/dev/DataPipeline/secrets.yml @@ -26,3 +26,7 @@ dp_vault_data_exhaust_token: # slack api token # ------------------------------------------------------------------------------------------------------------ # # Sensible defaults which you need not change - But if you would like to change, you are free to do so dp_vault_artifacts_container: artifacts + +# Define the below if you are using Google Cloud +gcp_storage_service_account_name: "" +gcp_storage_key_file: "" diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml index 24f0320615..7f21987f82 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml @@ -1,6 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # # Mandatorty variables - DO NOT LEAVE ANYTHING BLANK # # docker hub details +cloud_service_provider: "" # Your cloud service provider name. 
Supported values are aws, azure, gcloud dockerhub: "change.docker.url" # docker hub username or url incase of private registry private_ingressgateway_ip: "" # your private kubernetes load balancer ip domain_name: "" # your domain name like example.com @@ -32,3 +33,11 @@ plugin_container_name: "{{azure_public_container}}" kp_schema_base_path: "{{proto}}://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{plugin_container_name}}/schemas/local" imagepullsecrets: "{{env}}registrysecret" # kubernetes imagePullSecrets kubeconfig_path: /var/lib/jenkins/secrets/k8s.yaml # kubeconfig file path on jenkins + +# Define the below if you are using Google Cloud +gcloud_private_bucket_name: "" +gcloud_public_bucket_name: "" +gcloud_artifact_bucket_name: "" +gcloud_management_bucket_name: "" + +gcloud_private_bucket_projectId: "" diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml index fb1af29c0a..ef5db134da 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml @@ -18,3 +18,7 @@ lp_vault_youtube_api_key: # youtube api token if you want # ------------------------------------------------------------------------------------------------------------ # # Sensible defaults which you need not change - But if you would like to change, you are free to do so lp_vault_graph_passport_key: "long-secret-to-calm-entropy-gods" + +# Define the below if you are using Google Cloud +gcp_storage_service_account_name: "" +gcp_storage_key_file: "" From 1fbc256e18e188bd4a14a943b9d05a461c4950a9 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 14:03:38 +0530 Subject: [PATCH 051/616] fix: SB-31155 updated references for upstream_url --- ansible/inventory/env/group_vars/all.yml | 3 ++- ansible/roles/stack-proxy/defaults/main.yml | 7 +++++-- ansible/roles/stack-sunbird/defaults/main.yml | 7 
+++++-- .../ansible/roles/helm-daemonset/defaults/main.yml | 7 +++++-- .../ansible/roles/helm-deploy/defaults/main.yml | 7 +++++-- private_repo/ansible/inventory/dev/Core/common.yml | 13 ++++++++++++- 6 files changed, 34 insertions(+), 10 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index df20586566..d0dfe156b1 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -36,7 +36,8 @@ sunbird_keycloak_required_action_link_expiration_seconds: 2592000 sunbird_es_port: 9300 mail_server_port: 587 -upstream_url: "{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}" +# SB-31155 +#upstream_url: "{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}" # Learner sunbird_user_profile_field_default_visibility: private diff --git a/ansible/roles/stack-proxy/defaults/main.yml b/ansible/roles/stack-proxy/defaults/main.yml index 579709e412..dd4bda152d 100644 --- a/ansible/roles/stack-proxy/defaults/main.yml +++ b/ansible/roles/stack-proxy/defaults/main.yml @@ -39,6 +39,9 @@ ekstep_s3_env: "{{sunbird_env}}" registry_url: "{{proto}}://{{proxy_server_name}}/registry" kibana_service: "{{swarm_dashboard}}:5601" -upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" -plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" +# SB-31155 +#upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" + +# SB-31155 +#plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" sunbird_offline_azure_storage_account_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ offline_installer_container_name }}" diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 3b5946e333..8936a190ec 100644 --- 
a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -212,8 +212,11 @@ prometheus_alertmanager_route_prefix: alertmanager ekstep_s3_env: "{{sunbird_env}}" registry_url: "{{proto}}://{{proxy_server_name}}/registry" -upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" -plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" +# SB-31155 +#upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" + +# SB-31155 +#plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" sunbird_offline_azure_storage_account_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ offline_installer_container_name }}" # Override this dictionary in your common.yaml proxy: diff --git a/kubernetes/ansible/roles/helm-daemonset/defaults/main.yml b/kubernetes/ansible/roles/helm-daemonset/defaults/main.yml index 59eb136773..9822dd5626 100644 --- a/kubernetes/ansible/roles/helm-daemonset/defaults/main.yml +++ b/kubernetes/ansible/roles/helm-daemonset/defaults/main.yml @@ -33,6 +33,9 @@ ekstep_s3_env: "{{sunbird_env}}" registry_url: "{{proto}}://{{proxy_server_name}}/registry" ep_es_host: -upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" -plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" +# SB-31155 +#upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" + +# SB-31155 +#plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" sunbird_offline_azure_storage_account_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ offline_installer_container_name }}" diff --git a/kubernetes/ansible/roles/helm-deploy/defaults/main.yml b/kubernetes/ansible/roles/helm-deploy/defaults/main.yml index ed2c7f5aca..bf1fed2ff7 100644 --- a/kubernetes/ansible/roles/helm-deploy/defaults/main.yml +++ 
b/kubernetes/ansible/roles/helm-deploy/defaults/main.yml @@ -32,8 +32,11 @@ registry_url: "{{proto}}://{{proxy_server_name}}/registry" ep_es_host: proxy_custom_config: -upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" -plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" +# SB-31155 +#upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" + +# SB-31155 +#plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" discussion_upstream_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net" sunbird_offline_azure_storage_account_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ offline_installer_container_name }}" diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index d314ecf925..1b32d003e7 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -44,6 +44,14 @@ gcloud_management_bucket_name: "" gcloud_private_bucket_projectId: "" +# Uncomment the variable based on your cloud provider (as a default we have kept Azure variable uncommented) +# GCP +# upstream_url: https://storage.cloud.google.com/{{ gcloud_public_bucket_name }}/{{ content_storage }} +# AWS +# upstream_url: # Geetha to fill this url based on AWS role vars +# Azure +upstream_url: "{{sunbird_azure_public_storage_account_name}}.blob.core.windows.net/{{ content_storage }}" # Proxy url to get /assets/public + # ------------------------------------------------------------------------------------------------------------ # # Cloud / Infra Specific values - Check these and update accordingly @@ -70,6 +78,7 @@ sunbird_default_channel: sunbird # default sunbird channel name environment_id: "10000003" # A 8 digit number for example like 1000000, should be same as defined in KP common.yml sunbird_content_azure_storage_container: contents # Azure 
container name for storing public data (like contents), should be same as azure_public_container defined in KP common.yml + # This sms sender id should be verified by your provider. This is the sender id which will be used for `From Address`. For example, # # From: SBSMS @@ -115,9 +124,11 @@ postgres: db_admin_user: postgres db_admin_password: "{{core_vault_postgres_password}}" +# Generic variable for any cloud provider +content_storage: "{{ sunbird_content_azure_storage_container }}" + # Azure account related vars sunbird_azure_public_storage_account_name: "{{ sunbird_public_storage_account_name }}" -upstream_url: "{{sunbird_azure_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}" # Proxy url to get /assets/public plugin_upstream_url: "{{upstream_url}}" azure_plugin_storage_account_name: "{{sunbird_azure_public_storage_account_name}}" azure_plugin_storage_account_key: "{{sunbird_public_storage_account_key}}" From 32d2c59e0d8e7fbef7867238c72a0c384bdd7c27 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 14:09:03 +0530 Subject: [PATCH 052/616] fix: adding https in url Signed-off-by: Keshav Prasad --- private_repo/ansible/inventory/dev/Core/common.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index 1b32d003e7..650e04c0e4 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -50,7 +50,7 @@ gcloud_private_bucket_projectId: "" # AWS # upstream_url: # Geetha to fill this url based on AWS role vars # Azure -upstream_url: "{{sunbird_azure_public_storage_account_name}}.blob.core.windows.net/{{ content_storage }}" # Proxy url to get /assets/public +upstream_url: "https://{{sunbird_azure_public_storage_account_name}}.blob.core.windows.net/{{ content_storage }}" # Proxy url to get /assets/public # 
------------------------------------------------------------------------------------------------------------ # # Cloud / Infra Specific values - Check these and update accordingly From 079c554336373f2d3d67dbc69f2f544fa9c5926c Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 14:10:10 +0530 Subject: [PATCH 053/616] fix: updated sunbird_cloud_storage_urls var to use upstream_url Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index d0dfe156b1..4b0975b4b2 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -364,7 +364,7 @@ content_service_blacklisted_resourcetype: '' content_service_whitelisted_resourcetype: '' content_service_whitelisted_mimetype: '' content_service_blacklisted_mimetype: '' -sunbird_cloud_storage_urls: 'https://s3.ap-south-1.amazonaws.com/ekstep-public-{{ekstep_s3_env}}/,https://ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com/,https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/' +sunbird_cloud_storage_urls: 'https://s3.ap-south-1.amazonaws.com/ekstep-public-{{ekstep_s3_env}}/,https://ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com/,{{ upstream_url }}/' sunbird_email_max_recipients_limit: 100 sunbird_cassandra_consistency_level: local_quorum sunbird_cassandra_replication_strategy: '{"class":"SimpleStrategy","replication_factor":"1"}' From 6664891531d8ecf999abed6da51ef8c547b68367 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 14:26:26 +0530 Subject: [PATCH 054/616] fix: use cloud_storage_url var as a base for upstream_url --- ansible/inventory/env/group_vars/all.yml | 4 +++- ansible/roles/stack-sunbird/defaults/main.yml | 3 ++- private_repo/ansible/inventory/dev/Core/common.yml | 7 ++++--- 3 files 
changed, 9 insertions(+), 5 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 4b0975b4b2..96337704c7 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -509,7 +509,9 @@ sunbird_portal_updateLoginTimeEnabled: false # Desktop app vars #sunbird_offline_azure_storage_account: "" #added this var for adopter usecase offline_installer_container_name: "{{env}}-offlineinstaller" -cloud_storage_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net" + +# SB-31155 +#cloud_storage_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net" # Search-service search_index_host: "{{ groups['composite-search-cluster']|join(':9200,')}}:9200" diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 8936a190ec..6196962b3e 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -78,7 +78,8 @@ sunbird_portal_cdn_url: sunbird_dataservice_url: sunbird_background_actor_port: sunbird_app_url: -sunbird_image_storage_url: +# SB-31155 +#sunbird_image_storage_url: sunbird_telemetry_dispatchers: kafka content_service_whitelisted_channels: content_service_blacklisted_channels: diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index 650e04c0e4..811677d8c1 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -46,11 +46,11 @@ gcloud_private_bucket_projectId: "" # Uncomment the variable based on your cloud provider (as a default we have kept Azure variable uncommented) # GCP -# upstream_url: https://storage.cloud.google.com/{{ gcloud_public_bucket_name }}/{{ content_storage }} +# cloud_storage_url: https://storage.cloud.google.com/{{ gcloud_public_bucket_name }} # AWS -# upstream_url: # Geetha to fill this url 
based on AWS role vars +# cloud_storage_url: # Geetha to fill this url based on AWS role vars # Azure -upstream_url: "https://{{sunbird_azure_public_storage_account_name}}.blob.core.windows.net/{{ content_storage }}" # Proxy url to get /assets/public +cloud_storage_url: "https://{{ sunbird_public_storage_account_name }}.blob.core.windows.net" # ------------------------------------------------------------------------------------------------------------ # # Cloud / Infra Specific values - Check these and update accordingly @@ -125,6 +125,7 @@ postgres: db_admin_password: "{{core_vault_postgres_password}}" # Generic variable for any cloud provider +upstream_url: "{{ cloud_storage_url }}/{{ content_storage }}" # Proxy url to get /assets/public content_storage: "{{ sunbird_content_azure_storage_container }}" # Azure account related vars From edc608e04a049a30a5abfe10e0539f34aed0574e Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 14:33:37 +0530 Subject: [PATCH 055/616] fix: generalizing dial_service_schema_base_path var Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 96337704c7..c27674492f 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -556,5 +556,9 @@ sunbird_trampoline_android_keycloak_client_id: trampoline-android sunbird_trampoline_desktop_keycloak_client_id: trampoline-desktop # DIAL-service schema +# SB-31155 - This should be deprecated in future in favour of dial_plugin_storage dial_plugin_container_name: "sunbird-dial-{{env}}" -dial_service_schema_base_path: "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{dial_plugin_container_name}}/jsonld-schema/local" + +# SB-31155 - Adding a generialzed variable which can be used for any CSP +dial_plugin_storage: "{{ 
dial_plugin_container_name }}" +dial_service_schema_base_path: "{{ cloud_storage_url }}/{{ dial_plugin_storage }}/jsonld-schema/local" From 2e9403e531e09cb76cc7190c3ca12a3b1fc1cc0c Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 14:37:20 +0530 Subject: [PATCH 056/616] fix: generalized ml_analytics_evidence_base_url var Signed-off-by: Keshav Prasad --- ansible/roles/ml-analytics-service/defaults/main.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 5c5d87dace..b3ede22f5d 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -10,8 +10,14 @@ ml_analytics_survey_app_name: "{{ ml_survey_app_name | default('sunbirdsurvey') ml_analytics_integrated_app_name: "{{ ml_integrated_app_name | default('sunbird') }}" ml_analytics_integrated_portal: "{{ ml_integrated_portal | default('dev.sunbird.portal') }}" ml_analytics_survey_service: "http://{{private_ingressgateway_ip}}/ml-survey" + +# SB-31155 +# This should be deprecated in future in favour of ml_analytics_public_storage ml_analytics_public_container: "{{ ml_analytics_container | default('samiksha') }}" -ml_analytics_evidence_base_url: "https://{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ ml_analytics_public_container }}/" + +# SB-31155 - Adding a generialzed variable which can be used for any CSP +ml_analytics_public_storage: "{{ ml_analytics_public_container }}" +ml_analytics_evidence_base_url: "{{ cloud_storage_url }}/{{ ml_analytics_public_storage }}/" ml_analytics_mongodb_url: "{{ml_mongodb_host | default(groups['mongo_master'][0]+':27017')}}" ml_analytics_mongo_db_name: "{{ml_mongodb | default('ml-survey')}}" ml_analytics_mongo_observation_submission_collection: "observationSubmissions" From 8072b863cfd82b4083c2724064b90d2d6ddcec45 Mon Sep 17 00:00:00 2001 From: 
Keshav Prasad Date: Mon, 14 Nov 2022 14:51:46 +0530 Subject: [PATCH 057/616] fix: removed unnessary vars and redefine in all.yml Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 9 ++++++++- ansible/roles/stack-proxy/defaults/main.yml | 9 +-------- ansible/roles/stack-sunbird/defaults/main.yml | 8 -------- .../templates/sunbird_knowledge-mw-service.env | 1 - .../ansible/roles/helm-daemonset/defaults/main.yml | 7 ------- kubernetes/ansible/roles/helm-deploy/defaults/main.yml | 6 ------ 6 files changed, 9 insertions(+), 31 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index c27674492f..f73d598ce2 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -376,7 +376,6 @@ sunbird_otp_length: 6 sunbird_help_link_visibility: FALSE # not required -sunbird_image_storage_url: "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/dial/" vault_auth_key: "{{core_vault_auth_key}}" vault_sender: "{{core_vault_sender}}" vault_country: "{{core_vault_country}}" @@ -508,8 +507,16 @@ sunbird_portal_updateLoginTimeEnabled: false # Desktop app vars #sunbird_offline_azure_storage_account: "" #added this var for adopter usecase + +# SB-31155 - This should be deprecated in future in favour of offline_installer_storage offline_installer_container_name: "{{env}}-offlineinstaller" +# SB-31155 - Adding a generialzed variable which can be used for any CSP +offline_installer_storage: "{{ offline_installer_container_name }}" + +# SB-31155 - Removed multiple declarations and moved here +sunbird_offline_azure_storage_account_url: "{{ cloud_storage_url }}/{{ offline_installer_storage }}" + # SB-31155 #cloud_storage_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net" diff --git a/ansible/roles/stack-proxy/defaults/main.yml b/ansible/roles/stack-proxy/defaults/main.yml index dd4bda152d..ab1524cf72 100644 --- 
a/ansible/roles/stack-proxy/defaults/main.yml +++ b/ansible/roles/stack-proxy/defaults/main.yml @@ -37,11 +37,4 @@ prometheus_route_prefix: prometheus prometheus_alertmanager_route_prefix: alertmanager ekstep_s3_env: "{{sunbird_env}}" registry_url: "{{proto}}://{{proxy_server_name}}/registry" -kibana_service: "{{swarm_dashboard}}:5601" - -# SB-31155 -#upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" - -# SB-31155 -#plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" -sunbird_offline_azure_storage_account_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ offline_installer_container_name }}" +kibana_service: "{{swarm_dashboard}}:5601" \ No newline at end of file diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 6196962b3e..97ffbc1584 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -78,8 +78,6 @@ sunbird_portal_cdn_url: sunbird_dataservice_url: sunbird_background_actor_port: sunbird_app_url: -# SB-31155 -#sunbird_image_storage_url: sunbird_telemetry_dispatchers: kafka content_service_whitelisted_channels: content_service_blacklisted_channels: @@ -213,12 +211,6 @@ prometheus_alertmanager_route_prefix: alertmanager ekstep_s3_env: "{{sunbird_env}}" registry_url: "{{proto}}://{{proxy_server_name}}/registry" -# SB-31155 -#upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" - -# SB-31155 -#plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" -sunbird_offline_azure_storage_account_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ offline_installer_container_name }}" # Override this dictionary in your common.yaml proxy: # repository: 'proxy' diff --git a/ansible/roles/stack-sunbird/templates/sunbird_knowledge-mw-service.env 
b/ansible/roles/stack-sunbird/templates/sunbird_knowledge-mw-service.env index 6a13ddadfd..c7b0533c2a 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_knowledge-mw-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_knowledge-mw-service.env @@ -7,7 +7,6 @@ sunbird_keycloak_client_id={{sunbird_keycloak_client_id}} sunbird_keycloak_public={{sunbird_keycloak_public}} sunbird_cache_store={{sunbird_cache_store}} sunbird_cache_ttl={{sunbird_cache_ttl}} -sunbird_image_storage_url={{sunbird_image_storage_url}} sunbird_azure_account_name={{sunbird_public_storage_account_name}} sunbird_azure_account_key={{sunbird_public_storage_account_key}} sunbird_dial_code_registry_url=https://{{proxy_server_name}}/dial/ diff --git a/kubernetes/ansible/roles/helm-daemonset/defaults/main.yml b/kubernetes/ansible/roles/helm-daemonset/defaults/main.yml index 9822dd5626..511f5c2acd 100644 --- a/kubernetes/ansible/roles/helm-daemonset/defaults/main.yml +++ b/kubernetes/ansible/roles/helm-daemonset/defaults/main.yml @@ -32,10 +32,3 @@ prometheus_alertmanager_route_prefix: alertmanager ekstep_s3_env: "{{sunbird_env}}" registry_url: "{{proto}}://{{proxy_server_name}}/registry" ep_es_host: - -# SB-31155 -#upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" - -# SB-31155 -#plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" -sunbird_offline_azure_storage_account_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ offline_installer_container_name }}" diff --git a/kubernetes/ansible/roles/helm-deploy/defaults/main.yml b/kubernetes/ansible/roles/helm-deploy/defaults/main.yml index bf1fed2ff7..2e19f88194 100644 --- a/kubernetes/ansible/roles/helm-deploy/defaults/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/defaults/main.yml @@ -32,13 +32,7 @@ registry_url: "{{proto}}://{{proxy_server_name}}/registry" ep_es_host: proxy_custom_config: -# SB-31155 -#upstream_url: 
"ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" - -# SB-31155 -#plugin_upstream_url: "ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com" discussion_upstream_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net" -sunbird_offline_azure_storage_account_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net/{{ offline_installer_container_name }}" proxy_cache_path: large_cache: From 52539a42dc62fc8baf76f7d9054b5153402b4f60 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 14:57:28 +0530 Subject: [PATCH 058/616] fix: updated desktop_app_storage_url var reference Signed-off-by: Keshav Prasad --- ansible/roles/stack-sunbird/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 97ffbc1584..8b600f04a1 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -82,7 +82,7 @@ sunbird_telemetry_dispatchers: kafka content_service_whitelisted_channels: content_service_blacklisted_channels: sunbird_env_logo_url: -desktop_app_storage_url: "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{offline_installer_container_name}}" +desktop_app_storage_url: "{{ sunbird_offline_azure_storage_account_url }}" telemetry_logstash_heap_size: 512m telemetry_logstash_replicas: 1 From c40dc026dbc5c4e7cbc89e141452cf7d53d6707a Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 15:03:13 +0530 Subject: [PATCH 059/616] fix: generalized h5p_library_path var Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 3 +++ .../stack-sunbird/templates/content-service_application.conf | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index f73d598ce2..f1ed8628f2 100644 --- 
a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -569,3 +569,6 @@ dial_plugin_container_name: "sunbird-dial-{{env}}" # SB-31155 - Adding a generialzed variable which can be used for any CSP dial_plugin_storage: "{{ dial_plugin_container_name }}" dial_service_schema_base_path: "{{ cloud_storage_url }}/{{ dial_plugin_storage }}/jsonld-schema/local" + +# SB-31155 - Moved to the installation public container for now (same place where keycloaka and java artifacts are stored) +h5p_library_path: "https://sunbirdpublic.blob.core.windows.net/installation/h5p-standalone-1.3.4.zip" \ No newline at end of file diff --git a/ansible/roles/stack-sunbird/templates/content-service_application.conf b/ansible/roles/stack-sunbird/templates/content-service_application.conf index d33dbecf6f..3fa9ba9a99 100644 --- a/ansible/roles/stack-sunbird/templates/content-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/content-service_application.conf @@ -382,7 +382,7 @@ content { } h5p { library { - path: "{{ h5p_library_path | default('https://sunbirddev.blob.core.windows.net/sunbird-content-dev/h5p-standalone-1.3.4.zip') }}" + path: "{{ h5p_library_path }}" } } copy { From f6fa71b055c503c3b1a3792ce23d1727afa73790 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 15:14:37 +0530 Subject: [PATCH 060/616] fix: updated player.env to use generalized var Signed-off-by: Keshav Prasad --- ansible/roles/stack-sunbird/templates/sunbird_player.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_player.env b/ansible/roles/stack-sunbird/templates/sunbird_player.env index 0d1c34f45b..2afd3af54c 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_player.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_player.env @@ -162,7 +162,7 @@ sunbird_base_proto={{sunbird_base_proto | default(proto)}} ml_survey_url={{ml_survey_url|default(proto+'://' +domain_name) 
}} #Release-4.1.0 -sunbird_azure_storage_account_name=https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/ +sunbird_azure_storage_account_name={{ cloud_storage_url }}/ #Release-4.1.0 sunbird_google_oauth_ios_clientId={{sunbird_google_oauth_ios_clientId | default("")}} From cdb0b41cac68f4a370017150ed8cbe8f4a479007 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 15:18:21 +0530 Subject: [PATCH 061/616] fix: generalized discussion_upstream_url Signed-off-by: Keshav Prasad --- kubernetes/ansible/roles/helm-deploy/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/ansible/roles/helm-deploy/defaults/main.yml b/kubernetes/ansible/roles/helm-deploy/defaults/main.yml index 2e19f88194..6c5c925747 100644 --- a/kubernetes/ansible/roles/helm-deploy/defaults/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/defaults/main.yml @@ -32,7 +32,7 @@ registry_url: "{{proto}}://{{proxy_server_name}}/registry" ep_es_host: proxy_custom_config: -discussion_upstream_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net" +discussion_upstream_url: "{{ cloud_storage_url }}" proxy_cache_path: large_cache: From f409d6f1eba6f7ca1ef034749aa12c85220b34f7 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 15:23:53 +0530 Subject: [PATCH 062/616] fix: generalizing CACHE_CONTEXT_URLS Signed-off-by: Keshav Prasad --- kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 b/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 index 9895ebf05c..62c9114a76 100644 --- a/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 +++ b/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 @@ -35,4 +35,4 @@ certificatesignenv: CERTIFICATE_PRIVATE_KEY: |- {{ CERTIFICATE_PRIVATE_KEY | default("''") | indent(width=4) }} SIGNING_KEY_TYPE: "{{ 
SIGNING_KEY_TYPE|default('RSA')}}" - CACHE_CONTEXT_URLS: "{{ cache_context_urls | default('https://' + sunbird_public_storage_account_name + '.blob.core.windows.net/' + sunbird_content_azure_storage_container + '/schema/v1_context.json,https://' + sunbird_public_storage_account_name + '.blob.core.windows.net/' + sunbird_content_azure_storage_container + '/schema/sunbird_context.json,https://' + sunbird_public_storage_account_name + '.blob.core.windows.net/' + sunbird_content_azure_storage_container + '/schema/credential_template.json')}}" \ No newline at end of file + CACHE_CONTEXT_URLS: "{{ cache_context_urls | default(upstream_url + '/schema/v1_context.json,upstream_url + '/schema/sunbird_context.json,upstream_url + '/schema/credential_template.json')}}" \ No newline at end of file From ebaa1771e20bdcde6b879355c9d70a47a24ef198 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 15:25:28 +0530 Subject: [PATCH 063/616] fix: generalizing credentialTemplate param Signed-off-by: Keshav Prasad --- .../sunbird-RC/registry/schemas/ProjectCertificate.json | 2 +- .../sunbird-RC/registry/schemas/TrainingCertificate.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json index 98f3bb91e9..0035464abb 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json +++ b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json @@ -70,6 +70,6 @@ ], "systemFields": ["osCreatedAt", "osUpdatedAt", "osCreatedBy", "osUpdatedBy"], "enableLogin": false, - "credentialTemplate": "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_credential_template.json" + "credentialTemplate": "{{ upstream_url }}/schema/project_credential_template.json" } } \ No newline at end of file diff --git 
a/kubernetes/helm_charts/sunbird-RC/registry/schemas/TrainingCertificate.json b/kubernetes/helm_charts/sunbird-RC/registry/schemas/TrainingCertificate.json index 2906929f2d..5187b08e81 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/schemas/TrainingCertificate.json +++ b/kubernetes/helm_charts/sunbird-RC/registry/schemas/TrainingCertificate.json @@ -69,6 +69,6 @@ ], "systemFields": ["osCreatedAt", "osUpdatedAt", "osCreatedBy", "osUpdatedBy"], "enableLogin": false, - "credentialTemplate": "https://{{upstream_url}}/schema/credential_template.json" + "credentialTemplate": "https://{{ upstream_url }}/schema/credential_template.json" } } From 68111485f45ae603223a68f2c07f655a97dcdf1a Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 15:34:32 +0530 Subject: [PATCH 064/616] fix: generalizing kp_schema_base_path and updating reference to upstream_url Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 2 -- .../ansible/inventory/dev/Core/common.yml | 4 ++-- .../inventory/dev/KnowledgePlatform/common.yml | 17 +++++++++++++++-- .../sunbird-RC/schema/credential_template.json | 4 ++-- 4 files changed, 19 insertions(+), 8 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index f1ed8628f2..238ab4dd65 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -36,8 +36,6 @@ sunbird_keycloak_required_action_link_expiration_seconds: 2592000 sunbird_es_port: 9300 mail_server_port: 587 -# SB-31155 -#upstream_url: "{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}" # Learner sunbird_user_profile_field_default_visibility: private diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index 811677d8c1..594a415e25 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ 
b/private_repo/ansible/inventory/dev/Core/common.yml @@ -130,11 +130,11 @@ content_storage: "{{ sunbird_content_azure_storage_container }}" # Azure account related vars sunbird_azure_public_storage_account_name: "{{ sunbird_public_storage_account_name }}" -plugin_upstream_url: "{{upstream_url}}" +plugin_upstream_url: "{{ upstream_url }}" azure_plugin_storage_account_name: "{{sunbird_azure_public_storage_account_name}}" azure_plugin_storage_account_key: "{{sunbird_public_storage_account_key}}" plugin_container_name: "{{sunbird_content_azure_storage_container}}" -kp_schema_base_path: "{{proto}}://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{plugin_container_name}}/schemas/local" +kp_schema_base_path: "{{ upstream_url }}/schemas/local" keycloak_api_management_user_email: "admin@sunbird.org" sunbird_installation_email: "admin@sunbird.org" diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml index 7f21987f82..94df144c58 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml @@ -29,8 +29,21 @@ neo4j_enterprise: false # Set this to true if you use # Sensible defaults which you need not change - But if you would like to change, you are free to do so ekstep_domain_name: "{{ proto }}://{{ domain_name }}" artifacts_container: artifacts -plugin_container_name: "{{azure_public_container}}" -kp_schema_base_path: "{{proto}}://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{plugin_container_name}}/schemas/local" + +# Uncomment the variable based on your cloud provider (as a default we have kept Azure variable uncommented) +# GCP +# cloud_storage_url: https://storage.cloud.google.com/{{ gcloud_public_bucket_name }} +# AWS +# cloud_storage_url: # Geetha to fill this url based on AWS role vars +# Azure +cloud_storage_url: "https://{{ sunbird_public_storage_account_name 
}}.blob.core.windows.net" + +plugin_container_name: "{{ azure_public_container }}" + +# Generic variable for any cloud provider +plugin_storage: "{{ plugin_container_name }}" + +kp_schema_base_path: "{{ cloud_storage_url }}/{{ plugin_storage }}/schemas/local" imagepullsecrets: "{{env}}registrysecret" # kubernetes imagePullSecrets kubeconfig_path: /var/lib/jenkins/secrets/k8s.yaml # kubeconfig file path on jenkins diff --git a/utils/sunbird-RC/schema/credential_template.json b/utils/sunbird-RC/schema/credential_template.json index 8f467986cc..f96a3c0528 100644 --- a/utils/sunbird-RC/schema/credential_template.json +++ b/utils/sunbird-RC/schema/credential_template.json @@ -1,7 +1,7 @@ { "@context": [ - "https://{{upstream_url}}/schema/v1_context.json", - "https://{{upstream_url}}/schema/sunbird_context.json" + "https://{{ upstream_url }}/schema/v1_context.json", + "https://{{ upstream_url }}/schema/sunbird_context.json" ], "type": [ "VerifiableCredential" From 511382e55312e0cb7c87977f81873524c2d3e87e Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 15:36:45 +0530 Subject: [PATCH 065/616] fix: updated references of templates to upstream_url Signed-off-by: Keshav Prasad --- utils/sunbird-RC/schema/project_credential_template.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/sunbird-RC/schema/project_credential_template.json b/utils/sunbird-RC/schema/project_credential_template.json index d9a520d5da..6041f69d41 100644 --- a/utils/sunbird-RC/schema/project_credential_template.json +++ b/utils/sunbird-RC/schema/project_credential_template.json @@ -1,8 +1,8 @@ { "@context": [ "https://www.w3.org/2018/credentials/v1", - "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_v1_context.json", - "https://{{sunbird_public_storage_account_name}}.blob.core.windows.net/{{sunbird_content_azure_storage_container}}/schema/project_sunbird_context.json" + "{{ 
upstream_url }}/schema/project_v1_context.json", + "{{ upstream_url }}/schema/project_sunbird_context.json" ], "type": [ "VerifiableCredential" From dec5289aed91445edddde3a235b29b2e8d0a9410 Mon Sep 17 00:00:00 2001 From: Surabhi Date: Mon, 14 Nov 2022 16:04:05 +0530 Subject: [PATCH 066/616] sunbird cdn configuration --- ansible/roles/stack-sunbird/templates/inbound.env | 13 ++++++++++--- .../roles/stack-sunbird/templates/transformer.env | 14 +++++++++++--- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/ansible/roles/stack-sunbird/templates/inbound.env b/ansible/roles/stack-sunbird/templates/inbound.env index c8ed1a5157..c9bc2033a8 100644 --- a/ansible/roles/stack-sunbird/templates/inbound.env +++ b/ansible/roles/stack-sunbird/templates/inbound.env @@ -55,12 +55,19 @@ REDIS_DB_INDEX={{redis_db_index_uci | default('7')}} #Azure Config AZURE_BLOB_STORE_CONTAINER={{sunbird_azure_uci_container_name | default('uci-' + env )}} -AZURE_BLOB_STORE_ACCOUNT_KEY={{sunbird_private_storage_account_name}} -AZURE_BLOB_STORE_ACCOUNT_NAME={{sunbird_private_storage_account_key}} -SELECTED_FILE_CDN=azure +AZURE_BLOB_STORE_ACCOUNT_NAME={{sunbird_private_storage_account_name}} +AZURE_BLOB_STORE_ACCOUNT_KEY={{sunbird_private_storage_account_key}} #Netcore NETCORE_WHATSAPP_AUTH_TOKEN={{uci_netcore_whatsapp_token}} NETCORE_WHATSAPP_SOURCE={{uci_netcore_whatsapp_source}} NETCORE_WHATSAPP_URI={{uci_netcore_whatsapp_uri | default('https://waapi.pepipost.com/api/v2/')}} +#Sunbird CDN Configuration +SUNBIRD_CLOUD_MEDIA_STORAGE_TYPE=azure +SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{sunbird_private_storage_account_name}} +SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{sunbird_private_storage_account_key}} +SUNBIRD_CLOUD_MEDIA_STORAGE_CONTAINER={{sunbird_azure_uci_container_name | default('uci-' + env )}} + +#Selected CDN Configuration +SELECTED_FILE_CDN=sunbird diff --git a/ansible/roles/stack-sunbird/templates/transformer.env b/ansible/roles/stack-sunbird/templates/transformer.env index 
fd2e6d00aa..f5c2f7b3a5 100644 --- a/ansible/roles/stack-sunbird/templates/transformer.env +++ b/ansible/roles/stack-sunbird/templates/transformer.env @@ -72,6 +72,14 @@ POSTHOG_EVENT_ENABLED=FALSE #Azure Config AZURE_BLOB_STORE_CONTAINER={{sunbird_azure_uci_container_name | default('uci-' + env )}} -AZURE_BLOB_STORE_ACCOUNT_KEY={{sunbird_private_storage_account_name}} -AZURE_BLOB_STORE_ACCOUNT_NAME={{sunbird_private_storage_account_key}} -SELECTED_FILE_CDN=azure \ No newline at end of file +AZURE_BLOB_STORE_ACCOUNT_NAME={{sunbird_private_storage_account_name}} +AZURE_BLOB_STORE_ACCOUNT_KEY={{sunbird_private_storage_account_key}} + +#Sunbird CDN Configuration +SUNBIRD_CLOUD_MEDIA_STORAGE_TYPE=azure +SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{sunbird_private_storage_account_name}} +SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{sunbird_private_storage_account_key}} +SUNBIRD_CLOUD_MEDIA_STORAGE_CONTAINER={{sunbird_azure_uci_container_name | default('uci-' + env )}} + +#Selected CDN Configuration +SELECTED_FILE_CDN=sunbird \ No newline at end of file From 0dbae8510053debc98081a5a0e4c8a34848b028f Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 16:13:25 +0530 Subject: [PATCH 067/616] fix: remove unnessary vars Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 8 -------- .../stack-sunbird/templates/sunbird_learner-service.env | 1 - .../roles/stack-sunbird/templates/sunbird_lms-service.env | 1 - private_repo/ansible/inventory/dev/Core/common.yml | 2 +- 4 files changed, 1 insertion(+), 11 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 238ab4dd65..9fe037507c 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -408,8 +408,6 @@ sunbird_health_check_enable: 'true' ## Release 1.15 ## sunbird_keycloak_user_federation_provider_id: "{{core_vault_sunbird_keycloak_user_federation_provider_id}}" -# Learner-service 
-sunbird_course_metrics_base_url: https://{{sunbird_private_storage_account_name}}.blob.core.windows.net/ sunbird_gzip_size_threshold: 262144 prometheus_mount_point: "/root/dockerdata/prometheus/data/" @@ -503,9 +501,6 @@ content_import_remove_props: '["downloadUrl","variants","previewUrl","streamingU #Sunbird-Portal release-2.6.5 # sunbird_portal_updateLoginTimeEnabled: false -# Desktop app vars -#sunbird_offline_azure_storage_account: "" #added this var for adopter usecase - # SB-31155 - This should be deprecated in future in favour of offline_installer_storage offline_installer_container_name: "{{env}}-offlineinstaller" @@ -515,9 +510,6 @@ offline_installer_storage: "{{ offline_installer_container_name }}" # SB-31155 - Removed multiple declarations and moved here sunbird_offline_azure_storage_account_url: "{{ cloud_storage_url }}/{{ offline_installer_storage }}" -# SB-31155 -#cloud_storage_url: "{{ sunbird_public_storage_account_name }}.blob.core.windows.net" - # Search-service search_index_host: "{{ groups['composite-search-cluster']|join(':9200,')}}:9200" compositesearch_index_name: "compositesearch" diff --git a/ansible/roles/stack-sunbird/templates/sunbird_learner-service.env b/ansible/roles/stack-sunbird/templates/sunbird_learner-service.env index 79d0bfe1e3..3b6a3f122e 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_learner-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_learner-service.env @@ -86,7 +86,6 @@ sunbird_time_zone={{sunbird_time_zone}} sunbird_health_check_enable={{sunbird_health_check_enable}} sunbird_keycloak_user_federation_provider_id={{core_vault_sunbird_keycloak_user_federation_provider_id}} sunbird_gzip_enable={{sunbird_gzip_enable}} -sunbird_course_metrics_base_url={{sunbird_course_metrics_base_url}} sunbird_gzip_size_threshold={{sunbird_gzip_size_threshold | default(262144)}} sunbird_analytics_blob_account_name={{sunbird_private_storage_account_name}} 
sunbird_analytics_blob_account_key={{sunbird_private_storage_account_key}} diff --git a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env index 6b790eb735..1b3fdba3ca 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env @@ -87,7 +87,6 @@ sunbird_time_zone={{sunbird_time_zone}} sunbird_health_check_enable={{sunbird_health_check_enable}} sunbird_keycloak_user_federation_provider_id={{core_vault_sunbird_keycloak_user_federation_provider_id}} sunbird_gzip_enable={{sunbird_gzip_enable}} -sunbird_course_metrics_base_url={{sunbird_course_metrics_base_url}} sunbird_gzip_size_threshold={{sunbird_gzip_size_threshold | default(262144)}} sunbird_analytics_blob_account_name={{sunbird_private_storage_account_name}} sunbird_analytics_blob_account_key={{sunbird_private_storage_account_key}} diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index 594a415e25..b73c466a45 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -125,7 +125,7 @@ postgres: db_admin_password: "{{core_vault_postgres_password}}" # Generic variable for any cloud provider -upstream_url: "{{ cloud_storage_url }}/{{ content_storage }}" # Proxy url to get /assets/public +upstream_url: "{{ cloud_storage_url }}/{{ content_storage }}" content_storage: "{{ sunbird_content_azure_storage_container }}" # Azure account related vars From 52004154e58728c131ff6dcfa8e85693df037ec8 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 16:20:22 +0530 Subject: [PATCH 068/616] fix: reordered comments Signed-off-by: Keshav Prasad --- private_repo/ansible/inventory/dev/Core/common.yml | 7 +++++-- .../ansible/inventory/dev/KnowledgePlatform/common.yml | 3 ++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git 
a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index b73c466a45..781d798112 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -77,8 +77,13 @@ proto: https # http or https, preferably https sunbird_default_channel: sunbird # default sunbird channel name environment_id: "10000003" # A 8 digit number for example like 1000000, should be same as defined in KP common.yml +# SB-31155 - This should be deprecated in future in favour of content_storage sunbird_content_azure_storage_container: contents # Azure container name for storing public data (like contents), should be same as azure_public_container defined in KP common.yml +# SB-31155 - Adding a generialzed variable which can be used for any CSP +content_storage: "{{ sunbird_content_azure_storage_container }}" + + # This sms sender id should be verified by your provider. This is the sender id which will be used for `From Address`. 
For example, # # From: SBSMS @@ -124,9 +129,7 @@ postgres: db_admin_user: postgres db_admin_password: "{{core_vault_postgres_password}}" -# Generic variable for any cloud provider upstream_url: "{{ cloud_storage_url }}/{{ content_storage }}" -content_storage: "{{ sunbird_content_azure_storage_container }}" # Azure account related vars sunbird_azure_public_storage_account_name: "{{ sunbird_public_storage_account_name }}" diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml index 94df144c58..e4fdbf2b14 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml @@ -38,9 +38,10 @@ artifacts_container: artifacts # Azure cloud_storage_url: "https://{{ sunbird_public_storage_account_name }}.blob.core.windows.net" +# SB-31155 - This should be deprecated in future in favour of plugin_storage plugin_container_name: "{{ azure_public_container }}" -# Generic variable for any cloud provider +# SB-31155 - Adding a generialzed variable which can be used for any CSP plugin_storage: "{{ plugin_container_name }}" kp_schema_base_path: "{{ cloud_storage_url }}/{{ plugin_storage }}/schemas/local" From d94747a96b9b173b03c216fa351543c0952b7302 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 16:26:07 +0530 Subject: [PATCH 069/616] fix: typo fix Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 4 ++-- ansible/roles/ml-analytics-service/defaults/main.yml | 2 +- private_repo/ansible/inventory/dev/Core/common.yml | 2 +- .../ansible/inventory/dev/KnowledgePlatform/common.yml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 9fe037507c..9e268e1168 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -504,7 +504,7 @@ 
sunbird_portal_updateLoginTimeEnabled: false # SB-31155 - This should be deprecated in future in favour of offline_installer_storage offline_installer_container_name: "{{env}}-offlineinstaller" -# SB-31155 - Adding a generialzed variable which can be used for any CSP +# SB-31155 - Adding a generalized variable which can be used for any CSP offline_installer_storage: "{{ offline_installer_container_name }}" # SB-31155 - Removed multiple declarations and moved here @@ -556,7 +556,7 @@ sunbird_trampoline_desktop_keycloak_client_id: trampoline-desktop # SB-31155 - This should be deprecated in future in favour of dial_plugin_storage dial_plugin_container_name: "sunbird-dial-{{env}}" -# SB-31155 - Adding a generialzed variable which can be used for any CSP +# SB-31155 - Adding a generalized variable which can be used for any CSP dial_plugin_storage: "{{ dial_plugin_container_name }}" dial_service_schema_base_path: "{{ cloud_storage_url }}/{{ dial_plugin_storage }}/jsonld-schema/local" diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index b3ede22f5d..fd5e62c5f4 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -15,7 +15,7 @@ ml_analytics_survey_service: "http://{{private_ingressgateway_ip}}/ml-survey" # This should be deprecated in future in favour of ml_analytics_public_storage ml_analytics_public_container: "{{ ml_analytics_container | default('samiksha') }}" -# SB-31155 - Adding a generialzed variable which can be used for any CSP +# SB-31155 - Adding a generalized variable which can be used for any CSP ml_analytics_public_storage: "{{ ml_analytics_public_container }}" ml_analytics_evidence_base_url: "{{ cloud_storage_url }}/{{ ml_analytics_public_storage }}/" ml_analytics_mongodb_url: "{{ml_mongodb_host | default(groups['mongo_master'][0]+':27017')}}" diff --git a/private_repo/ansible/inventory/dev/Core/common.yml 
b/private_repo/ansible/inventory/dev/Core/common.yml index 781d798112..831a444922 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -80,7 +80,7 @@ environment_id: "10000003" # A 8 digit number fo # SB-31155 - This should be deprecated in future in favour of content_storage sunbird_content_azure_storage_container: contents # Azure container name for storing public data (like contents), should be same as azure_public_container defined in KP common.yml -# SB-31155 - Adding a generialzed variable which can be used for any CSP +# SB-31155 - Adding a generalized variable which can be used for any CSP content_storage: "{{ sunbird_content_azure_storage_container }}" diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml index e4fdbf2b14..831eaf7c4a 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml @@ -41,7 +41,7 @@ cloud_storage_url: "https://{{ sunbird_public_storage_account_name }}.blob.core. 
# SB-31155 - This should be deprecated in future in favour of plugin_storage plugin_container_name: "{{ azure_public_container }}" -# SB-31155 - Adding a generialzed variable which can be used for any CSP +# SB-31155 - Adding a generalized variable which can be used for any CSP plugin_storage: "{{ plugin_container_name }}" kp_schema_base_path: "{{ cloud_storage_url }}/{{ plugin_storage }}/schemas/local" From 0a7856c7144da6b2f763e79717ac36ff46b7b686 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 16:35:00 +0530 Subject: [PATCH 070/616] fix: remove duplicate vars Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 9e268e1168..c479f6693b 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -85,7 +85,6 @@ keycloak_postgres_host: "{{groups['postgres'][0]}}" #Private IP of Postgres ser kong_postgres_host: "{{groups['postgres'][0]}}" #Private IP of Postgres server uci_postgres_host: "{{groups['postgres'][0]}}" #Private IP of Postgres server sunbird_cassandra_host: "{{groups['cassandra']|join(',')}}" #Private IP of Cassandra server -sunbird_es_host: "{{groups['es']| join(',')}}" ## Application server configurations sunbird_analytics_api_base_url: "http://analytics-service.{{namespace}}.svc.cluster.local:9000" @@ -339,13 +338,11 @@ kong__test_jwt: "{{ core_vault_sunbird_api_auth_token }}" ####### App ES ######## app_es_etc_cluster_name: "{{env}}" app_es_etc_discovery_zen_minimum_master_nodes: "{{groups['es']| length | int}}" -app_es_snapshot_host: "{{ groups['es'][0] }}" app_es_restore_host: "{{ groups['es'][0] }}" app_es_snapshot_base_path: application #######Log Es log_es_etc_cluster_name: "{{env}}-log" -log_es_snapshot_host: "{{ groups['log-es'][0] }}" log_es_restore_host: "{{ groups['log-es'][0] }}" log_es_host: "{{ groups['log-es'][0] }}" 
From 4867b294f19eae2df99bfb33a8541e74f2ce6926 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 14 Nov 2022 18:13:25 +0530 Subject: [PATCH 071/616] fix: moving few vars to default Signed-off-by: Keshav Prasad --- ansible/inventory/env/group_vars/all.yml | 7 ++++++- private_repo/ansible/inventory/dev/Core/common.yml | 8 +------- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index c479f6693b..1aaa166d5c 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -557,5 +557,10 @@ dial_plugin_container_name: "sunbird-dial-{{env}}" dial_plugin_storage: "{{ dial_plugin_container_name }}" dial_service_schema_base_path: "{{ cloud_storage_url }}/{{ dial_plugin_storage }}/jsonld-schema/local" +# SB-31155 - Moving few vars from private repo template to here +content_storage: "{{ sunbird_content_azure_storage_container }}" +upstream_url: "{{ cloud_storage_url }}/{{ content_storage }}" +plugin_upstream_url: "{{ upstream_url }}" +kp_schema_base_path: "{{ upstream_url }}/schemas/local" # SB-31155 - Moved to the installation public container for now (same place where keycloaka and java artifacts are stored) -h5p_library_path: "https://sunbirdpublic.blob.core.windows.net/installation/h5p-standalone-1.3.4.zip" \ No newline at end of file +h5p_library_path: "https://sunbirdpublic.blob.core.windows.net/installation/h5p-standalone-1.3.4.zip" diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index 831a444922..286c957102 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -77,12 +77,9 @@ proto: https # http or https, preferably https sunbird_default_channel: sunbird # default sunbird channel name environment_id: "10000003" # A 8 digit number for example like 1000000, should be same as defined in KP 
common.yml -# SB-31155 - This should be deprecated in future in favour of content_storage +# SB-31155 - This should be deprecated in future in favour of content_storage defined in all.yml sunbird_content_azure_storage_container: contents # Azure container name for storing public data (like contents), should be same as azure_public_container defined in KP common.yml -# SB-31155 - Adding a generalized variable which can be used for any CSP -content_storage: "{{ sunbird_content_azure_storage_container }}" - # This sms sender id should be verified by your provider. This is the sender id which will be used for `From Address`. For example, # @@ -129,15 +126,12 @@ postgres: db_admin_user: postgres db_admin_password: "{{core_vault_postgres_password}}" -upstream_url: "{{ cloud_storage_url }}/{{ content_storage }}" # Azure account related vars sunbird_azure_public_storage_account_name: "{{ sunbird_public_storage_account_name }}" -plugin_upstream_url: "{{ upstream_url }}" azure_plugin_storage_account_name: "{{sunbird_azure_public_storage_account_name}}" azure_plugin_storage_account_key: "{{sunbird_public_storage_account_key}}" plugin_container_name: "{{sunbird_content_azure_storage_container}}" -kp_schema_base_path: "{{ upstream_url }}/schemas/local" keycloak_api_management_user_email: "admin@sunbird.org" sunbird_installation_email: "admin@sunbird.org" From 996c091b92d728a2926b9147767ee103d87296b4 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 15 Nov 2022 16:45:27 +0530 Subject: [PATCH 072/616] fix: moved var to all.yml of LP repo Signed-off-by: Keshav Prasad --- .../ansible/inventory/dev/KnowledgePlatform/common.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml index 831eaf7c4a..b905d7b359 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml +++ 
b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml @@ -41,9 +41,6 @@ cloud_storage_url: "https://{{ sunbird_public_storage_account_name }}.blob.core. # SB-31155 - This should be deprecated in future in favour of plugin_storage plugin_container_name: "{{ azure_public_container }}" -# SB-31155 - Adding a generalized variable which can be used for any CSP -plugin_storage: "{{ plugin_container_name }}" - kp_schema_base_path: "{{ cloud_storage_url }}/{{ plugin_storage }}/schemas/local" imagepullsecrets: "{{env}}registrysecret" # kubernetes imagePullSecrets kubeconfig_path: /var/lib/jenkins/secrets/k8s.yaml # kubeconfig file path on jenkins From f55ab75992bea5ca7ab7b9c15854103f307df1f5 Mon Sep 17 00:00:00 2001 From: Ashwiniev95 Date: Wed, 16 Nov 2022 11:30:18 +0530 Subject: [PATCH 073/616] Add few new variables --- ansible/roles/ml-analytics-service/defaults/main.yml | 5 +++++ ansible/roles/ml-analytics-service/templates/config.j2 | 10 ++++++++++ 2 files changed, 15 insertions(+) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 5c5d87dace..f95a395b7d 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -99,3 +99,8 @@ ml_analytics_druid_observation_batch_ingestion_spec: '{"type":"index","spec":{"i ml_analytics_observation_batchupdate_azure_blob_path: "observation/batchDeletion" ml_analytics_observation_submission_id_filepath: "{{ WORKDIR }}/ml-analytics-service/observations/submissions.csv" ml_analytics_observation_batchupdate_output_dir: "{{ WORKDIR }}/source/observations/" +ml_analytics_druid_survey_query_spec : 
'{"queryType":"scan","dataSource":"sl-survey","resultFormat":"list","columns":["completedDate","createdAt","createdBy","criteriaExternalId","criteriaId","criteriaName","surveyId","surveyName","surveySubmissionId","questionAnswer","questionECM","questionExternalId","questionId","questionName","questionResponseLabel","questionResponseType","solutionExternalId","solutionId","solutionName","updatedAt","instanceParentId","instanceId","instanceParentResponsetype","instanceParentQuestion","questionSequenceByEcm","maxScore","minScore","percentageScore","pointsBasedScoreInParent","totalScore","scoreAchieved","totalpercentage","instanceParentExternalId","instanceParentEcmSequence","remarks","total_evidences","evidence_count","instanceParentCriteriaId","instanceParentCriteriaExternalId","instanceParentCriteriaName","isAPrivateProgram","programId","programName","programExternalId","questionResponseLabel_number","channel","parent_channel","appName","organisation_name","user_subtype","user_type","board_name","district_code","district_name","district_externalId","block_code","block_name","block_externalId","school_code","school_name","school_externalId","cluster_code","cluster_name","cluster_externalId","state_code","state_name","state_externalId","organisation_id","evidences"],"intervals":["1901-01-01T00:00:00+00:00/2101-01-01T00:00:00+00:00"]}' +survey_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris": 
["azure://telemetry-data-store/survey/batchDeletion/druidData.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-survey","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"completedDate","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"completedDate"},{"type":"string","name":"createdAt"},{"type":"string","name":"createdBy"},{"type":"string","name":"criteriaExternalId"},{"type":"string","name":"criteriaId"},{"type":"string","name":"criteriaName"},{"type":"string","name":"surveyId"},{"type":"string","name":"surveyName"},{"type":"string","name":"surveySubmissionId"},{"type":"string","name":"questionAnswer"},{"type":"string","name":"questionECM"},{"type":"string","name":"questionExternalId"},{"type":"string","name":"questionId"},{"type":"string","name":"questionName"},{"type":"string","name":"questionResponseLabel"},{"type":"string","name":"questionResponseType"},{"type":"string","name":"solutionExternalId"},{"type":"string","name":"solutionId"},{"type":"string","name":"solutionName"},{"type":"string","name":"updatedAt"},{"type":"string","name":"instanceParentId"},{"type":"string","name":"instanceId"},{"type":"string","name":"instanceParentResponsetype"},{"type":"string","name":"instanceParentQuestion"},{"type":"string","name":"questionSequenceByEcm"},{"type":"string","name":"maxScore"},{"type":"string","name":"minScore"},{"type":"string","name":"percentageScore"},{"type":"string","name":"pointsBasedScoreInParent"},{"type":"string","name":"totalScore"},{"type":"string","name":"scoreAchieved"},{"type":"string","name":"totalpercentage"},{"type":"string","name":"instanceParentExternalId"},{"type":"string","name":"instanceParentEcmSequence"},{"type":"string","name":"remarks"},{"type":"string","name":"total_evidences"},{"type":"string","name":"evidence_count"},{"type":"string","name":"e
vidences"},{"type":"string","name":"instanceParentCriteriaId"},{"type":"string","name":"instanceParentCriteriaExternalId"},{"type":"string","name":"instanceParentCriteriaName"},{"type":"string","name":"isAPrivateProgram"},{"type":"string","name":"programId"},{"type":"string","name":"programName"},{"type":"string","name":"programExternalId"},{"name":"questionResponseLabel_number","type":"float"},{"type":"string","name":"channel"},{"type":"string","name":"parent_channel"},{"type":"string","name":"appName"},{"type":"string","name":"organisation_name"},{"type":"string","name":"user_subtype"},{"type":"string","name":"user_type"},{"type":"string","name":"board_name"},{"type":"string","name":"district_code"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_code"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"school_code"},{"type":"string","name":"school_name"},{"type":"string","name":"school_externalId"},{"type":"string","name":"cluster_code"},{"type":"string","name":"cluster_name"},{"type":"string","name":"cluster_externalId"},{"type":"string","name":"state_code"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"organisation_id"},{"type":"string","name":"isSubmissionDeleted"}]},"metricsSpec":[{"type":"floatSum","name":"question_response_number","fieldName":"questionResponseLabel_number"}]}}}' +ml_analytics_survey_batchupdate_azure_blob_path : "survey/batchDeletion" +ml_analytics_survey_submission_id_filepath : "{{ WORKDIR }}/ml-analytics-service/survey/submissions.csv" +ml_analytics_survey_batchupdate_output_dir : "{{ WORKDIR }}/source/survey/" diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index b4945675d3..cef1739e4e 100644 --- a/ansible/roles/ml-analytics-service/templates/config.j2 +++ 
b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -106,6 +106,10 @@ observation_query_spec = {{ ml_analytics_druid_observation_query_spec }} observation_injestion_spec = {{ml_analytics_druid_observation_batch_ingestion_spec}} +survey_query_spec = {{ml_analytics_druid_survey_query_spec}} + +survey_injestion_spec = {{ml_analytics_druid_survey_batch_ingestion_spec}} + [KAFKA] url = {{ ml_analytics_kafka_url }} @@ -188,6 +192,8 @@ projects_program_csv = {{ ml_analytics_program_dashboard_azure_blob_path }} observation_batch_ingestion_data_del = {{ ml_analytics_observation_batchupdate_azure_blob_path }} +survey_batch_ingestion_data_del = {{ml_analytics_survey_batchupdate_azure_blob_path}} + [REDIS] host = {{ ml_analytics_redis_host }} @@ -224,6 +230,10 @@ observation_sub_ids = {{ ml_analytics_observation_submission_id_filepath }} observation_druid_data = {{ ml_analytics_observation_batchupdate_output_dir }} +survey_sub_ids = {{ml_analytics_survey_submission_id_filepath}} + +survey_druid_data = {{ml_analytics_survey_batchupdate_output_dir}} + [CLOUD_STORAGE] service_name = {{ ml_analytics_AWS_service_name }} From e4628c7947ba160d429e9e3c893fd63c16d7cc92 Mon Sep 17 00:00:00 2001 From: Ashwiniev95 Date: Wed, 16 Nov 2022 11:33:51 +0530 Subject: [PATCH 074/616] Update key --- ansible/roles/ml-analytics-service/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index f95a395b7d..6bcb9616e9 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -100,7 +100,7 @@ ml_analytics_observation_batchupdate_azure_blob_path: "observation/batchDeletion ml_analytics_observation_submission_id_filepath: "{{ WORKDIR }}/ml-analytics-service/observations/submissions.csv" ml_analytics_observation_batchupdate_output_dir: "{{ WORKDIR }}/source/observations/" 
ml_analytics_druid_survey_query_spec : '{"queryType":"scan","dataSource":"sl-survey","resultFormat":"list","columns":["completedDate","createdAt","createdBy","criteriaExternalId","criteriaId","criteriaName","surveyId","surveyName","surveySubmissionId","questionAnswer","questionECM","questionExternalId","questionId","questionName","questionResponseLabel","questionResponseType","solutionExternalId","solutionId","solutionName","updatedAt","instanceParentId","instanceId","instanceParentResponsetype","instanceParentQuestion","questionSequenceByEcm","maxScore","minScore","percentageScore","pointsBasedScoreInParent","totalScore","scoreAchieved","totalpercentage","instanceParentExternalId","instanceParentEcmSequence","remarks","total_evidences","evidence_count","instanceParentCriteriaId","instanceParentCriteriaExternalId","instanceParentCriteriaName","isAPrivateProgram","programId","programName","programExternalId","questionResponseLabel_number","channel","parent_channel","appName","organisation_name","user_subtype","user_type","board_name","district_code","district_name","district_externalId","block_code","block_name","block_externalId","school_code","school_name","school_externalId","cluster_code","cluster_name","cluster_externalId","state_code","state_name","state_externalId","organisation_id","evidences"],"intervals":["1901-01-01T00:00:00+00:00/2101-01-01T00:00:00+00:00"]}' -survey_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris": 
["azure://telemetry-data-store/survey/batchDeletion/druidData.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-survey","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"completedDate","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"completedDate"},{"type":"string","name":"createdAt"},{"type":"string","name":"createdBy"},{"type":"string","name":"criteriaExternalId"},{"type":"string","name":"criteriaId"},{"type":"string","name":"criteriaName"},{"type":"string","name":"surveyId"},{"type":"string","name":"surveyName"},{"type":"string","name":"surveySubmissionId"},{"type":"string","name":"questionAnswer"},{"type":"string","name":"questionECM"},{"type":"string","name":"questionExternalId"},{"type":"string","name":"questionId"},{"type":"string","name":"questionName"},{"type":"string","name":"questionResponseLabel"},{"type":"string","name":"questionResponseType"},{"type":"string","name":"solutionExternalId"},{"type":"string","name":"solutionId"},{"type":"string","name":"solutionName"},{"type":"string","name":"updatedAt"},{"type":"string","name":"instanceParentId"},{"type":"string","name":"instanceId"},{"type":"string","name":"instanceParentResponsetype"},{"type":"string","name":"instanceParentQuestion"},{"type":"string","name":"questionSequenceByEcm"},{"type":"string","name":"maxScore"},{"type":"string","name":"minScore"},{"type":"string","name":"percentageScore"},{"type":"string","name":"pointsBasedScoreInParent"},{"type":"string","name":"totalScore"},{"type":"string","name":"scoreAchieved"},{"type":"string","name":"totalpercentage"},{"type":"string","name":"instanceParentExternalId"},{"type":"string","name":"instanceParentEcmSequence"},{"type":"string","name":"remarks"},{"type":"string","name":"total_evidences"},{"type":"string","name":"evidence_count"},{"type":"string","name":"e
vidences"},{"type":"string","name":"instanceParentCriteriaId"},{"type":"string","name":"instanceParentCriteriaExternalId"},{"type":"string","name":"instanceParentCriteriaName"},{"type":"string","name":"isAPrivateProgram"},{"type":"string","name":"programId"},{"type":"string","name":"programName"},{"type":"string","name":"programExternalId"},{"name":"questionResponseLabel_number","type":"float"},{"type":"string","name":"channel"},{"type":"string","name":"parent_channel"},{"type":"string","name":"appName"},{"type":"string","name":"organisation_name"},{"type":"string","name":"user_subtype"},{"type":"string","name":"user_type"},{"type":"string","name":"board_name"},{"type":"string","name":"district_code"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_code"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"school_code"},{"type":"string","name":"school_name"},{"type":"string","name":"school_externalId"},{"type":"string","name":"cluster_code"},{"type":"string","name":"cluster_name"},{"type":"string","name":"cluster_externalId"},{"type":"string","name":"state_code"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"organisation_id"},{"type":"string","name":"isSubmissionDeleted"}]},"metricsSpec":[{"type":"floatSum","name":"question_response_number","fieldName":"questionResponseLabel_number"}]}}}' +ml_analytics_druid_survey_batch_ingestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris": 
["azure://telemetry-data-store/survey/batchDeletion/druidData.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-survey","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"completedDate","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"completedDate"},{"type":"string","name":"createdAt"},{"type":"string","name":"createdBy"},{"type":"string","name":"criteriaExternalId"},{"type":"string","name":"criteriaId"},{"type":"string","name":"criteriaName"},{"type":"string","name":"surveyId"},{"type":"string","name":"surveyName"},{"type":"string","name":"surveySubmissionId"},{"type":"string","name":"questionAnswer"},{"type":"string","name":"questionECM"},{"type":"string","name":"questionExternalId"},{"type":"string","name":"questionId"},{"type":"string","name":"questionName"},{"type":"string","name":"questionResponseLabel"},{"type":"string","name":"questionResponseType"},{"type":"string","name":"solutionExternalId"},{"type":"string","name":"solutionId"},{"type":"string","name":"solutionName"},{"type":"string","name":"updatedAt"},{"type":"string","name":"instanceParentId"},{"type":"string","name":"instanceId"},{"type":"string","name":"instanceParentResponsetype"},{"type":"string","name":"instanceParentQuestion"},{"type":"string","name":"questionSequenceByEcm"},{"type":"string","name":"maxScore"},{"type":"string","name":"minScore"},{"type":"string","name":"percentageScore"},{"type":"string","name":"pointsBasedScoreInParent"},{"type":"string","name":"totalScore"},{"type":"string","name":"scoreAchieved"},{"type":"string","name":"totalpercentage"},{"type":"string","name":"instanceParentExternalId"},{"type":"string","name":"instanceParentEcmSequence"},{"type":"string","name":"remarks"},{"type":"string","name":"total_evidences"},{"type":"string","name":"evidence_count"},{"type":"string","name":"e
vidences"},{"type":"string","name":"instanceParentCriteriaId"},{"type":"string","name":"instanceParentCriteriaExternalId"},{"type":"string","name":"instanceParentCriteriaName"},{"type":"string","name":"isAPrivateProgram"},{"type":"string","name":"programId"},{"type":"string","name":"programName"},{"type":"string","name":"programExternalId"},{"name":"questionResponseLabel_number","type":"float"},{"type":"string","name":"channel"},{"type":"string","name":"parent_channel"},{"type":"string","name":"appName"},{"type":"string","name":"organisation_name"},{"type":"string","name":"user_subtype"},{"type":"string","name":"user_type"},{"type":"string","name":"board_name"},{"type":"string","name":"district_code"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_code"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"school_code"},{"type":"string","name":"school_name"},{"type":"string","name":"school_externalId"},{"type":"string","name":"cluster_code"},{"type":"string","name":"cluster_name"},{"type":"string","name":"cluster_externalId"},{"type":"string","name":"state_code"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"organisation_id"},{"type":"string","name":"isSubmissionDeleted"}]},"metricsSpec":[{"type":"floatSum","name":"question_response_number","fieldName":"questionResponseLabel_number"}]}}}' ml_analytics_survey_batchupdate_azure_blob_path : "survey/batchDeletion" ml_analytics_survey_submission_id_filepath : "{{ WORKDIR }}/ml-analytics-service/survey/submissions.csv" ml_analytics_survey_batchupdate_output_dir : "{{ WORKDIR }}/source/survey/" From f870b051d010ca60e089f0312ad2c57f36a1fc64 Mon Sep 17 00:00:00 2001 From: Surabhi Date: Wed, 16 Nov 2022 13:09:48 +0530 Subject: [PATCH 075/616] removed unused variables --- ansible/roles/stack-sunbird/templates/inbound.env | 5 ----- 
ansible/roles/stack-sunbird/templates/transformer.env | 5 ----- 2 files changed, 10 deletions(-) diff --git a/ansible/roles/stack-sunbird/templates/inbound.env b/ansible/roles/stack-sunbird/templates/inbound.env index c9bc2033a8..331ae1d3fc 100644 --- a/ansible/roles/stack-sunbird/templates/inbound.env +++ b/ansible/roles/stack-sunbird/templates/inbound.env @@ -53,11 +53,6 @@ REDIS_PASS={{sunbird_redis_pass | default('')}} REDIS_PORT={{sunbird_redis_port | default(6379)}} REDIS_DB_INDEX={{redis_db_index_uci | default('7')}} -#Azure Config -AZURE_BLOB_STORE_CONTAINER={{sunbird_azure_uci_container_name | default('uci-' + env )}} -AZURE_BLOB_STORE_ACCOUNT_NAME={{sunbird_private_storage_account_name}} -AZURE_BLOB_STORE_ACCOUNT_KEY={{sunbird_private_storage_account_key}} - #Netcore NETCORE_WHATSAPP_AUTH_TOKEN={{uci_netcore_whatsapp_token}} NETCORE_WHATSAPP_SOURCE={{uci_netcore_whatsapp_source}} diff --git a/ansible/roles/stack-sunbird/templates/transformer.env b/ansible/roles/stack-sunbird/templates/transformer.env index f5c2f7b3a5..b5be5c4451 100644 --- a/ansible/roles/stack-sunbird/templates/transformer.env +++ b/ansible/roles/stack-sunbird/templates/transformer.env @@ -70,11 +70,6 @@ REDIS_DB_INDEX={{redis_db_index_uci | default('7')}} EXHAUST_TELEMETRY_ENABLED=TRUE POSTHOG_EVENT_ENABLED=FALSE -#Azure Config -AZURE_BLOB_STORE_CONTAINER={{sunbird_azure_uci_container_name | default('uci-' + env )}} -AZURE_BLOB_STORE_ACCOUNT_NAME={{sunbird_private_storage_account_name}} -AZURE_BLOB_STORE_ACCOUNT_KEY={{sunbird_private_storage_account_key}} - #Sunbird CDN Configuration SUNBIRD_CLOUD_MEDIA_STORAGE_TYPE=azure SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{sunbird_private_storage_account_name}} From 3a25d5bde548cb3ad03c9d67c81752a543364b3b Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 16 Nov 2022 14:03:55 +0530 Subject: [PATCH 076/616] feat: SB-30654 generalizing plugins upload Signed-off-by: Keshav Prasad --- ansible/deploy-plugins.yml | 23 ++++++++++++++++------- 
pipelines/deploy/CEPlugins/Jenkinsfile | 3 +-- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index bf876b3f66..b7f233af67 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -72,13 +72,22 @@ - preview - block: - - name: run the az_copy.sh script - shell: "bash {{ az_file_path }} {{ plugin_storage }} {{ source_file }}" - async: 3600 - poll: 10 - environment: - AZURE_STORAGE_ACCOUNT: "{{ azure_public_storage_account_name }}" - AZURE_STORAGE_SAS_TOKEN: "{{ azure_public_storage_account_sas }}" + - name: delete batch of files from azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-delete-batch.yml + vars: + blob_delete_pattern: "content-plugins/{{ item }}" + with_lines: "cat {{ plugin_list_to_delete_and_upload }}" + + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_folder_path: "/content-plugins" + local_file_or_folder_path: "{{ source_file }}/{{ item }}" + with_lines: "cat {{ plugin_list_to_delete_and_upload }}" tags: - plugins when: cloud_service_provider == "azure" diff --git a/pipelines/deploy/CEPlugins/Jenkinsfile b/pipelines/deploy/CEPlugins/Jenkinsfile index fea1e80819..1d026ac576 100644 --- a/pipelines/deploy/CEPlugins/Jenkinsfile +++ b/pipelines/deploy/CEPlugins/Jenkinsfile @@ -31,11 +31,10 @@ node() { sh """ unzip ${artifact} unzip content-plugins.zip - chmod a+x content-plugins/az_copy.sh mv content-plugins ansible """ ansiblePlaybook = "${currentWs}/ansible/deploy-plugins.yml" - ansibleExtraArgs = "--tags plugins --extra-vars \" source_file=${currentWs}/ansible/content-plugins az_file_path=${currentWs}/ansible/content-plugins/az_copy.sh\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--tags plugins --extra-vars \" source_file=${currentWs}/ansible/content-plugins 
plugin_list_to_delete_and_upload=${currentWs}/ansible/content-plugins/plugin_list_to_delete_and_upload.txt\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('ansiblePlaybook', ansiblePlaybook) values.put('ansibleExtraArgs', ansibleExtraArgs) println values From be05234a5cb2b7165251751460c6de9c1e84a03e Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 16 Nov 2022 18:55:45 +0530 Subject: [PATCH 077/616] feat: upload plugins using existing roles Signed-off-by: Keshav Prasad --- ansible/deploy-plugins.yml | 12 +++++++----- ansible/roles/azure-cloud-storage/defaults/main.yml | 7 ++++++- .../azure-cloud-storage/tasks/blob-delete-batch.yml | 2 +- .../azure-cloud-storage/tasks/blob-upload-batch.yml | 3 ++- .../azure-cloud-storage/tasks/container-create.yml | 4 ++-- pipelines/deploy/CEPlugins/Jenkinsfile | 2 +- 6 files changed, 19 insertions(+), 11 deletions(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index b7f233af67..357baef98e 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -30,6 +30,7 @@ storage_account_sas_token: "{{ azure_public_storage_account_sas }}" tags: - always + no_log: True - block: - name: delete files and folders from azure storage using azcopy @@ -77,17 +78,18 @@ name: azure-cloud-storage tasks_from: blob-delete-batch.yml vars: - blob_delete_pattern: "content-plugins/{{ item }}" - with_lines: "cat {{ plugin_list_to_delete_and_upload }}" + blob_delete_pattern: "content-plugins/{{ item }}/*" + with_lines: cat {{ plugin_list_to_delete_and_upload }} - name: upload batch of files to azure storage include_role: name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_folder_path: "/content-plugins" - local_file_or_folder_path: "{{ source_file }}/{{ item }}" - with_lines: "cat {{ plugin_list_to_delete_and_upload }}" + blob_container_folder_path: "/content-plugins/{{ item }}" + local_file_or_folder_path: "{{ source_folder }}/{{ item }}" + create_container: 
false + with_lines: cat {{ plugin_list_to_delete_and_upload }} tags: - plugins when: cloud_service_provider == "azure" diff --git a/ansible/roles/azure-cloud-storage/defaults/main.yml b/ansible/roles/azure-cloud-storage/defaults/main.yml index 0e4e45bf95..8f6673d3c9 100644 --- a/ansible/roles/azure-cloud-storage/defaults/main.yml +++ b/ansible/roles/azure-cloud-storage/defaults/main.yml @@ -64,4 +64,9 @@ blob_container_folder_path: "" # This variable affects only new containers and has no affect on a container if it already exists # If the container already exists, the access level will not be changed # You will need to change the access level from Azure portal or using az storage container set-permission command -container_public_access: "" \ No newline at end of file +container_public_access: "" + +# Create the container by default before running the specific azure tasks +# If we would like to skip container creation (in case of a looped execution), you can set this value to false +# in order to skip the task for every iteration +create_container: true \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml index 4e8ad68a2d..e642a6f24f 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml @@ -1,5 +1,5 @@ --- -- name: delete files and folders from a blob container recursively +- name: delete files and folders - deleting {{ blob_container_name }}/{{ blob_delete_pattern }} shell: "az storage blob delete-batch --source {{ blob_container_name }} --pattern '{{ blob_delete_pattern }}' --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" async: 3600 poll: 10 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml index 
3043da46cc..8f10576cb5 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml @@ -3,8 +3,9 @@ include_role: name: azure-cloud-storage tasks_from: container-create.yml + when: create_container is true -- name: upload files and folders from a local directory to azure storage container +- name: upload files and folders - uploading {{ blob_container_name }}{{ blob_container_folder_path }} shell: "az storage blob upload-batch --destination {{ blob_container_name }}{{ blob_container_folder_path }} --source {{ local_file_or_folder_path }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" async: 3600 poll: 10 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/container-create.yml b/ansible/roles/azure-cloud-storage/tasks/container-create.yml index 419510cc19..847c765a33 100644 --- a/ansible/roles/azure-cloud-storage/tasks/container-create.yml +++ b/ansible/roles/azure-cloud-storage/tasks/container-create.yml @@ -1,8 +1,8 @@ --- -- name: create container in azure storage if it doesn't exist +- name: create container if it doesn't exist shell: "az storage container create --name {{ blob_container_name }} --public-access {{ container_public_access }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" when: storage_account_key | length > 0 -- name: create container in azure storage if it doesn't exist +- name: create container if it doesn't exist shell: "az storage container create --name {{ blob_container_name }} --public-access {{ container_public_access }} --account-name {{ storage_account_name }} --sas-token '{{ storage_account_sas_token }}'" when: storage_account_sas_token | length > 0 \ No newline at end of file diff --git a/pipelines/deploy/CEPlugins/Jenkinsfile b/pipelines/deploy/CEPlugins/Jenkinsfile index 1d026ac576..078069bbe0 100644 --- a/pipelines/deploy/CEPlugins/Jenkinsfile +++ 
b/pipelines/deploy/CEPlugins/Jenkinsfile @@ -34,7 +34,7 @@ node() { mv content-plugins ansible """ ansiblePlaybook = "${currentWs}/ansible/deploy-plugins.yml" - ansibleExtraArgs = "--tags plugins --extra-vars \" source_file=${currentWs}/ansible/content-plugins plugin_list_to_delete_and_upload=${currentWs}/ansible/content-plugins/plugin_list_to_delete_and_upload.txt\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--tags plugins --extra-vars \" source_folder=${currentWs}/ansible/content-plugins plugin_list_to_delete_and_upload=${currentWs}/ansible/content-plugins/plugins_to_delete_and_upload.txt\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('ansiblePlaybook', ansiblePlaybook) values.put('ansibleExtraArgs', ansibleExtraArgs) println values From f436573fcf8d07c1c67c4f099c6a01b446c3ed06 Mon Sep 17 00:00:00 2001 From: G33tha Date: Wed, 16 Nov 2022 20:18:21 +0530 Subject: [PATCH 078/616] grouping aws specific tasks into a single role (#3573) --- ansible/artifacts-download.yml | 13 ++++ ansible/artifacts-upload.yml | 13 ++++ ansible/assets-upload.yml | 24 +++++++ ansible/bootstrap.yml | 10 +++ ansible/deploy-plugins.yml | 64 +++++++++++++++++++ ansible/desktop-faq-upload.yml | 51 +++++++++++++++ ansible/dial_upload-schema.yml | 13 ++++ ansible/kp_upload-schema.yml | 13 ++++ ansible/roles/aws-cli/defaults/main.yml | 1 + ansible/roles/aws-cli/tasks/main.yml | 24 +++++++ .../roles/aws-cloud-storage/defaults/main.yml | 3 + .../aws-cloud-storage/tasks/delete-folder.yml | 9 +++ .../roles/aws-cloud-storage/tasks/delete.yml | 9 +++ .../aws-cloud-storage/tasks/download.yml | 9 +++ .../roles/aws-cloud-storage/tasks/main.yml | 18 ++++++ .../aws-cloud-storage/tasks/upload-folder.yml | 9 +++ .../roles/aws-cloud-storage/tasks/upload.yml | 9 +++ ansible/roles/cassandra-backup/tasks/main.yml | 13 ++++ .../roles/cassandra-restore/tasks/main.yml | 14 ++++ ansible/roles/cert-templates/tasks/main.yml | 13 ++++ 
ansible/roles/desktop-deploy/tasks/main.yml | 28 ++++++++ ansible/roles/grafana-backup/tasks/main.yml | 13 ++++ .../jenkins-backup-upload/tasks/main.yml | 13 ++++ ansible/roles/mongodb-backup/tasks/main.yml | 13 ++++ .../tasks/main.yml | 13 ++++ .../tasks/main.yml | 13 ++++ .../roles/postgresql-backup/tasks/main.yml | 13 ++++ .../roles/postgresql-restore/tasks/main.yml | 13 ++++ .../roles/prometheus-backup-v2/tasks/main.yml | 13 ++++ .../roles/prometheus-backup/tasks/main.yml | 13 ++++ .../roles/prometheus-restore/tasks/main.yml | 13 ++++ ansible/roles/redis-backup/tasks/main.yml | 13 ++++ ansible/uploadFAQs.yml | 15 +++++ .../dev/jobs/Core/jobs/Bootstrap/config.xml | 1 + pipelines/deploy/CEPlugins/Jenkinsfile | 2 +- .../ansible/inventory/dev/Core/common.yml | 8 ++- .../ansible/inventory/dev/Core/secrets.yml | 11 +++- 37 files changed, 535 insertions(+), 5 deletions(-) create mode 100644 ansible/roles/aws-cli/defaults/main.yml create mode 100644 ansible/roles/aws-cli/tasks/main.yml create mode 100644 ansible/roles/aws-cloud-storage/defaults/main.yml create mode 100644 ansible/roles/aws-cloud-storage/tasks/delete-folder.yml create mode 100644 ansible/roles/aws-cloud-storage/tasks/delete.yml create mode 100644 ansible/roles/aws-cloud-storage/tasks/download.yml create mode 100644 ansible/roles/aws-cloud-storage/tasks/main.yml create mode 100644 ansible/roles/aws-cloud-storage/tasks/upload-folder.yml create mode 100644 ansible/roles/aws-cloud-storage/tasks/upload.yml diff --git a/ansible/artifacts-download.yml b/ansible/artifacts-download.yml index cb8230d44b..043446554d 100644 --- a/ansible/artifacts-download.yml +++ b/ansible/artifacts-download.yml @@ -26,3 +26,16 @@ dest_file_name: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" when: cloud_service_provider == "gcloud" + + - name: download artifact from aws s3 + include_role: + name: aws-cloud-storage + tasks_from: download.yml + vars: + local_file_or_folder_path: "{{ artifact_path }}" + 
s3_bucket_name: "{{ aws_artifact_s3_bucket_name }}" + s3_path: "{{ artifacts_container }}/{{ artifact }}" + aws_default_region: "{{ aws_region }}" + aws_access_key_id: "{{ aws_artifact_bucket_access_key }}" + aws_secret_access_key: "{{ aws_artifact_bucket_secret_access_key }}" + when: cloud_service_provider == "aws" \ No newline at end of file diff --git a/ansible/artifacts-upload.yml b/ansible/artifacts-upload.yml index 52e67448c7..32e866808c 100644 --- a/ansible/artifacts-upload.yml +++ b/ansible/artifacts-upload.yml @@ -27,3 +27,16 @@ dest_file_name: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" when: cloud_service_provider == "gcloud" + + - name: upload artifact to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload.yml + vars: + local_file_or_folder_path: "{{ artifact_path }}" + s3_bucket_name: "{{ aws_artifact_s3_bucket_name }}" + s3_path: "{{ artifacts_container }}/{{ artifact }}" + aws_default_region: "{{ aws_region }}" + aws_access_key_id: "{{ aws_artifact_bucket_access_key }}" + aws_secret_access_key: "{{ aws_artifact_bucket_secret_access_key }}" + when: cloud_service_provider == "aws" \ No newline at end of file diff --git a/ansible/assets-upload.yml b/ansible/assets-upload.yml index 3809c63722..12021680fe 100644 --- a/ansible/assets-upload.yml +++ b/ansible/assets-upload.yml @@ -35,6 +35,30 @@ local_file_or_folder_path: "{{ assets }}" when: cloud_service_provider == "azure" +##### AWS + - name: this block consists of tasks related to aws storage + block: + - name: set common aws variables + set_fact: + s3_bucket_name: "{{ aws_public_s3_bucket_name }}" + s3_path: "{{ player_cdn_storage }}" + aws_default_region: "{{ aws_region }}" + aws_access_key_id: "{{ aws_public_bucket_access_key }}" + aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + + - name: delete files and folders from s3 + include_role: + name: aws-cloud-storage + tasks_from: delete-folder.yml + + - name: upload batch of files to s3 + 
include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + local_file_or_folder_path: "{{ assets }}" + when: cloud_service_provider == "aws" + #GCP - name: this block consists of tasks related to azure storage block: diff --git a/ansible/bootstrap.yml b/ansible/bootstrap.yml index 36d9d7b0d0..b23479e833 100644 --- a/ansible/bootstrap.yml +++ b/ansible/bootstrap.yml @@ -40,6 +40,16 @@ tags: - gcloud_cli +- hosts: "{{ hosts }}" + become: yes + ignore_unreachable: yes + vars_files: + - "{{inventory_dir}}/secrets.yml" + roles: + - role: aws-cli + tags: + - aws_cli + - hosts: "{{ hosts| default('all') }}" become: yes gather_facts: no diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index bf876b3f66..fa4156d3c7 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -133,3 +133,67 @@ - preview when: cloud_service_provider == "gcloud" +################################### AWS tasks ######################### + - name: this block consists of tasks related to aws s3 + block: + - name: set common aws variables + set_fact: + aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ aws_public_s3_bucket_name }}" + aws_access_key_id: "{{ aws_public_bucket_access_key }}" + aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + tags: + - always + + - block: + - name: delete files and folders from s3 + include_role: + name: aws-cloud-storage + tasks_from: delete-folder.yml + vars: + s3_path: "{{ plugin_storage }}/{{ folder_name }}" + tags: + - content-editor + - collection-editor + - generic-editor + - preview + + - block: + - name: upload folder to s3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_path: "{{ plugin_storage }}/{{ folder_name }}" + local_file_or_folder_path: "{{ source_name }}" + tags: + - content-editor + - collection-editor + - generic-editor + - preview + - editor + - core-plugins + + - block: + - name: upload file to s3 + include_role: + name: 
aws-cloud-storage + tasks_from: upload.yml + vars: + s3_path: "{{ plugin_storage }}/artefacts/content-player/content-player-{{ player_version_number }}.zip" + local_file_or_folder_path: "{{ source_file_name }}" + tags: + - preview + + - block: + - name: run the s3_copy.sh script + shell: "bash {{ s3_file_path }} {{ plugin_storage }} {{ source_file }} {{ aws_public_s3_bucket_name }}" + async: 3600 + poll: 10 + environment: + AWS_DEFAULT_REGION: "{{ aws_default_region }}" + AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}" + tags: + - plugins + when: cloud_service_provider == "aws" \ No newline at end of file diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index f20f0d7eeb..911153576b 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -93,3 +93,54 @@ - upload-chatbot-config - upload-batch when: cloud_service_provider == "gcloud" + +######################## AWS tasks ######################################### + + - name: this block consists of tasks related to aws s3 + block: + - name: set common aws variables + set_fact: + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" + s3_path: "{{ upload_storage }}/{{ destination_path }}" + tags: + - always + + - block: + - name: upload file to aws s3 public bucket + include_role: + name: aws-cloud-storage + tasks_from: upload.yml + vars: + s3_bucket_name: "{{ aws_public_s3_bucket_name }}" + aws_access_key_id: "{{ aws_public_bucket_access_key }}" + aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + tags: + - upload-desktop-faq + + - block: + - name: upload file to aws s3 private bucket + include_role: + name: aws-cloud-storage + tasks_from: upload.yml + vars: + s3_bucket_name: "{{ aws_private_s3_bucket_name }}" + aws_access_key_id: "{{ aws_private_bucket_access_key }}" + aws_secret_access_key: "{{ aws_private_bucket_secret_access_key }}" + 
tags: + - upload-label + + - block: + - name: upload folder to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_bucket_name: "{{ aws_public_s3_bucket_name }}" + aws_access_key_id: "{{ aws_public_bucket_access_key }}" + aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + tags: + - upload-chatbot-config + - upload-batch + when: cloud_service_provider == "aws" + \ No newline at end of file diff --git a/ansible/dial_upload-schema.yml b/ansible/dial_upload-schema.yml index a93a900263..f046e63462 100644 --- a/ansible/dial_upload-schema.yml +++ b/ansible/dial_upload-schema.yml @@ -38,6 +38,19 @@ storage_account_name: "{{ azure_public_storage_account_name }}" storage_account_key: "{{ azure_public_storage_account_key }}" when: cloud_service_provider == "azure" + + - name: upload batch of files to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_bucket_name: "{{ aws_public_s3_bucket_name }}" + aws_access_key_id: "{{ aws_public_bucket_access_key }}" + aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "dial_schema_template_files" + s3_path: "{{ dial_plugin_storage }}/schemas/local" + when: cloud_service_provider == "aws" - name: upload batch of files to gcloud storage include_role: diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index a4f6bda83a..aecdab077a 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -23,6 +23,19 @@ storage_account_key: "{{ azure_public_storage_account_key }}" when: cloud_service_provider == "azure" + - name: upload batch of files to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_bucket_name: "{{ aws_public_s3_bucket_name }}" + aws_access_key_id: "{{ aws_public_bucket_access_key }}" + aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + 
aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ source_name }}" + s3_path: "{{ plugin_storage }}/schemas/local" + when: cloud_service_provider == "aws" + - name: upload batch of files to gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/aws-cli/defaults/main.yml b/ansible/roles/aws-cli/defaults/main.yml new file mode 100644 index 0000000000..53d866eafa --- /dev/null +++ b/ansible/roles/aws-cli/defaults/main.yml @@ -0,0 +1 @@ +aws_cli_url: https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip \ No newline at end of file diff --git a/ansible/roles/aws-cli/tasks/main.yml b/ansible/roles/aws-cli/tasks/main.yml new file mode 100644 index 0000000000..5907fb1aaf --- /dev/null +++ b/ansible/roles/aws-cli/tasks/main.yml @@ -0,0 +1,24 @@ +--- +- name: Download the installation file + get_url: + url: "{{ aws_cli_url }}" + dest: /tmp/awscliv2.zip + +- name: Installing unzip + apt: + name: "{{item}}" + state: latest + with_items: + - zip + - unzip + +- name: Unzip the installer + unarchive: + src: /tmp/awscliv2.zip + dest: /tmp/ + remote_src: yes + +- name: install aws cli + shell: ./aws/install + args: + chdir: /tmp/ diff --git a/ansible/roles/aws-cloud-storage/defaults/main.yml b/ansible/roles/aws-cloud-storage/defaults/main.yml new file mode 100644 index 0000000000..6f3f6f86d6 --- /dev/null +++ b/ansible/roles/aws-cloud-storage/defaults/main.yml @@ -0,0 +1,3 @@ +s3_bucket_name: "" +s3_path: "" +local_file_or_folder_path: "" diff --git a/ansible/roles/aws-cloud-storage/tasks/delete-folder.yml b/ansible/roles/aws-cloud-storage/tasks/delete-folder.yml new file mode 100644 index 0000000000..c912b14edb --- /dev/null +++ b/ansible/roles/aws-cloud-storage/tasks/delete-folder.yml @@ -0,0 +1,9 @@ +--- +- name: delete files and folders recursively + environment: + AWS_DEFAULT_REGION: "{{ aws_default_region }}" + AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}" + shell: "aws s3 
rm s3://{{ s3_bucket_name }}/{{ s3_path }} --recursive" + async: 3600 + poll: 10 diff --git a/ansible/roles/aws-cloud-storage/tasks/delete.yml b/ansible/roles/aws-cloud-storage/tasks/delete.yml new file mode 100644 index 0000000000..414ea52e6b --- /dev/null +++ b/ansible/roles/aws-cloud-storage/tasks/delete.yml @@ -0,0 +1,9 @@ +--- +- name: delete files from s3 + environment: + AWS_DEFAULT_REGION: "{{ aws_default_region }}" + AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}" + shell: "aws s3 rm s3://{{ s3_bucket_name }}/{{ s3_path }}" + async: 3600 + poll: 10 diff --git a/ansible/roles/aws-cloud-storage/tasks/download.yml b/ansible/roles/aws-cloud-storage/tasks/download.yml new file mode 100644 index 0000000000..138024af78 --- /dev/null +++ b/ansible/roles/aws-cloud-storage/tasks/download.yml @@ -0,0 +1,9 @@ +--- +- name: download files to s3 + environment: + AWS_DEFAULT_REGION: "{{ aws_default_region }}" + AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}" + shell: "aws s3 cp s3://{{ s3_bucket_name }}/{{ s3_path }} {{ local_file_or_folder_path }}" + async: 3600 + poll: 10 diff --git a/ansible/roles/aws-cloud-storage/tasks/main.yml b/ansible/roles/aws-cloud-storage/tasks/main.yml new file mode 100644 index 0000000000..62f204a9d2 --- /dev/null +++ b/ansible/roles/aws-cloud-storage/tasks/main.yml @@ -0,0 +1,18 @@ +--- +- name: delete files from aws S3 bucket + include: delete.yml + +- name: delete folders from aws S3 bucket recursively + include: delete-folder.yml + + +- name: download file from S3 + include: download.yml + +- name: upload files from a local to aws S3 + include: upload.yml + +- name: upload files and folder from local directory to aws S3 + include: upload-folder.yml + + diff --git a/ansible/roles/aws-cloud-storage/tasks/upload-folder.yml b/ansible/roles/aws-cloud-storage/tasks/upload-folder.yml new file mode 100644 index 0000000000..3e03b068b7 --- 
/dev/null +++ b/ansible/roles/aws-cloud-storage/tasks/upload-folder.yml @@ -0,0 +1,9 @@ +--- +- name: upload folder to s3 + environment: + AWS_DEFAULT_REGION: "{{ aws_default_region }}" + AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}" + shell: "aws s3 cp {{ local_file_or_folder_path }} s3://{{ s3_bucket_name }}/{{ s3_path }} --recursive" + async: 3600 + poll: 10 diff --git a/ansible/roles/aws-cloud-storage/tasks/upload.yml b/ansible/roles/aws-cloud-storage/tasks/upload.yml new file mode 100644 index 0000000000..af8de990e2 --- /dev/null +++ b/ansible/roles/aws-cloud-storage/tasks/upload.yml @@ -0,0 +1,9 @@ +--- +- name: upload files to s3 + environment: + AWS_DEFAULT_REGION: "{{ aws_default_region }}" + AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}" + shell: "aws s3 cp {{ local_file_or_folder_path }} s3://{{ s3_bucket_name }}/{{ s3_path }}" + async: 3600 + poll: 10 diff --git a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml index fc662bcea5..507aeb190b 100755 --- a/ansible/roles/cassandra-backup/tasks/main.yml +++ b/ansible/roles/cassandra-backup/tasks/main.yml @@ -45,6 +45,19 @@ storage_account_sas_token: "{{ azure_management_storage_account_sas }}" when: cloud_service_provider == "azure" +- name: upload backup to S3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}" + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + s3_path: "{{ cassandra_backup_storage }}" + aws_default_region: "{{ aws_region }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + when: cloud_service_provider == "aws" + - name: upload file to gcloud storage include_role: name: gcp-cloud-storage diff --git 
a/ansible/roles/cassandra-restore/tasks/main.yml b/ansible/roles/cassandra-restore/tasks/main.yml index 3b2fc3ae9b..8a47ab7089 100755 --- a/ansible/roles/cassandra-restore/tasks/main.yml +++ b/ansible/roles/cassandra-restore/tasks/main.yml @@ -18,6 +18,20 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: download a file from aws s3 + become: true + include_role: + name: aws-cloud-storage + tasks_from: download.yml + vars: + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}" + s3_path: "{{ cassandra_backup_storage }}/{{ cassandra_restore_gzip_file_name }}" + when: cloud_service_provider == "aws" + - name: download file from gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index acecc4d6f4..78f1f769b3 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -44,6 +44,19 @@ storage_account_key: "{{ azure_private_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload batch of files to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_bucket_name: "{{ aws_private_s3_bucket_name }}" + aws_access_key_id: "{{ aws_private_bucket_access_key }}" + aws_secret_access_key: "{{ aws_private_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" + s3_path: "{{ cert_service_storage }}" + when: cloud_service_provider == "aws" + - name: upload batch of files to gcloud storage include_role: name: gcp-cloud-storage diff --git 
a/ansible/roles/desktop-deploy/tasks/main.yml b/ansible/roles/desktop-deploy/tasks/main.yml index 4ce4da3fb6..09c41300ef 100644 --- a/ansible/roles/desktop-deploy/tasks/main.yml +++ b/ansible/roles/desktop-deploy/tasks/main.yml @@ -75,6 +75,34 @@ local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets/{{ time }}/" when: cloud_service_provider == "azure" +######################## AWS tasks ################################## + +- name: this block consists of tasks related to aws s3 + block: + - name: set common aws variables + set_fact: + s3_bucket_name: "{{ aws_public_s3_bucket_name }}" + aws_access_key_id: "{{ aws_public_bucket_access_key }}" + aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + + - name: upload batch of files to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_path: "{{ offline_installer_storage }}" + local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets" + + - name: upload batch of files to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_path: "{{ offline_installer_storage }}/latest" + local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets/{{ time }}/" + when: cloud_service_provider == "aws" + - name: this block consists of tasks related to gcloud storage block: - name: set common gcloud variables diff --git a/ansible/roles/grafana-backup/tasks/main.yml b/ansible/roles/grafana-backup/tasks/main.yml index 0f0a44a2b2..2c8520030c 100644 --- a/ansible/roles/grafana-backup/tasks/main.yml +++ b/ansible/roles/grafana-backup/tasks/main.yml @@ -32,6 +32,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload.yml + vars: + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + 
aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ grafana_backup_gzip_file_path }}" + s3_path: "{{ grafana_backup_storage }}/{{ grafana_backup_gzip_file_name }}" + when: cloud_service_provider == "aws" + - name: upload file to gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/jenkins-backup-upload/tasks/main.yml b/ansible/roles/jenkins-backup-upload/tasks/main.yml index 32be77b7a7..a94e57fe4a 100644 --- a/ansible/roles/jenkins-backup-upload/tasks/main.yml +++ b/ansible/roles/jenkins-backup-upload/tasks/main.yml @@ -25,6 +25,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload.yml + vars: + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" + s3_path: "{{ jenkins_backup_storage }}/{{ LATEST_BACKUP_DIR.stdout }}.zip" + when: cloud_service_provider == "aws" + - name: upload file to gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/mongodb-backup/tasks/main.yml b/ansible/roles/mongodb-backup/tasks/main.yml index 4ae40ecd2b..0762f2754f 100644 --- a/ansible/roles/mongodb-backup/tasks/main.yml +++ b/ansible/roles/mongodb-backup/tasks/main.yml @@ -27,6 +27,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload.yml + vars: + s3_bucket_name: "{{ aws_public_s3_bucket_name }}" + aws_access_key_id: "{{ 
aws_public_bucket_access_key }}" + aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ mongo_backup_file_path }}.tar.gz" + s3_path: "{{ mongo_backup_storage }}/{{ mongo_backup_file_name }}.tar.gz" + when: cloud_service_provider == "aws" + - name: upload file to gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/postgres-managed-service-backup/tasks/main.yml b/ansible/roles/postgres-managed-service-backup/tasks/main.yml index 686f4c42f6..ea206146b3 100644 --- a/ansible/roles/postgres-managed-service-backup/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-backup/tasks/main.yml @@ -54,6 +54,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload.yml + vars: + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" + s3_path: "{{ postgresql_backup_storage }}/{{ postgresql_backup_gzip_file_name }}.zip" + when: cloud_service_provider == "aws" + - name: upload file to gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/postgres-managed-service-restore/tasks/main.yml b/ansible/roles/postgres-managed-service-restore/tasks/main.yml index 7df51e26b4..0299ff3f73 100644 --- a/ansible/roles/postgres-managed-service-restore/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-restore/tasks/main.yml @@ -21,6 +21,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: download a file from aws s3 + include_role: + name: aws-cloud-storage + tasks_from: download.yml + vars: 
+ s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ postgres_restore_dir }}/{{ postgres_backup_filepath }}" + s3_path: "{{ postgres_backup_storage }}/{{ postgres_backup_filename }}" + when: cloud_service_provider == "aws" + - name: download file from gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/postgresql-backup/tasks/main.yml b/ansible/roles/postgresql-backup/tasks/main.yml index 0704d4847f..65116bede0 100644 --- a/ansible/roles/postgresql-backup/tasks/main.yml +++ b/ansible/roles/postgresql-backup/tasks/main.yml @@ -26,6 +26,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload.yml + vars: + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" + s3_path: "{{ postgresql_backup_storage }}/{{ postgresql_backup_gzip_file_name }}" + when: cloud_service_provider == "aws" + - name: upload file to gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/postgresql-restore/tasks/main.yml b/ansible/roles/postgresql-restore/tasks/main.yml index ec6a40494d..877e178987 100644 --- a/ansible/roles/postgresql-restore/tasks/main.yml +++ b/ansible/roles/postgresql-restore/tasks/main.yml @@ -16,6 +16,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: download a file from aws s3 + include_role: + name: aws-cloud-storage + tasks_from: download.yml + vars: + 
s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ postgresql_restore_gzip_file_path }}" + s3_path: "{{ postgres_backup_storage }}/{{ postgresql_restore_gzip_file_name }}" + when: cloud_service_provider == "aws" + - name: download file from gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/prometheus-backup-v2/tasks/main.yml b/ansible/roles/prometheus-backup-v2/tasks/main.yml index 0cafacb627..3831080dbc 100644 --- a/ansible/roles/prometheus-backup-v2/tasks/main.yml +++ b/ansible/roles/prometheus-backup-v2/tasks/main.yml @@ -29,6 +29,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: upload.yml + vars: + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" + s3_path: "{{ prometheus_backup_storage }}/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" + when: cloud_service_provider == "aws" + - name: upload file to gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/prometheus-backup/tasks/main.yml b/ansible/roles/prometheus-backup/tasks/main.yml index 32cffa6e5c..55a51287ae 100644 --- a/ansible/roles/prometheus-backup/tasks/main.yml +++ b/ansible/roles/prometheus-backup/tasks/main.yml @@ -38,6 +38,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to aws s3 + include_role: + name: aws-cloud-storage + 
tasks_from: upload.yml + vars: + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ prometheus_backup_gzip_file_path }}" + s3_path: "{{ prometheus_backup_storage }}/{{ prometheus_backup_gzip_file_name }}" + when: cloud_service_provider == "aws" + - name: upload file to gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/prometheus-restore/tasks/main.yml b/ansible/roles/prometheus-restore/tasks/main.yml index 843ebe4598..2232770fdd 100644 --- a/ansible/roles/prometheus-restore/tasks/main.yml +++ b/ansible/roles/prometheus-restore/tasks/main.yml @@ -14,6 +14,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: download a file from aws s3 + include_role: + name: aws-cloud-storage + tasks_from: download.yml + vars: + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filepath }}" + s3_path: "{{ prometheus_backup_storage }}/{{ prometheus_backup_filename }}" + when: cloud_service_provider == "aws" + - name: download file from gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/roles/redis-backup/tasks/main.yml b/ansible/roles/redis-backup/tasks/main.yml index 51f7ab63ff..5359a362c8 100644 --- a/ansible/roles/redis-backup/tasks/main.yml +++ b/ansible/roles/redis-backup/tasks/main.yml @@ -26,6 +26,19 @@ storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" +- name: upload file to aws s3 + include_role: + name: aws-cloud-storage + tasks_from: 
upload.yml + vars: + s3_bucket_name: "{{ aws_management_s3_bucket_name }}" + aws_access_key_id: "{{ aws_management_bucket_access_key }}" + aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + aws_default_region: "{{ aws_region }}" + local_file_or_folder_path: "{{ redis_backup_file_path }}" + s3_path: "{{ nodebb_redis_backup_storage }}/{{ redis_backup_file_name }}" + when: cloud_service_provider == "aws" + - name: upload file to gcloud storage include_role: name: gcp-cloud-storage diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index 52923e1bf4..b37398b874 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -29,6 +29,21 @@ with_items: - "{{ source_folder.split(',') }}" when: cloud_service_provider == "azure" + + - name: upload batch of files to s3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_bucket_name: "{{ aws_public_s3_bucket_name }}" + aws_default_region: "{{ aws_region }}" + aws_access_key_id: "{{ aws_public_bucket_access_key }}" + aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" + s3_path: "{{ upload_storage }}" + with_items: + - "{{ source_folder.split(',') }}" + when: cloud_service_provider == "aws" - name: upload batch of files to gcloud storage include_role: diff --git a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml index 20d7006b52..b95bca2645 100644 --- a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml +++ b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml @@ -94,6 +94,7 @@ return """<b>This parameter is not used</b>""" true diff --git a/pipelines/deploy/CEPlugins/Jenkinsfile b/pipelines/deploy/CEPlugins/Jenkinsfile index fea1e80819..e1baf9ca14 100644 --- 
a/pipelines/deploy/CEPlugins/Jenkinsfile +++ b/pipelines/deploy/CEPlugins/Jenkinsfile @@ -35,7 +35,7 @@ node() { mv content-plugins ansible """ ansiblePlaybook = "${currentWs}/ansible/deploy-plugins.yml" - ansibleExtraArgs = "--tags plugins --extra-vars \" source_file=${currentWs}/ansible/content-plugins az_file_path=${currentWs}/ansible/content-plugins/az_copy.sh\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--tags plugins --extra-vars \" source_file=${currentWs}/ansible/content-plugins az_file_path=${currentWs}/ansible/content-plugins/az_copy.sh\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('ansiblePlaybook', ansiblePlaybook) values.put('ansibleExtraArgs', ansibleExtraArgs) println values diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index 286c957102..1984bcd2b3 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -34,7 +34,11 @@ azure_management_storage_account_name: "{{ sunbird_management_storage_account_na azure_artifact_storage_account_name: "{{ sunbird_artifact_storage_account_name }}" # Define the below if you are using AWS Cloud -aws_management_bucket_name: "" +aws_region: "" +aws_management_s3_bucket_name: "" +aws_artifact_s3_bucket_name: "" +aws_public_s3_bucket_name: "" +aws_private_s3_bucket_name: "" # Define the below if you are using Google Cloud gcloud_private_bucket_name: "" @@ -48,7 +52,7 @@ gcloud_private_bucket_projectId: "" # GCP # cloud_storage_url: https://storage.cloud.google.com/{{ gcloud_public_bucket_name }} # AWS -# cloud_storage_url: # Geetha to fill this url based on AWS role vars +# cloud_storage_url: "https://{{aws_public_s3_bucket_name}}.s3.{{aws_region}}.amazonaws.com" # Azure cloud_storage_url: "https://{{ sunbird_public_storage_account_name }}.blob.core.windows.net" diff --git 
a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index bbb1a526b1..e8e48bf801 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -30,8 +30,15 @@ azure_public_storage_account_sas: "{{ sunbird_public_storage_account_sas }}" azure_management_storage_account_sas: "{{ sunbird_management_storage_account_sas }}" # Define the below if you are using AWS Cloud -aws_management_bucket_user_access_key: "" -aws_management_bucket_user_secret_key: "" +aws_management_bucket_access_key: "" +aws_artifact_bucket_access_key: "" +aws_public_bucket_access_key: "" +aws_private_bucket_access_key: "" + +aws_management_bucket_secret_access_key: "" +aws_artifact_bucket_secret_access_key: "" +aws_public_bucket_secret_access_key: "" +aws_private_bucket_secret_access_key: "" # Define the below if you are using Google Cloud gcp_storage_service_account_name: "" From ab8a0b1f1e2e0e7d5f27c8eff9b038f73213f230 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 17 Nov 2022 01:06:12 +0530 Subject: [PATCH 079/616] fix: remove polling to save time --- ansible/deploy-plugins.yml | 19 +++++++------------ .../azure-cloud-storage/defaults/main.yml | 2 +- .../tasks/blob-delete-batch-no-poll.yml | 5 +++++ .../tasks/blob-delete-batch.yml | 4 ++-- .../tasks/blob-upload-batch-no-poll.yml | 5 +++++ .../tasks/blob-upload-batch.yml | 6 +++--- pipelines/deploy/CEPlugins/Jenkinsfile | 2 +- 7 files changed, 24 insertions(+), 19 deletions(-) create mode 100644 ansible/roles/azure-cloud-storage/tasks/blob-delete-batch-no-poll.yml create mode 100644 ansible/roles/azure-cloud-storage/tasks/blob-upload-batch-no-poll.yml diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 357baef98e..508e32e1de 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -76,20 +76,15 @@ - name: delete batch of files from azure storage include_role: name: 
azure-cloud-storage - tasks_from: blob-delete-batch.yml + tasks_from: "{{ item[0] }}" vars: - blob_delete_pattern: "content-plugins/{{ item }}/*" - with_lines: cat {{ plugin_list_to_delete_and_upload }} - - - name: upload batch of files to azure storage - include_role: - name: azure-cloud-storage - tasks_from: blob-upload-batch.yml - vars: - blob_container_folder_path: "/content-plugins/{{ item }}" - local_file_or_folder_path: "{{ source_folder }}/{{ item }}" + blob_delete_pattern: "content-plugins/{{ item[1] }}/*" + blob_container_folder_path: "/content-plugins/{{ item[1] }}" + local_file_or_folder_path: "{{ source_folder }}/{{ item[1] }}" create_container: false - with_lines: cat {{ plugin_list_to_delete_and_upload }} + with_nested: + - ['blob-delete-batch-no-poll.yml', 'blob-upload-batch-no-poll.yml'] + - "{{ lookup('file', plugins_to_delete_and_upload).split('\n') }}" tags: - plugins when: cloud_service_provider == "azure" diff --git a/ansible/roles/azure-cloud-storage/defaults/main.yml b/ansible/roles/azure-cloud-storage/defaults/main.yml index 8f6673d3c9..0f4b72d96d 100644 --- a/ansible/roles/azure-cloud-storage/defaults/main.yml +++ b/ansible/roles/azure-cloud-storage/defaults/main.yml @@ -69,4 +69,4 @@ container_public_access: "" # Create the container by default before running the specific azure tasks # If we would like to skip container creation (in case of a looped execution), you can set this value to false # in order to skip the task for every iteration -create_container: true \ No newline at end of file +create_container: True diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch-no-poll.yml b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch-no-poll.yml new file mode 100644 index 0000000000..152e3a49ad --- /dev/null +++ b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch-no-poll.yml @@ -0,0 +1,5 @@ +--- +- name: delete files and folders - deleting {{ blob_container_name }}/{{ blob_delete_pattern }} + shell: "az 
storage blob delete-batch --source {{ blob_container_name }} --pattern '{{ blob_delete_pattern }}' --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" + async: 1800 + poll: 0 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml index e642a6f24f..152e3a49ad 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml @@ -1,5 +1,5 @@ --- - name: delete files and folders - deleting {{ blob_container_name }}/{{ blob_delete_pattern }} shell: "az storage blob delete-batch --source {{ blob_container_name }} --pattern '{{ blob_delete_pattern }}' --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" - async: 3600 - poll: 10 \ No newline at end of file + async: 1800 + poll: 0 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch-no-poll.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch-no-poll.yml new file mode 100644 index 0000000000..ff00854851 --- /dev/null +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch-no-poll.yml @@ -0,0 +1,5 @@ +--- +- name: upload files and folders - uploading {{ blob_container_name }}{{ blob_container_folder_path }} + shell: "az storage blob upload-batch --destination {{ blob_container_name }}{{ blob_container_folder_path }} --source {{ local_file_or_folder_path }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" + async: 1800 + poll: 0 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml index 8f10576cb5..59d1098fc6 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml @@ -3,9 +3,9 @@ 
include_role: name: azure-cloud-storage tasks_from: container-create.yml - when: create_container is true + when: create_container is True - name: upload files and folders - uploading {{ blob_container_name }}{{ blob_container_folder_path }} shell: "az storage blob upload-batch --destination {{ blob_container_name }}{{ blob_container_folder_path }} --source {{ local_file_or_folder_path }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" - async: 3600 - poll: 10 \ No newline at end of file + async: 1800 + poll: 0 \ No newline at end of file diff --git a/pipelines/deploy/CEPlugins/Jenkinsfile b/pipelines/deploy/CEPlugins/Jenkinsfile index 078069bbe0..865d71a34c 100644 --- a/pipelines/deploy/CEPlugins/Jenkinsfile +++ b/pipelines/deploy/CEPlugins/Jenkinsfile @@ -34,7 +34,7 @@ node() { mv content-plugins ansible """ ansiblePlaybook = "${currentWs}/ansible/deploy-plugins.yml" - ansibleExtraArgs = "--tags plugins --extra-vars \" source_folder=${currentWs}/ansible/content-plugins plugin_list_to_delete_and_upload=${currentWs}/ansible/content-plugins/plugins_to_delete_and_upload.txt\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--tags plugins --extra-vars \" source_folder=${currentWs}/ansible/content-plugins plugins_to_delete_and_upload=${currentWs}/ansible/content-plugins/plugins_to_delete_and_upload.txt\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('ansiblePlaybook', ansiblePlaybook) values.put('ansibleExtraArgs', ansibleExtraArgs) println values From 949f1da2fa3bcc996c80d5b314f1ae6891c03df6 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 17 Nov 2022 01:18:41 +0530 Subject: [PATCH 080/616] fix: updated comments Signed-off-by: Keshav Prasad --- ansible/deploy-plugins.yml | 1 - ansible/roles/azure-cloud-storage/defaults/main.yml | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 
508e32e1de..2fe881dc93 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -81,7 +81,6 @@ blob_delete_pattern: "content-plugins/{{ item[1] }}/*" blob_container_folder_path: "/content-plugins/{{ item[1] }}" local_file_or_folder_path: "{{ source_folder }}/{{ item[1] }}" - create_container: false with_nested: - ['blob-delete-batch-no-poll.yml', 'blob-upload-batch-no-poll.yml'] - "{{ lookup('file', plugins_to_delete_and_upload).split('\n') }}" diff --git a/ansible/roles/azure-cloud-storage/defaults/main.yml b/ansible/roles/azure-cloud-storage/defaults/main.yml index 0f4b72d96d..824d5af18a 100644 --- a/ansible/roles/azure-cloud-storage/defaults/main.yml +++ b/ansible/roles/azure-cloud-storage/defaults/main.yml @@ -66,7 +66,7 @@ blob_container_folder_path: "" # You will need to change the access level from Azure portal or using az storage container set-permission command container_public_access: "" -# Create the container by default before running the specific azure tasks -# If we would like to skip container creation (in case of a looped execution), you can set this value to false -# in order to skip the task for every iteration +# Creates the container by default before running the specific azure blob tasks +# If we would like to skip container creation (in case of a looped execution), +# you can set this value to False in order to skip the contatiner creation task for every iteration create_container: True From 931cd0509ba6c68f303f2c0c3eb9b622fdb78206 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 17 Nov 2022 01:20:09 +0530 Subject: [PATCH 081/616] fix: added a var to control container creation Signed-off-by: Keshav Prasad --- ansible/roles/azure-cloud-storage/defaults/main.yml | 2 +- ansible/roles/azure-cloud-storage/tasks/blob-upload.yml | 1 + ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/ansible/roles/azure-cloud-storage/defaults/main.yml 
b/ansible/roles/azure-cloud-storage/defaults/main.yml index 824d5af18a..b5266cb76a 100644 --- a/ansible/roles/azure-cloud-storage/defaults/main.yml +++ b/ansible/roles/azure-cloud-storage/defaults/main.yml @@ -67,6 +67,6 @@ blob_container_folder_path: "" container_public_access: "" # Creates the container by default before running the specific azure blob tasks -# If we would like to skip container creation (in case of a looped execution), +# If you would like to skip container creation (in case of a looped execution), # you can set this value to False in order to skip the contatiner creation task for every iteration create_container: True diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml index 4b493ffb73..36423dcfc6 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml @@ -3,6 +3,7 @@ include_role: name: azure-cloud-storage tasks_from: container-create.yml + when: create_container is True - name: upload file to azure storage container shell: "az storage blob upload --container-name {{ blob_container_name }} --file {{ local_file_or_folder_path }} --name {{ blob_file_name }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" diff --git a/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml index 99ab3c2bf8..0726b48f00 100644 --- a/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml +++ b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml @@ -3,6 +3,7 @@ include_role: name: azure-cloud-storage tasks_from: container-create.yml + when: create_container is True - name: upload files and folders to azure storage using azcopy shell: "azcopy copy {{ local_file_or_folder_path }} 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path 
}}{{ storage_account_sas_token }}' --recursive" From 2d9eb95c6af1ec44b7d9869f7196c91d3375a2c6 Mon Sep 17 00:00:00 2001 From: VISHNUDAS <95604247+VISHNUDAS-tunerlabs@users.noreply.github.com> Date: Thu, 17 Nov 2022 10:18:16 +0530 Subject: [PATCH 082/616] env changes for project certificate story (#3599) --- ansible/roles/stack-sunbird/templates/ml-core-service.env | 8 +------- .../roles/stack-sunbird/templates/ml-projects-service.env | 3 +++ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/ansible/roles/stack-sunbird/templates/ml-core-service.env b/ansible/roles/stack-sunbird/templates/ml-core-service.env index 7b1da9c931..e3db337464 100755 --- a/ansible/roles/stack-sunbird/templates/ml-core-service.env +++ b/ansible/roles/stack-sunbird/templates/ml-core-service.env @@ -81,10 +81,4 @@ ELASTICSEARCH_ENTITIES_INDEX={{ml_core_elasticsearch_user_extension_index_type | USER_SERVICE_URL={{ml_core_user_service_URL | default("http://learner-service:9000")}} ## portal url of env -APP_PORTAL_BASE_URL={{ proto }}://{{ domain_name }} - -# Project certificate enable or disable flag E.g. ON/OFF -PROJECT_CERTIFICATE_ON_OFF={{ml_core_project_certificate_on_off | default("ON")}} - -# certificate issuer KID value -CERTIFICATE_ISSUER_KID={{certificate_issuer_kid | default("")}} \ No newline at end of file +APP_PORTAL_BASE_URL={{ proto }}://{{ domain_name }} \ No newline at end of file diff --git a/ansible/roles/stack-sunbird/templates/ml-projects-service.env b/ansible/roles/stack-sunbird/templates/ml-projects-service.env index eeca6c4d70..16e8340a1c 100644 --- a/ansible/roles/stack-sunbird/templates/ml-projects-service.env +++ b/ansible/roles/stack-sunbird/templates/ml-projects-service.env @@ -39,3 +39,6 @@ PROJECT_SUBMISSION_TOPIC={{ml_project_submission_topic | default (env_name+".ml. 
# Base url of the sunbird enviornment USER_SERVICE_URL={{ml_project_user_service_URL | default("http://learner-service:9000")}} + +# certificate issuer KID value +CERTIFICATE_ISSUER_KID={{certificate_issuer_kid | default("")}} From 4b41e72e1da9645cb70d7893355141f7210bb3ad Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 17 Nov 2022 15:21:23 +0530 Subject: [PATCH 083/616] fix: remove unnecessary become Signed-off-by: Keshav Prasad --- ansible/deploy-plugins.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 0d05b069eb..63dba9cd96 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -1,5 +1,4 @@ - hosts: local - become: yes gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" @@ -201,4 +200,4 @@ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}" tags: - plugins - when: cloud_service_provider == "aws" \ No newline at end of file + when: cloud_service_provider == "aws" From 25c6f71fad5ba01e1c2374cc9a23b7f53fdf77ac Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 17 Nov 2022 17:14:10 +0530 Subject: [PATCH 084/616] fix: poll incorrectly set to 0 Signed-off-by: Keshav Prasad --- ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml | 4 ++-- ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml index 152e3a49ad..e642a6f24f 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-delete-batch.yml @@ -1,5 +1,5 @@ --- - name: delete files and folders - deleting {{ blob_container_name }}/{{ blob_delete_pattern }} shell: "az storage blob delete-batch --source {{ blob_container_name }} --pattern '{{ blob_delete_pattern }}' --account-name {{ storage_account_name }} 
--account-key {{ storage_account_key }}" - async: 1800 - poll: 0 \ No newline at end of file + async: 3600 + poll: 10 \ No newline at end of file diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml index 59d1098fc6..82f806a803 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml @@ -7,5 +7,5 @@ - name: upload files and folders - uploading {{ blob_container_name }}{{ blob_container_folder_path }} shell: "az storage blob upload-batch --destination {{ blob_container_name }}{{ blob_container_folder_path }} --source {{ local_file_or_folder_path }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" - async: 1800 - poll: 0 \ No newline at end of file + async: 3600 + poll: 10 \ No newline at end of file From 3dc90f9eb9ff25c8b919b81d806c55ec328307a5 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 17 Nov 2022 19:24:20 +0530 Subject: [PATCH 085/616] fix: renamed task (#3601) --- ansible/deploy-plugins.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 63dba9cd96..8da2bd445e 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -72,7 +72,7 @@ - preview - block: - - name: delete batch of files from azure storage + - name: delete and re-upload plugins include_role: name: azure-cloud-storage tasks_from: "{{ item[0] }}" @@ -200,4 +200,4 @@ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}" tags: - plugins - when: cloud_service_provider == "aws" + when: cloud_service_provider == "aws" \ No newline at end of file From bdbea1be4ef6c30d61ab1406669a5a3cd0573aef Mon Sep 17 00:00:00 2001 From: VISHNUDAS <95604247+VISHNUDAS-tunerlabs@users.noreply.github.com> Date: Mon, 21 Nov 2022 15:47:09 +0530 Subject: [PATCH 086/616] Certificate env changes- RC- internal call used 
instead env variable (#3603) From daf4b7c37236bae6f813ccf26acaa58a80662bc4 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Mon, 21 Nov 2022 17:27:44 +0530 Subject: [PATCH 087/616] Update config.j2 --- .../ml-analytics-service/templates/config.j2 | 144 ++++++++++-------- 1 file changed, 83 insertions(+), 61 deletions(-) diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index cef1739e4e..f43f08191a 100644 --- a/ansible/roles/ml-analytics-service/templates/config.j2 +++ b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -4,7 +4,8 @@ survey_app = {{ ml_analytics_survey_app_name }} integrated_app = {{ ml_analytics_integrated_app_name }} -integrated_portal = {{ ml_analytics_integrated_portal }} +integrated_portal = {{ ml_analytics_integrated_portal_name }} + [API_HEADERS] @@ -14,6 +15,7 @@ authorization = {{ ml_analytics_api_authorization_key }} internal_access_token = {{ ml_analytics_api_access_token }} + [ML_SURVEY_SERVICE_URL] url = {{ ml_analytics_survey_service }} @@ -24,24 +26,15 @@ user_profile_end_point = assessment/api/v1/userExtension/getProfile/ evidence_base_url = {{ ml_analytics_evidence_base_url }} -[MONGO] - -# -------------- -# Mongo url -#--------------- -mongo_url = mongodb://{{ ml_analytics_mongodb_url }} +[MONGO] -# ----------------------- -# Mongo database name -# ----------------------- +url = mongodb://{{ ml_analytics_mongodb_url }} database_name = {{ ml_analytics_mongo_db_name }} -# ------------------- -# Mongo Collections -# ------------------- +# ------ Mongo Collections ------- # observation_sub_collection = {{ ml_analytics_mongo_observation_submission_collection }} solutions_collection = {{ ml_analytics_mongo_solution_collection }} @@ -68,6 +61,7 @@ survey_submissions_collection = {{ ml_analytics_mongo_survey_submissions_collect survey_collection = {{ ml_analytics_mongo_survey_collection }} + [DRUID] metadata_url = http://{{ ml_analytics_druid_url 
}}/druid/coordinator/v1/datasources/ @@ -84,13 +78,15 @@ observation_status_injestion_spec = {{ ml_analytics_druid_observation_status_inj project_injestion_spec = {{ ml_analytics_druid_project_injestion_spec }} -ml_distinctCnt_obs_status_spec = {{ ml_analytics_druid_distinctCnt_obs_injestion_spec }} +ml_distinctCnt_obs_status_spec = {{ ml_analytics_druid_distinctCnt_obs_injestion_spec }} -ml_distinctCnt_obs_domain_spec = {{ ml_analytics_druid_distinctCnt_obs_domain_injestion_spec }} +ml_distinctCnt_obs_domain_spec = {{ ml_analytics_druid_distinctCnt_obs_domain_injestion_spec }} -ml_distinctCnt_obs_domain_criteria_spec = {{ ml_analytics_druid_distinctCnt_obs_domain_criteria_injestion_spec }} +ml_distinctCnt_obs_domain_criteria_spec = {{ ml_analytics_druid_distinctCnt_obs_domain_criteria_injestion_spec }} -ml_distinctCnt_projects_status_spec = {{ ml_analytics_druid_distinctCnt_projects_status_injestion_spec }} +ml_distinctCnt_projects_status_spec = {{ ml_analytics_druid_distinctCnt_projects_status_injestion_spec }} + +ml_distinctCnt_prglevel_projects_status_spec = {{ ml_analytics_druid_distinctCnt_prglevel_projects_status_injestion_spec }} observation_status_rollup_injestion_spec = {{ ml_analytics_druid_observation_status_rollup_injestion_spec }} @@ -98,8 +94,6 @@ project_rollup_injestion_spec = {{ ml_analytics_druid_project_rollup_injestion_s ml_survey_rollup_spec = {{ml_analytics_druid_survey_rollup_injestion_spec}} -ml_distinctCnt_prglevel_projects_status_spec = {{ ml_analytics_druid_distinctCnt_prglevel_projects_status_injestion_spec}} - survey_status_injestion_spec = {{ ml_analytics_druid_survey_status_injestion_spec }} observation_query_spec = {{ ml_analytics_druid_observation_query_spec }} @@ -110,6 +104,9 @@ survey_query_spec = {{ml_analytics_druid_survey_query_spec}} survey_injestion_spec = {{ml_analytics_druid_survey_batch_ingestion_spec}} +intervals = {{ml_analytics_druid_interval_list}} + + [KAFKA] url = {{ ml_analytics_kafka_url }} @@ -126,6 +123,7 @@ 
survey_raw_topic = {{ ml_analytics_kafka_survey_topic_name }} survey_druid_topic = {{ ml_analytics_kafka_survey_druid_topic_name }} + [LOGS] observation_streaming_success = {{ ml_analytics_observation_log_folder_path }}/success.log @@ -152,55 +150,54 @@ survey_streaming_success = {{ ml_analytics_survey_log_folder_path }}/success.log survey_streaming_error = {{ ml_analytics_survey_log_folder_path }}/error.log -[AZURE] +{% if ML_Cloud_Service_Provider is eq 'ORACLE' %} -account_name = {{ ml_analytics_azure_account_name }} +[ORACLE] -sas_token = {{ ml_analytics_azure_sas_token }} +endpoint_url = {{ ml_ORACLE_endpoint_url }} -container_name = {{ ml_analytics_azure_container_name }} +access_key = {{ ml_ORACLE_access_key }} -observation_blob_path = {{ ml_analytics_observation_azure_blob_path }} +secret_access_key = {{ ml_ORACLE_secret_access_key }} -projects_blob_path = {{ ml_analytics_project_azure_blob_path }} +region_name = {{ ml_ORACLE_region_name }} -observation_distinctCount_blob_path = {{ ml_analytics_obs_distinctCnt_azure_blob_path }} +bucket_name = {{ ml_ORACLE_bucket_name }} -observation_distinctCount_domain_blob_path = {{ ml_analytics_obs_distinctCnt_domain_azure_blob_path }} +{% elif ML_Cloud_Service_Provider is eq 'GCP' %} -observation_distinctCount_domain_criteria_blob_path = {{ ml_analytics_obs_distinctCnt_domain_criteria_azure_blob_path }} +[GCP] -projects_distinctCnt_blob_path = {{ ml_analytics_projects_distinctCnt_azure_blob_path }} +secret_data = {{ ml_GCP_secret_json_file }} -projects_rollup_blob_path = {{ ml_analytics_project_rollup_azure_blob_path }} +bucket_name = {{ ml_GCP_bucket_name }} -observation_rollup_blob_path = {{ ml_analytics_observation_rollup_azure_blob_path }} +{% elif ML_Cloud_Service_Provider is eq 'AWS' %} -survey_rollup_blob_path = {{ ml_analytics_survey_rollup_azure_blob_path }} +[AWS] -projects_distinctCnt_prgmlevel_blob_path = {{ml_analytics_projects_distinctCnt_prglevel_azure_blob_path}} +service_name = {{ ml_AWS_service_name 
}} -survey_blob_path = {{ ml_analytics_survey_azure_blob_path }} +access_key = {{ ml_AWS_access_key }} -public_account_name = {{ ml_analytics_public_azure_account_name }} +secret_access_key = {{ ml_AWS_secret_access_key }} -public_access_key = {{ ml_analytics_public_azure_access_key }} +region_name = {{ ml_AWS_region_name }} -public_container_name = {{ ml_analytics_public_azure_container_name }} +bucket_name = {{ ml_AWS_bucket_name }} -projects_program_csv = {{ ml_analytics_program_dashboard_azure_blob_path }} +{% else %} -observation_batch_ingestion_data_del = {{ ml_analytics_observation_batchupdate_azure_blob_path }} +[AZURE] -survey_batch_ingestion_data_del = {{ml_analytics_survey_batchupdate_azure_blob_path}} +account_name = {{ ml_analytics_azure_account_name }} -[REDIS] +sas_token = {{ ml_analytics_azure_sas_token }} -host = {{ ml_analytics_redis_host }} +container_name = {{ ml_analytics_azure_container_name }} -port = {{ ml_analytics_redis_port }} +account_key = {{ ml_analytics_azure_account_key }} -db_name = {{ ml_analytics_redis_db_name }} [OUTPUT_DIR] @@ -208,22 +205,22 @@ project = {{ ml_analytics_project_output_dir }} observation_status = {{ ml_analytics_observation_status_output_dir }} -observation_distinctCount_status = {{ ml_analytics_obs_distinctCnt_output_dir }} +observation_distinctCount_status = {{ ml_analytics_obs_distinctCnt_output_dir }} -observation_distinctCount_domain = {{ ml_analytics_obs_distinctCnt_domain_output_dir }} +observation_distinctCount_domain = {{ ml_analytics_obs_distinctCnt_domain_output_dir }} -observation_distinctCount_domain_criteria = {{ ml_analytics_obs_distinctCnt_domain_criteria_output_dir }} +observation_distinctCount_domain_criteria = {{ ml_analytics_obs_distinctCnt_domain_criteria_output_dir }} projects_distinctCount = {{ ml_analytics_projects_distinctCnt_output_dir }} +projects_distinctCount_prgmlevel = {{ ml_analytics_projects_distinctCnt_prglevel_output_dir }} + project_rollup = {{ 
ml_analytics_project_rollup_output_dir }} observation_status_rollup = {{ ml_analytics_observation_status_rollup_output_dir }} survey_rollup = {{ ml_analytics_survey_rollup_output_dir }} -projects_distinctCount_prgmlevel = {{ml_analytics_projects_distinctCnt_prglevel_output_dir}} - survey_status = {{ ml_analytics_survey_status_output_dir }} observation_sub_ids = {{ ml_analytics_observation_submission_id_filepath }} @@ -234,26 +231,51 @@ survey_sub_ids = {{ml_analytics_survey_submission_id_filepath}} survey_druid_data = {{ml_analytics_survey_batchupdate_output_dir}} -[CLOUD_STORAGE] -service_name = {{ ml_analytics_AWS_service_name }} +[SLACK] -access_key = {{ ml_analytics_AWS_access_key }} +token = {{ml_slack_token}} -secret_access_key = {{ ml_analytics_AWS_secret_access_key }} +channel = {{ml_slack_channel}} -region_name = {{ ml_analytics_AWS_region_name }} -bucket_name = {{ ml_analytics_AWS_bucket_name }} +[VAM] -[SLACK] +druid_query_url = {{ ml_druid_query_data }} -token = {{ ml_analytics_slack_token }} +program_dashboard_data = {{ ml_program_dashboard_data }} -channel = {{ ml_analytics_channel_name }} -[VAM] +[COMMON] -druid_query_url = {{ ml_druid_query_data }} +cloud_module_path = {{ ml_analytics_cloud_package_path }} -program_dashboard_data = {{ ml_program_dashboard_data }} +observation_blob_path = {{ ml_analytics_observation_azure_blob_path }} + +projects_blob_path = {{ ml_analytics_project_azure_blob_path }} + +observation_distinctCount_blob_path = {{ ml_analytics_obs_distinctCnt_azure_blob_path }} + +observation_distinctCount_domain_blob_path = {{ ml_analytics_obs_distinctCnt_domain_azure_blob_path }} + +observation_distinctCount_domain_criteria_blob_path = {{ ml_analytics_obs_distinctCnt_domain_criteria_azure_blob_path }} + +projects_distinctCnt_blob_path = {{ ml_analytics_projects_distinctCnt_azure_blob_path }} + +projects_distinctCnt_prgmlevel_blob_path = {{ ml_analytics_projects_distinctCnt_prglevel_azure_blob_path }} + +projects_rollup_blob_path = {{ 
ml_analytics_project_rollup_azure_blob_path }} + +observation_rollup_blob_path = {{ ml_analytics_observation_rollup_azure_blob_path }} + +survey_rollup_blob_path = {{ ml_analytics_survey_rollup_azure_blob_path }} + +survey_blob_path = {{ ml_analytics_survey_azure_blob_path }} + +projects_program_csv = {{ ml_analytics_program_dashboard_azure_blob_path }} + +observation_batch_ingestion_data_del = {{ ml_analytics_observation_batchupdate_azure_blob_path }} + +survey_batch_ingestion_data_del = {{ ml_analytics_survey_batchupdate_azure_blob_path}} + +cname_url = {{ ml_analytics_cname_url }} From 6bc03f69e454aea55aeeb8b74e5003b403d4376c Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Mon, 21 Nov 2022 17:35:57 +0530 Subject: [PATCH 088/616] Update main.yml --- ansible/roles/ml-analytics-service/tasks/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ansible/roles/ml-analytics-service/tasks/main.yml b/ansible/roles/ml-analytics-service/tasks/main.yml index 0998cf8188..6bf640005e 100755 --- a/ansible/roles/ml-analytics-service/tasks/main.yml +++ b/ansible/roles/ml-analytics-service/tasks/main.yml @@ -105,6 +105,10 @@ dest: "{{ config_path }}/config.ini" backup: yes +- name: Copy GCP Secrets to JSON file + copy: dest="{{config_path}}/{{ml_GCP_secret_json_file}}" content="{{ ml_GCP_Secrets | to_nice_json}}" mode=0644 owner="{{ USER }}" + when: ML_Cloud_Service_Provider == 'GCP' + - name: Templating the shell_script_config.j2 to shell_script_config template: src: "shell_script_config.j2" From 38e6fd566b7f2c968c7341088e453d1bd23b7fb2 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Mon, 21 Nov 2022 17:37:28 +0530 Subject: [PATCH 089/616] Update config.j2 --- ansible/roles/ml-analytics-service/templates/config.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index f43f08191a..ce5ff99bff 100644 --- 
a/ansible/roles/ml-analytics-service/templates/config.j2 +++ b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -4,7 +4,7 @@ survey_app = {{ ml_analytics_survey_app_name }} integrated_app = {{ ml_analytics_integrated_app_name }} -integrated_portal = {{ ml_analytics_integrated_portal_name }} +integrated_portal = {{ ml_analytics_integrated_portal }} [API_HEADERS] From eb8eb6a37af4aa6935c82488e4b678c930742467 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Mon, 21 Nov 2022 17:56:34 +0530 Subject: [PATCH 090/616] Update main.yml --- ansible/roles/ml-analytics-service/defaults/main.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 1e6f116cc3..6733061380 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -110,3 +110,8 @@ ml_analytics_druid_survey_batch_ingestion_spec : '{"type":"index","spec":{"ioCon ml_analytics_survey_batchupdate_azure_blob_path : "survey/batchDeletion" ml_analytics_survey_submission_id_filepath : "{{ WORKDIR }}/ml-analytics-service/survey/submissions.csv" ml_analytics_survey_batchupdate_output_dir : "{{ WORKDIR }}/source/survey/" +ml_analytics_druid_interval_list: '["1901-01-01T00:00:00+00:00/2020-01-01T00:00:00+00:00","2020-01-01T00:00:00+00:00/2020-06-01T00:00:00+00:00","2020-06-01T00:00:00+00:00/2021-01-01T00:00:00+00:00","2021-01-01T00:00:00+00:00/2021-06-01T00:00:00+00:00","2021-06-01T00:00:00+00:00/2022-01-01T00:00:00+00:00","2022-01-01T00:00:00+00:00/2022-03-01T00:00:00+00:00","2022-03-01T00:00:00+00:00/2022-06-01T00:00:00+00:00","2022-06-01T00:00:00+00:00/2022-09-01T00:00:00+00:00","2022-09-01T00:00:00+00:00/2023-01-01T00:00:00+00:00"]' +ml_analytics_azure_account_key: "{{ sunbird_private_storage_account_sas }}" +ML_Cloud_Service_Provider: "{{ ml_csp | default('AZURE') }}" ## Valid options - ORACLE, GCP, AWS & AZURE 
+ml_analytics_cloud_package_path: "{{ ml_cloud_package_path | default('') }}" +ml_analytics_cname_url: "{{ ml_cname_url | default('') }}" From 7c02eb4653bd23890c3b0fedcd5b77342d29bde7 Mon Sep 17 00:00:00 2001 From: Jayaprakash8887 Date: Wed, 23 Nov 2022 18:39:44 +0530 Subject: [PATCH 091/616] Issue #KN-427 feat: Knowledge service cloud-agnostic --- .../templates/assessment-service_application.conf | 9 +++++---- .../templates/content-service_application.conf | 9 +++++---- .../templates/taxonomy-service_application.conf | 9 +++++---- 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/ansible/roles/stack-sunbird/templates/assessment-service_application.conf b/ansible/roles/stack-sunbird/templates/assessment-service_application.conf index 851433f160..60d129907a 100644 --- a/ansible/roles/stack-sunbird/templates/assessment-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/assessment-service_application.conf @@ -383,10 +383,11 @@ languageCode { telugu : "te" } -cloud_storage_type: "azure" -azure_storage_key: "{{ sunbird_public_storage_account_name }}" -azure_storage_secret: "{{ sunbird_public_storage_account_key }}" -azure_storage_container: "{{ sunbird_content_azure_storage_container }}" +cloud_storage_type: "{{ cloud_service_provider }}" +cloud_storage_key: "{{ cloud_public_storage_accountname }}" +cloud_storage_secret: "{{ cloud_public_storage_secret }}" +cloud_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_storage_container: "{{ cloud_storage_content_bucketname }}" kafka { urls : "{{ kafka_urls }}" diff --git a/ansible/roles/stack-sunbird/templates/content-service_application.conf b/ansible/roles/stack-sunbird/templates/content-service_application.conf index ee0a80a645..24c1007695 100644 --- a/ansible/roles/stack-sunbird/templates/content-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/content-service_application.conf @@ -481,10 +481,11 @@ composite { url : "{{ sunbird_search_service_api_base_url 
}}/v3/search" } } -cloud_storage_type: "azure" -azure_storage_key: "{{ sunbird_public_storage_account_name }}" -azure_storage_secret: "{{ sunbird_public_storage_account_key }}" -azure_storage_container: "{{ sunbird_content_azure_storage_container }}" +cloud_storage_type: "{{ cloud_service_provider }}" +cloud_storage_key: "{{ cloud_public_storage_accountname }}" +cloud_storage_secret: "{{ cloud_public_storage_secret }}" +cloud_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_storage_container: "{{ cloud_storage_content_bucketname }}" # Google Drive APIKEY learning_content_drive_apiKey = "{{ learning_content_drive_apiKey }}" diff --git a/ansible/roles/stack-sunbird/templates/taxonomy-service_application.conf b/ansible/roles/stack-sunbird/templates/taxonomy-service_application.conf index 1c3714fbe8..e1298a1b92 100644 --- a/ansible/roles/stack-sunbird/templates/taxonomy-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/taxonomy-service_application.conf @@ -366,10 +366,11 @@ platform { } # Cloud Storage Config -cloud_storage_type: "azure" -azure_storage_key: "{{ sunbird_public_storage_account_name }}" -azure_storage_secret: "{{ sunbird_public_storage_account_key }}" -azure_storage_container: "{{ sunbird_content_azure_storage_container }}" +cloud_storage_type: "{{ cloud_service_provider }}" +cloud_storage_key: "{{ cloud_public_storage_accountname }}" +cloud_storage_secret: "{{ cloud_public_storage_secret }}" +cloud_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_storage_container: "{{ cloud_storage_content_bucketname }}" installation.id: ekstep From f804103408530aa2d9c2de3b6ad43c08aecd97bc Mon Sep 17 00:00:00 2001 From: Jayaprakash8887 Date: Wed, 23 Nov 2022 18:43:07 +0530 Subject: [PATCH 092/616] Revert "Issue #KN-9 feat: Content Publish API refactor." This reverts commit e37ca7291abf51ec385d9c464a3852f32b5724f1. 
--- .../stack-sunbird/templates/content-service_application.conf | 1 - 1 file changed, 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/content-service_application.conf b/ansible/roles/stack-sunbird/templates/content-service_application.conf index 24c1007695..fb5a2e7667 100644 --- a/ansible/roles/stack-sunbird/templates/content-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/content-service_application.conf @@ -494,7 +494,6 @@ kafka { urls : "{{ kafka_urls }}" topic.send.enable : true topics.instruction : "{{ env_name }}.learning.job.request" - publish.request.topic : "{{ env_name }}.publish.job.request" } # DIAL Link Config From 267f338854cd4f8aacb4f957dbeb98537fea55a0 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 24 Nov 2022 11:35:50 +0530 Subject: [PATCH 093/616] Update config.j2 --- ansible/roles/ml-analytics-service/templates/config.j2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index ce5ff99bff..770de394cb 100644 --- a/ansible/roles/ml-analytics-service/templates/config.j2 +++ b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -164,7 +164,7 @@ region_name = {{ ml_ORACLE_region_name }} bucket_name = {{ ml_ORACLE_bucket_name }} -{% elif ML_Cloud_Service_Provider is eq 'GCP' %} +{% elif ML_Cloud_Service_Provider is eq 'gcloud' %} [GCP] @@ -172,7 +172,7 @@ secret_data = {{ ml_GCP_secret_json_file }} bucket_name = {{ ml_GCP_bucket_name }} -{% elif ML_Cloud_Service_Provider is eq 'AWS' %} +{% elif ML_Cloud_Service_Provider is eq 'aws' %} [AWS] From d22223a0a4cfc6103d333ffc8069a162abdd6e4c Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 24 Nov 2022 11:53:35 +0530 Subject: [PATCH 094/616] Update main.yml --- .../ml-analytics-service/defaults/main.yml | 22 ++++++++----------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git 
a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 6733061380..da0df0ada6 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -44,8 +44,8 @@ ml_analytics_kafka_survey_druid_topic_name: "{{ env_name }}.ml.survey.druid" ml_analytics_observation_log_folder_path: "{{ WORKDIR }}/logs/observation" ml_analytics_project_log_folder_path: "{{ WORKDIR }}/logs/project" ml_analytics_survey_log_folder_path: "{{ WORKDIR }}/logs/survey" -ml_analytics_azure_account_name: "{{ sunbird_private_storage_account_name }}" -ml_analytics_azure_container_name: "telemetry-data-store" +ml_analytics_azure_account_name: "{{ cloud_private_storage_accountname }}" +ml_analytics_azure_container_name: "{{ cloud_storage_telemetry_bucketname }}" ml_analytics_observation_azure_blob_path: "observation/status/" ml_analytics_project_azure_blob_path: "projects/" ml_analytics_redis_host: "{{ml_redis_host | default(groups['dp-redis'][0])}}" @@ -57,7 +57,7 @@ ml_analytics_api_authorization_key: "{{ml_api_auth_token | default('sunbird_api_ ml_analytics_api_access_token: "{{ml_api_access_token | default('ml_core_internal_access_token')}}" ml_analytics_druid_observation_status_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": "azure","uris": 
["azure://telemetry-data-store/observation/status/sl_observation_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-observation-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"auto"},"dimensionsSpec":{"dimensions":["status","entity_externalId","entity_id","entity_type","solution_id","solution_externalId","submission_id","entity_name","solution_name","role_title","school_name","school_code","school_externalId","state_name","state_code","state_externalId","district_name","district_code","district_externalId","block_name","block_code","block_externalId","cluster_name","cluster_code","cluster_externalId","completedDate","channel","parent_channel","program_id","program_externalId","program_name","app_name","user_id","private_program","solution_type","organisation_name","ecm_marked_na","board_name","updatedAt","organisation_id","user_type","observed_school_name","observed_school_id","observed_school_code","observed_state_name","observed_state_id","observed_state_code","observed_district_name","observed_district_id","observed_district_code","observed_block_name","observed_block_id","observed_block_code","observed_cluster_name","observed_cluster_id","observed_cluster_code"]},"metricsSpec":[]}}}' ml_analytics_druid_project_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": "azure","uris": ["azure://telemetry-data-store/projects/sl_projects.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-project","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"project_updated_date","format":"auto"},"dimensionsSpec":{"dimensions":[]},"metricsSpec":[]}}}' 
-ml_analytics_azure_sas_token: "{{ sunbird_private_storage_account_key }}" +ml_analytics_azure_sas_token: "{{ cloud_private_storage_secret }}" ml_analytics_druid_distinctCnt_obs_injestion_spec: '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount/ml_observation_distinctCount_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"status"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' ml_analytics_druid_distinctCnt_obs_domain_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain/ml_observation_distinctCount_domain.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' ml_analytics_druid_distinctCnt_obs_domain_criteria_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain_criteria/ml_observation_distinctCount_domain_criteria.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain-criteria","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"string","name":"criteria_name"},{"type":"string","name":"criteria_score"},{"type":"string","name":"criteria_id"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' @@ -80,11 +80,10 @@ ml_analytics_observation_status_rollup_output_dir: "/opt/sparkjobs/source/observ ml_analytics_druid_project_rollup_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/rollup/projects_rollup.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-status-rollup","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"project_updated_date","format":"iso"},"dimensionsSpec":{"dimensions":["project_title","project_goal","area_of_improvement","status_of_project","tasks_name","tasks_status","designation","task_evidence_status","project_id","task_id","project_created_type","parent_channel","program_id","program_name","project_updated_date","createdBy","program_externalId","private_program","task_deleted_flag","project_terms_and_condition","state_externalId","block_externalId","district_externalId","cluster_externalId","school_externalId","state_name","block_name","district_name","cluster_name","school_name","board_name","organisation_name","solution_id","organisation_id",{"name":"status_code","type":"long"}]},"metricsSpec":[{"name":"count","type":"count"},{"name":"sum___v","type":"longSum","fieldName":"__v"},{"name":"sum_status_code","type":"longMax","fieldName":"status_code"},{"type":"HLLSketchBuild","name":"count_of_createBy","fieldName":"createdBy"},{"type":"HLLSketchBuild","name":"count_of_project_id","fieldName":"project_id"},{"type":"HLLSketchBuild","name":"count_of_solution_id","fieldName":"solution_id"},{"type":"HLLSketchBuild","name":"count_of_program_id","fieldName":"program_id"}]}}}' ml_analytics_druid_observation_status_rollup_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/rollup/observation_status_rollup.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-observation-status-rollup","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"iso"},"dimensionsSpec":{"dimensions":["status","user_id","solution_id","submission_id","entity_name","completedDate","program_id","private_program","solution_type","updatedAt","role_title","solution_name","program_name","channel","parent_channel","block_name","district_name","school_name","cluster_name","state_name","organisation_name","board_name","district_externalId","state_externalId","block_externalId","cluster_externalId","school_externalId","organisation_id",{"type":"long","name":"status_code"}]},"metricsSpec":[{"type":"count","name":"count"},{"type":"longSum","name":"sum___v","fieldName":"__v","expression":null},{"type":"HLLSketchBuild","name":"count_distinct_solution","fieldName":"solution_id","lgK":12,"tgtHllType":"HLL_4","round":false},{"type":"HLLSketchBuild","name":"count_distinct_submission_id","fieldName":"submission_id","lgK":12,"tgtHllType":"HLL_4","round":false},{"type":"HLLSketchBuild","name":"count_distinct_user_id","fieldName":"user_id","lgK":12,"tgtHllType":"HLL_4","round":false}]}}}' ml_analytics_druid_rollup_url: "{{groups['druid'][0]}}:8081" -ml_analytics_AWS_service_name: "{{ ml_AWS_service_name | default('') }}" -ml_analytics_AWS_access_key: "{{ ml_AWS_access_key | default('') }}" -ml_analytics_AWS_secret_access_key: "{{ ml_AWS_secret_access_key | default('') }}" -ml_analytics_AWS_region_name: "{{ ml_AWS_region_name | default('') }}" -ml_analytics_AWS_bucket_name: "{{ ml_AWS_bucket_name | default('') }}" +ml_analytics_AWS_access_key: "{{ 
cloud_private_storage_accountname }}" +ml_analytics_AWS_secret_access_key: "{{ cloud_private_storage_secret }}" +ml_analytics_AWS_region_name: "{{ cloud_private_storage_region }}" +ml_analytics_AWS_bucket_name: "{{ cloud_storage_telemetry_bucketname }}" ml_analytics_druid_distinctCnt_prglevel_projects_status_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/distinctCountPrglevel/ml_projects_distinctCount_prgmlevel.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-programLevel-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"status_of_project"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"private_program"},{"type":"string","name":"project_created_type"},{"type":"string","name":"parent_channel"},{"type":"long","name":"unique_projects"},{"type":"long","name":"unique_users"},{"type":"long","name":"no_of_imp_with_evidence"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' ml_analytics_projects_distinctCnt_prglevel_output_dir: "{{ WORKDIR }}/source/projects/distinctCountPrglevel/output" ml_analytics_projects_distinctCnt_prglevel_azure_blob_path: "projects/distinctCountPrglevel/" @@ -93,9 +92,6 @@ ml_analytics_survey_azure_blob_path : "survey/status/" ml_analytics_druid_survey_status_injestion_spec : 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/survey/status/sl_survey_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-survey-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":true,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"iso"},"dimensionsSpec":{"dimensions":["survey_submission_id", "submission_status", "user_id", "user_sub_type", "user_type", "state_externalId", "block_externalId", "district_externalId", "cluster_externalId", "school_externalId", "state_name", "block_name", "district_name", "cluster_name", "school_name", "board_name", "organisation_id", "organisation_name", "program_externalId", "program_id", "program_name", "survey_name", "survey_id", "survey_externalId", "created_date", "submission_date", "updatedAt", "parent_channel", "solution_name", "solution_id","private_program"]},"metricsSpec":[]}}}' ml_analytics_slack_token: "{{ ml_slack_token | default('') }}" ml_analytics_channel_name: "{{ ml_slack_channel | default('') }}" -ml_analytics_public_azure_account_name: "{{ ml_public_azure_account | default('') }}" -ml_analytics_public_azure_access_key: "{{ ml_public_azure_key | default('') }}" -ml_analytics_public_azure_container_name: "{{ ml_public_azure_container | default('') }}" ml_analytics_program_dashboard_azure_blob_path: "{{ ml_program_blob_path | default('') }}" ml_druid_query_data: "{{ ml_druid_query | default('') }}" ml_program_dashboard_data: "{{ ml_program_data | default('') }}" @@ -111,7 +107,7 @@ ml_analytics_survey_batchupdate_azure_blob_path : "survey/batchDeletion" ml_analytics_survey_submission_id_filepath : "{{ WORKDIR }}/ml-analytics-service/survey/submissions.csv" ml_analytics_survey_batchupdate_output_dir : "{{ WORKDIR }}/source/survey/" ml_analytics_druid_interval_list: 
'["1901-01-01T00:00:00+00:00/2020-01-01T00:00:00+00:00","2020-01-01T00:00:00+00:00/2020-06-01T00:00:00+00:00","2020-06-01T00:00:00+00:00/2021-01-01T00:00:00+00:00","2021-01-01T00:00:00+00:00/2021-06-01T00:00:00+00:00","2021-06-01T00:00:00+00:00/2022-01-01T00:00:00+00:00","2022-01-01T00:00:00+00:00/2022-03-01T00:00:00+00:00","2022-03-01T00:00:00+00:00/2022-06-01T00:00:00+00:00","2022-06-01T00:00:00+00:00/2022-09-01T00:00:00+00:00","2022-09-01T00:00:00+00:00/2023-01-01T00:00:00+00:00"]' -ml_analytics_azure_account_key: "{{ sunbird_private_storage_account_sas }}" -ML_Cloud_Service_Provider: "{{ ml_csp | default('AZURE') }}" ## Valid options - ORACLE, GCP, AWS & AZURE +ml_analytics_azure_account_key: "{{ cloud_private_storage_secret }}" +ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## Valid options - ORACLE, GCP, AWS & AZURE ml_analytics_cloud_package_path: "{{ ml_cloud_package_path | default('') }}" ml_analytics_cname_url: "{{ ml_cname_url | default('') }}" From 69249369d7031e55ccb3737c4352f95e16d652df Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 24 Nov 2022 11:55:16 +0530 Subject: [PATCH 095/616] Update main.yml --- ansible/roles/ml-analytics-service/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index da0df0ada6..6b12734b74 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -108,6 +108,6 @@ ml_analytics_survey_submission_id_filepath : "{{ WORKDIR }}/ml-analytics-service ml_analytics_survey_batchupdate_output_dir : "{{ WORKDIR }}/source/survey/" ml_analytics_druid_interval_list: 
'["1901-01-01T00:00:00+00:00/2020-01-01T00:00:00+00:00","2020-01-01T00:00:00+00:00/2020-06-01T00:00:00+00:00","2020-06-01T00:00:00+00:00/2021-01-01T00:00:00+00:00","2021-01-01T00:00:00+00:00/2021-06-01T00:00:00+00:00","2021-06-01T00:00:00+00:00/2022-01-01T00:00:00+00:00","2022-01-01T00:00:00+00:00/2022-03-01T00:00:00+00:00","2022-03-01T00:00:00+00:00/2022-06-01T00:00:00+00:00","2022-06-01T00:00:00+00:00/2022-09-01T00:00:00+00:00","2022-09-01T00:00:00+00:00/2023-01-01T00:00:00+00:00"]' ml_analytics_azure_account_key: "{{ cloud_private_storage_secret }}" -ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## Valid options - ORACLE, GCP, AWS & AZURE +ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## Valid options - ORACLE, gcloud, aws & azure ml_analytics_cloud_package_path: "{{ ml_cloud_package_path | default('') }}" ml_analytics_cname_url: "{{ ml_cname_url | default('') }}" From bcdb073b4716ccfbcd1bf89d0acec8ecbe02f6ea Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 24 Nov 2022 12:14:58 +0530 Subject: [PATCH 096/616] Update main.yml --- ansible/roles/ml-analytics-service/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 6b12734b74..b9e9557807 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -109,5 +109,5 @@ ml_analytics_survey_batchupdate_output_dir : "{{ WORKDIR }}/source/survey/" ml_analytics_druid_interval_list: 
'["1901-01-01T00:00:00+00:00/2020-01-01T00:00:00+00:00","2020-01-01T00:00:00+00:00/2020-06-01T00:00:00+00:00","2020-06-01T00:00:00+00:00/2021-01-01T00:00:00+00:00","2021-01-01T00:00:00+00:00/2021-06-01T00:00:00+00:00","2021-06-01T00:00:00+00:00/2022-01-01T00:00:00+00:00","2022-01-01T00:00:00+00:00/2022-03-01T00:00:00+00:00","2022-03-01T00:00:00+00:00/2022-06-01T00:00:00+00:00","2022-06-01T00:00:00+00:00/2022-09-01T00:00:00+00:00","2022-09-01T00:00:00+00:00/2023-01-01T00:00:00+00:00"]' ml_analytics_azure_account_key: "{{ cloud_private_storage_secret }}" ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## Valid options - ORACLE, gcloud, aws & azure -ml_analytics_cloud_package_path: "{{ ml_cloud_package_path | default('') }}" +ml_analytics_cloud_package_path: "{{ config_path }}/cloud_storage" ml_analytics_cname_url: "{{ ml_cname_url | default('') }}" From 1932c838f41f48de229dbf2a1e2d8af45ffb37b4 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 24 Nov 2022 12:24:35 +0530 Subject: [PATCH 097/616] Update main.yml --- ansible/roles/ml-analytics-service/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index b9e9557807..bdc0129745 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -110,4 +110,4 @@ ml_analytics_druid_interval_list: '["1901-01-01T00:00:00+00:00/2020-01-01T00:00: ml_analytics_azure_account_key: "{{ cloud_private_storage_secret }}" ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## Valid options - ORACLE, gcloud, aws & azure ml_analytics_cloud_package_path: "{{ config_path }}/cloud_storage" -ml_analytics_cname_url: "{{ ml_cname_url | default('') }}" +ml_analytics_cname_url: "https://{{ cloud_private_storage_accountname }}.blob.core.windows.net/{{ cloud_storage_telemetry_bucketname }}" 
From d13d58a0797b9377086e379b2d907386d7a917de Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 24 Nov 2022 13:06:20 +0530 Subject: [PATCH 098/616] Update main.yml --- ansible/roles/ml-analytics-service/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/tasks/main.yml b/ansible/roles/ml-analytics-service/tasks/main.yml index 6bf640005e..a3813c5d05 100755 --- a/ansible/roles/ml-analytics-service/tasks/main.yml +++ b/ansible/roles/ml-analytics-service/tasks/main.yml @@ -106,7 +106,7 @@ backup: yes - name: Copy GCP Secrets to JSON file - copy: dest="{{config_path}}/{{ml_GCP_secret_json_file}}" content="{{ ml_GCP_Secrets | to_nice_json}}" mode=0644 owner="{{ USER }}" + copy: dest="{{config_path}}/{{ml_GCP_secret_json_file}}" content="{{ ml_GCP_Secrets | to_nice_json}}" mode=0400 owner="{{ USER }}" when: ML_Cloud_Service_Provider == 'GCP' - name: Templating the shell_script_config.j2 to shell_script_config From dfd09a62adca833c9f37ef56fb45a8aad4667f68 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 24 Nov 2022 14:34:33 +0530 Subject: [PATCH 099/616] Update main.yml --- ansible/roles/ml-analytics-service/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/tasks/main.yml b/ansible/roles/ml-analytics-service/tasks/main.yml index a3813c5d05..dfa015c99c 100755 --- a/ansible/roles/ml-analytics-service/tasks/main.yml +++ b/ansible/roles/ml-analytics-service/tasks/main.yml @@ -107,7 +107,7 @@ - name: Copy GCP Secrets to JSON file copy: dest="{{config_path}}/{{ml_GCP_secret_json_file}}" content="{{ ml_GCP_Secrets | to_nice_json}}" mode=0400 owner="{{ USER }}" - when: ML_Cloud_Service_Provider == 'GCP' + when: ML_Cloud_Service_Provider == 'gcloud' - name: Templating the shell_script_config.j2 to shell_script_config template: From 99e820eb7d2d79651f8a40ab7a5de72f34486c68 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 24 Nov 2022 16:11:29 
+0530 Subject: [PATCH 100/616] Update main.yml --- ansible/roles/ml-analytics-service/defaults/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index bdc0129745..e29058043c 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -111,3 +111,7 @@ ml_analytics_azure_account_key: "{{ cloud_private_storage_secret }}" ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## Valid options - ORACLE, gcloud, aws & azure ml_analytics_cloud_package_path: "{{ config_path }}/cloud_storage" ml_analytics_cname_url: "https://{{ cloud_private_storage_accountname }}.blob.core.windows.net/{{ cloud_storage_telemetry_bucketname }}" +ml_GCP_secret_json_file: gcp_secrets.json +ml_GCP_Secrets: + account_name: {{ cloud_private_storage_accountname }} + account_key: {{ cloud_private_storage_secret }} From 618045ed8f33d036b41010ff520c88383b1d19eb Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 24 Nov 2022 16:14:41 +0530 Subject: [PATCH 101/616] Update main.yml --- ansible/roles/ml-analytics-service/defaults/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index e29058043c..fd73a69673 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -112,6 +112,7 @@ ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## ml_analytics_cloud_package_path: "{{ config_path }}/cloud_storage" ml_analytics_cname_url: "https://{{ cloud_private_storage_accountname }}.blob.core.windows.net/{{ cloud_storage_telemetry_bucketname }}" ml_GCP_secret_json_file: gcp_secrets.json +ml_GCP_bucket_name: {{ cloud_storage_telemetry_bucketname }} ml_GCP_Secrets: account_name: {{ 
cloud_private_storage_accountname }} account_key: {{ cloud_private_storage_secret }} From abab73d35530175638ea31e7d79781a450ee2af3 Mon Sep 17 00:00:00 2001 From: Rajesh Kumaravel Date: Fri, 25 Nov 2022 14:43:50 +0530 Subject: [PATCH 102/616] Issue #ED-521 fix: Updated CSP variables with Finalized vars --- .../roles/stack-sunbird/templates/sunbird_player.env | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_player.env b/ansible/roles/stack-sunbird/templates/sunbird_player.env index 2afd3af54c..c0ec466694 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_player.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_player.env @@ -222,3 +222,14 @@ sunbird_gcloud_labels={{gcloud_labels_folder_name | default("labels")}} sunbird_gcloud_client_email={{gcloud_private_bucket_client_email | default("")}} sunbird_gcloud_private_key={{gcloud_private_bucket_private_key | default("")}} sunbird_gcloud_projectId={{gcloud_private_bucket_projectId | default("")}} + +#release-5.1.0 +#CSP configuration variables changes +cloud_service_provider={{cloud_service_provider}} +cloud_private_storage_accountname={{cloud_private_storage_accountname | default("")}} +cloud_private_storage_secret={{cloud_private_storage_secret | default("")}} +cloud_private_storage_region={{cloud_private_storage_region | default("ap-south-1")}} +cloud_private_storage_project={{cloud_private_storage_project | default("")}} +cloud_storage_privatereports_bucketname={{cloud_storage_privatereports_bucketname | default("reports")}} +cloud_storage_resourceBundle_bucketname={{cloud_storage_resourceBundle_bucketname | default("label")}} +cloud_storage_desktopCrash_bucketname={{cloud_storage_desktopCrash_bucketname | default("desktopappcrashlogs")}} From c72fa43de1022b894f58abe2681f8cd8b2a7ca93 Mon Sep 17 00:00:00 2001 From: Rajesh Kumaravel Date: Fri, 25 Nov 2022 15:07:37 +0530 Subject: [PATCH 103/616] Issue #ED-521 fix: Updated CSP variables with 
Finalized vars --- ansible/roles/stack-sunbird/templates/sunbird_player.env | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_player.env b/ansible/roles/stack-sunbird/templates/sunbird_player.env index c0ec466694..0180418fb0 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_player.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_player.env @@ -231,5 +231,5 @@ cloud_private_storage_secret={{cloud_private_storage_secret | default("")}} cloud_private_storage_region={{cloud_private_storage_region | default("ap-south-1")}} cloud_private_storage_project={{cloud_private_storage_project | default("")}} cloud_storage_privatereports_bucketname={{cloud_storage_privatereports_bucketname | default("reports")}} -cloud_storage_resourceBundle_bucketname={{cloud_storage_resourceBundle_bucketname | default("label")}} -cloud_storage_desktopCrash_bucketname={{cloud_storage_desktopCrash_bucketname | default("desktopappcrashlogs")}} +cloud_storage_resourceBundle_bucketname={{cloud_storage_resourcebundle_bucketname | default("label")}} +cloud_storage_desktopCrash_bucketname={{cloud_storage_desktopcrash_bucketname | default("desktopappcrashlogs")}} From 1a3073db50dac65a0eaf16e66949083ac24cbd2d Mon Sep 17 00:00:00 2001 From: Rajesh Kumaravel Date: Fri, 25 Nov 2022 15:33:09 +0530 Subject: [PATCH 104/616] Issue #ED-521 fix: Updated CSP variables with Finalized vars --- ansible/roles/stack-sunbird/templates/sunbird_player.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_player.env b/ansible/roles/stack-sunbird/templates/sunbird_player.env index 0180418fb0..9cb6473418 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_player.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_player.env @@ -231,5 +231,5 @@ cloud_private_storage_secret={{cloud_private_storage_secret | default("")}} 
cloud_private_storage_region={{cloud_private_storage_region | default("ap-south-1")}} cloud_private_storage_project={{cloud_private_storage_project | default("")}} cloud_storage_privatereports_bucketname={{cloud_storage_privatereports_bucketname | default("reports")}} -cloud_storage_resourceBundle_bucketname={{cloud_storage_resourcebundle_bucketname | default("label")}} +cloud_storage_resourceBundle_bucketname={{cloud_storage_label_bucketname | default("label")}} cloud_storage_desktopCrash_bucketname={{cloud_storage_desktopcrash_bucketname | default("desktopappcrashlogs")}} From aaa3644c5af698e581878c848bd8f0eed598101f Mon Sep 17 00:00:00 2001 From: Surabhi Date: Fri, 25 Nov 2022 16:14:58 +0530 Subject: [PATCH 105/616] variables changes for sunbird cloud agnostic tool --- ansible/roles/stack-sunbird/templates/inbound.env | 8 ++++---- ansible/roles/stack-sunbird/templates/transformer.env | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/ansible/roles/stack-sunbird/templates/inbound.env b/ansible/roles/stack-sunbird/templates/inbound.env index 331ae1d3fc..d8b3bcba55 100644 --- a/ansible/roles/stack-sunbird/templates/inbound.env +++ b/ansible/roles/stack-sunbird/templates/inbound.env @@ -59,10 +59,10 @@ NETCORE_WHATSAPP_SOURCE={{uci_netcore_whatsapp_source}} NETCORE_WHATSAPP_URI={{uci_netcore_whatsapp_uri | default('https://waapi.pepipost.com/api/v2/')}} #Sunbird CDN Configuration -SUNBIRD_CLOUD_MEDIA_STORAGE_TYPE=azure -SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{sunbird_private_storage_account_name}} -SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{sunbird_private_storage_account_key}} -SUNBIRD_CLOUD_MEDIA_STORAGE_CONTAINER={{sunbird_azure_uci_container_name | default('uci-' + env )}} +SUNBIRD_CLOUD_MEDIA_STORAGE_TYPE={{cloud_service_provider}} +SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{cloud_public_storage_accountname}} +SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{cloud_public_storage_secret}} +SUNBIRD_CLOUD_MEDIA_STORAGE_CONTAINER={{cloud_storage_uci_bucketname | 
default('uci-' + env )}} #Selected CDN Configuration SELECTED_FILE_CDN=sunbird diff --git a/ansible/roles/stack-sunbird/templates/transformer.env b/ansible/roles/stack-sunbird/templates/transformer.env index b5be5c4451..f8bcde12dc 100644 --- a/ansible/roles/stack-sunbird/templates/transformer.env +++ b/ansible/roles/stack-sunbird/templates/transformer.env @@ -71,10 +71,10 @@ EXHAUST_TELEMETRY_ENABLED=TRUE POSTHOG_EVENT_ENABLED=FALSE #Sunbird CDN Configuration -SUNBIRD_CLOUD_MEDIA_STORAGE_TYPE=azure -SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{sunbird_private_storage_account_name}} -SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{sunbird_private_storage_account_key}} -SUNBIRD_CLOUD_MEDIA_STORAGE_CONTAINER={{sunbird_azure_uci_container_name | default('uci-' + env )}} +SUNBIRD_CLOUD_MEDIA_STORAGE_TYPE={{cloud_service_provider}} +SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{cloud_public_storage_accountname}} +SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{cloud_public_storage_secret}} +SUNBIRD_CLOUD_MEDIA_STORAGE_CONTAINER={{cloud_storage_uci_bucketname | default('uci-' + env )}} #Selected CDN Configuration SELECTED_FILE_CDN=sunbird \ No newline at end of file From b26fbbc807c7d20328ea365f07fd97fb7df03786 Mon Sep 17 00:00:00 2001 From: Surabhi Date: Fri, 25 Nov 2022 20:13:43 +0530 Subject: [PATCH 106/616] variables changes - private vars --- ansible/roles/stack-sunbird/templates/inbound.env | 4 ++-- ansible/roles/stack-sunbird/templates/transformer.env | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/roles/stack-sunbird/templates/inbound.env b/ansible/roles/stack-sunbird/templates/inbound.env index d8b3bcba55..1104836d93 100644 --- a/ansible/roles/stack-sunbird/templates/inbound.env +++ b/ansible/roles/stack-sunbird/templates/inbound.env @@ -60,8 +60,8 @@ NETCORE_WHATSAPP_URI={{uci_netcore_whatsapp_uri | default('https://waapi.pepipos #Sunbird CDN Configuration SUNBIRD_CLOUD_MEDIA_STORAGE_TYPE={{cloud_service_provider}} 
-SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{cloud_public_storage_accountname}} -SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{cloud_public_storage_secret}} +SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{cloud_private_storage_accountname}} +SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{cloud_private_storage_secret}} SUNBIRD_CLOUD_MEDIA_STORAGE_CONTAINER={{cloud_storage_uci_bucketname | default('uci-' + env )}} #Selected CDN Configuration diff --git a/ansible/roles/stack-sunbird/templates/transformer.env b/ansible/roles/stack-sunbird/templates/transformer.env index f8bcde12dc..72aa85a7d1 100644 --- a/ansible/roles/stack-sunbird/templates/transformer.env +++ b/ansible/roles/stack-sunbird/templates/transformer.env @@ -72,8 +72,8 @@ POSTHOG_EVENT_ENABLED=FALSE #Sunbird CDN Configuration SUNBIRD_CLOUD_MEDIA_STORAGE_TYPE={{cloud_service_provider}} -SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{cloud_public_storage_accountname}} -SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{cloud_public_storage_secret}} +SUNBIRD_CLOUD_MEDIA_STORAGE_KEY={{cloud_private_storage_accountname}} +SUNBIRD_CLOUD_MEDIA_STORAGE_SECRET={{cloud_private_storage_secret}} SUNBIRD_CLOUD_MEDIA_STORAGE_CONTAINER={{cloud_storage_uci_bucketname | default('uci-' + env )}} #Selected CDN Configuration From 4c14f86a8f3a68429b4309f34958f82932370f90 Mon Sep 17 00:00:00 2001 From: Akash Shah Date: Mon, 28 Nov 2022 18:10:39 +0530 Subject: [PATCH 107/616] Make roles as anonymous for project certificate --- .../sunbird-RC/registry/schemas/ProjectCertificate.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json index 0035464abb..cdfce21ac4 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json +++ b/kubernetes/helm_charts/sunbird-RC/registry/schemas/ProjectCertificate.json @@ -64,6 +64,7 @@ ], "ownershipAttributes": [], "roles": [ + "anonymous" ], "inviteRoles": [ "anonymous" 
@@ -72,4 +73,4 @@ "enableLogin": false, "credentialTemplate": "{{ upstream_url }}/schema/project_credential_template.json" } -} \ No newline at end of file +} From 1f4f735d84d93b205313a08e9fa2f25a81007da2 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Tue, 29 Nov 2022 10:46:11 +0530 Subject: [PATCH 108/616] Fix syntax (#3621) --- ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml | 4 ++-- ansible/roles/azure-cloud-storage/tasks/blob-upload.yml | 4 ++-- .../roles/azure-cloud-storage/tasks/upload-using-azcopy.yml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml index 82f806a803..53a57d7398 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml @@ -3,9 +3,9 @@ include_role: name: azure-cloud-storage tasks_from: container-create.yml - when: create_container is True + when: create_container == "True" - name: upload files and folders - uploading {{ blob_container_name }}{{ blob_container_folder_path }} shell: "az storage blob upload-batch --destination {{ blob_container_name }}{{ blob_container_folder_path }} --source {{ local_file_or_folder_path }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" async: 3600 - poll: 10 \ No newline at end of file + poll: 10 diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml index 36423dcfc6..d895cf46cf 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml @@ -3,9 +3,9 @@ include_role: name: azure-cloud-storage tasks_from: container-create.yml - when: create_container is True + when: create_container == "True" - name: upload file to azure storage container shell: 
"az storage blob upload --container-name {{ blob_container_name }} --file {{ local_file_or_folder_path }} --name {{ blob_file_name }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" async: 3600 - poll: 10 \ No newline at end of file + poll: 10 diff --git a/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml index 0726b48f00..e37243cd70 100644 --- a/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml +++ b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml @@ -3,11 +3,11 @@ include_role: name: azure-cloud-storage tasks_from: container-create.yml - when: create_container is True + when: create_container == "True" - name: upload files and folders to azure storage using azcopy shell: "azcopy copy {{ local_file_or_folder_path }} 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ storage_account_sas_token }}' --recursive" environment: AZCOPY_CONCURRENT_FILES: "10" async: 10800 - poll: 10 \ No newline at end of file + poll: 10 From a5f853386839da3ab4313b1c635d59668a888d2d Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Tue, 29 Nov 2022 12:28:28 +0530 Subject: [PATCH 109/616] Change condition to match boolean (#3623) --- ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml | 2 +- ansible/roles/azure-cloud-storage/tasks/blob-upload.yml | 2 +- ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml index 53a57d7398..900ecee515 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload-batch.yml @@ -3,7 +3,7 @@ include_role: name: 
azure-cloud-storage tasks_from: container-create.yml - when: create_container == "True" + when: create_container == True - name: upload files and folders - uploading {{ blob_container_name }}{{ blob_container_folder_path }} shell: "az storage blob upload-batch --destination {{ blob_container_name }}{{ blob_container_folder_path }} --source {{ local_file_or_folder_path }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" diff --git a/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml b/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml index d895cf46cf..5430aba8fa 100644 --- a/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml +++ b/ansible/roles/azure-cloud-storage/tasks/blob-upload.yml @@ -3,7 +3,7 @@ include_role: name: azure-cloud-storage tasks_from: container-create.yml - when: create_container == "True" + when: create_container == True - name: upload file to azure storage container shell: "az storage blob upload --container-name {{ blob_container_name }} --file {{ local_file_or_folder_path }} --name {{ blob_file_name }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }}" diff --git a/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml index e37243cd70..affbc8c002 100644 --- a/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml +++ b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml @@ -3,7 +3,7 @@ include_role: name: azure-cloud-storage tasks_from: container-create.yml - when: create_container == "True" + when: create_container == True - name: upload files and folders to azure storage using azcopy shell: "azcopy copy {{ local_file_or_folder_path }} 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ storage_account_sas_token }}' --recursive" From 20955da02e6434923d51649980b7472bf89ac521 Mon Sep 17 00:00:00 2001 
From: Raghupathi Date: Tue, 29 Nov 2022 13:14:41 +0530 Subject: [PATCH 110/616] Update main.yml --- ansible/roles/ml-analytics-service/defaults/main.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index fd73a69673..99e7526e4b 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -111,8 +111,8 @@ ml_analytics_azure_account_key: "{{ cloud_private_storage_secret }}" ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## Valid options - ORACLE, gcloud, aws & azure ml_analytics_cloud_package_path: "{{ config_path }}/cloud_storage" ml_analytics_cname_url: "https://{{ cloud_private_storage_accountname }}.blob.core.windows.net/{{ cloud_storage_telemetry_bucketname }}" -ml_GCP_secret_json_file: gcp_secrets.json -ml_GCP_bucket_name: {{ cloud_storage_telemetry_bucketname }} +ml_GCP_secret_json_file: "gcp_secrets.json" +ml_GCP_bucket_name: "{{ cloud_storage_telemetry_bucketname }}" ml_GCP_Secrets: - account_name: {{ cloud_private_storage_accountname }} - account_key: {{ cloud_private_storage_secret }} + account_name: "{{ cloud_private_storage_accountname }}" + account_key: "{{ cloud_private_storage_secret }}" From d002f1e51fc1aa8bb01c17c857a82b475b9aea3d Mon Sep 17 00:00:00 2001 From: Akhil <30873558+saiakhil46@users.noreply.github.com> Date: Tue, 29 Nov 2022 13:32:27 +0530 Subject: [PATCH 111/616] updated CACHE_CONTEXT_URLS var (#3625) --- kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 b/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 index 62c9114a76..58d931b861 100644 --- a/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 +++ 
b/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 @@ -35,4 +35,4 @@ certificatesignenv: CERTIFICATE_PRIVATE_KEY: |- {{ CERTIFICATE_PRIVATE_KEY | default("''") | indent(width=4) }} SIGNING_KEY_TYPE: "{{ SIGNING_KEY_TYPE|default('RSA')}}" - CACHE_CONTEXT_URLS: "{{ cache_context_urls | default(upstream_url + '/schema/v1_context.json,upstream_url + '/schema/sunbird_context.json,upstream_url + '/schema/credential_template.json')}}" \ No newline at end of file + CACHE_CONTEXT_URLS: "{{ cache_context_urls | default(upstream_url + '/schema/v1_context.json,' + upstream_url + '/schema/sunbird_context.json,' + upstream_url + '/schema/credential_template.json') }}" \ No newline at end of file From b8b4fc4546effee1f6acef19ae78cc6b75fb36b8 Mon Sep 17 00:00:00 2001 From: SadanandGowda Date: Tue, 29 Nov 2022 18:29:06 +0530 Subject: [PATCH 112/616] csp migration variables update --- .../core/analytics/templates/deployment.yaml | 16 ++++++++-------- kubernetes/helm_charts/core/analytics/values.j2 | 8 ++++---- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/kubernetes/helm_charts/core/analytics/templates/deployment.yaml b/kubernetes/helm_charts/core/analytics/templates/deployment.yaml index 0926360f76..57198cb77b 100644 --- a/kubernetes/helm_charts/core/analytics/templates/deployment.yaml +++ b/kubernetes/helm_charts/core/analytics/templates/deployment.yaml @@ -35,14 +35,14 @@ spec: value: {{ .Values.env.min_heap | quote }} - name: MAX_HEAP value: {{ .Values.env.max_heap | quote }} - - name: azure_storage_secret - value: {{ .Values.env.azure_private_account_secret | quote }} - - name: azure_storage_key - value: {{ .Values.env.azure_private_account_name | quote }} - - name: public_azure_storage_secret - value: {{ .Values.env.azure_public_account_secret | quote }} - - name: public_azure_storage_key - value: {{ .Values.env.azure_public_account_name | quote }} + - name: cloud_storage_secret + value: {{ .Values.env.cloud_private_account_secret | quote }} + - 
name: cloud_storage_key + value: {{ .Values.env.cloud_private_account_name | quote }} + - name: public_cloud_storage_secret + value: {{ .Values.env.cloud_public_account_secret | quote }} + - name: public_cloud_storage_key + value: {{ .Values.env.cloud_public_account_name | quote }} - name: _JAVA_OPTIONS value: -Dlog4j2.formatMsgNoLookups=true envFrom: diff --git a/kubernetes/helm_charts/core/analytics/values.j2 b/kubernetes/helm_charts/core/analytics/values.j2 index 354dcab3d3..f86925ad5c 100644 --- a/kubernetes/helm_charts/core/analytics/values.j2 +++ b/kubernetes/helm_charts/core/analytics/values.j2 @@ -8,10 +8,10 @@ env: javaoptions: {{analytics_java_mem_limit|default('-Xmx600m')}} min_heap: {{analytics_min_heap_limit|default('-Xms1g')}} max_heap: {{analytics_max_heap_limit|default('-Xmx2g')}} - azure_private_account_secret: {{ sunbird_private_storage_account_key }} - azure_private_account_name: {{ sunbird_private_storage_account_name }} - azure_public_account_secret: {{ sunbird_public_storage_account_key }} - azure_public_account_name: {{ sunbird_public_storage_account_name }} + cloud_private_account_secret: {{ sunbird_private_storage_account_key }} + cloud_private_account_name: {{ sunbird_private_storage_account_name }} + cloud_public_account_secret: {{ sunbird_public_storage_account_key }} + cloud_public_account_name: {{ sunbird_public_storage_account_name }} replicaCount: {{analytics_replicacount|default(1)}} repository: {{analytics_repository|default('sunbird-analytics-service')}} From daef17772d996e7da7f90b2b1b925b17dec876cf Mon Sep 17 00:00:00 2001 From: saiakhil46 Date: Wed, 30 Nov 2022 17:37:18 +0530 Subject: [PATCH 113/616] added config.json file in certificatesign and updated templates --- kubernetes/ansible/roles/helm-deploy/tasks/main.yml | 8 ++++++++ .../sunbird-RC/certificatesign/configs/config.json | 11 +++++++++++ .../certificatesign/templates/configmap.yaml | 13 +++++++++++++ .../certificatesign/templates/deployment.yaml | 7 +++++++ 4 files 
changed, 39 insertions(+) create mode 100644 kubernetes/helm_charts/sunbird-RC/certificatesign/configs/config.json diff --git a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml index a57c847b97..8f4881089a 100644 --- a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml @@ -22,6 +22,14 @@ - "{{ chart_path }}/schemas/*.json" when: release_name == "registry" +- name: template config json + template: + src: "{{ item }}" + dest: "{{ item }}" + with_fileglob: + - "{{ chart_path }}/configs/*.json" + when: release_name == "certificatesign" + - name: Load role to decrypt private keys, copy to private keys helm chart include_role: name: mount-keys diff --git a/kubernetes/helm_charts/sunbird-RC/certificatesign/configs/config.json b/kubernetes/helm_charts/sunbird-RC/certificatesign/configs/config.json new file mode 100644 index 0000000000..0909ace256 --- /dev/null +++ b/kubernetes/helm_charts/sunbird-RC/certificatesign/configs/config.json @@ -0,0 +1,11 @@ +{ + "issuers": { + "default": { + "publicKey": {{ CERTIFICATE_PUBLIC_KEY | default("") }}, + "privateKey": {{ CERTIFICATE_PRIVATE_KEY | default("") }}, + "signatureType": "RSA", + "verificationMethod": "did:india", + "$comment": "The above are test keys and it needs to be replaced before going to production" + } + } +} \ No newline at end of file diff --git a/kubernetes/helm_charts/sunbird-RC/certificatesign/templates/configmap.yaml b/kubernetes/helm_charts/sunbird-RC/certificatesign/templates/configmap.yaml index 05b928173d..587c7e9dfb 100644 --- a/kubernetes/helm_charts/sunbird-RC/certificatesign/templates/configmap.yaml +++ b/kubernetes/helm_charts/sunbird-RC/certificatesign/templates/configmap.yaml @@ -9,3 +9,16 @@ metadata: creationTimestamp: null name: {{ .Chart.Name }}-config namespace: {{ .Values.namespace }} + +--- + +{{- $configs := .Files.Glob "configs/*" }} +{{ if $configs }} +apiVersion: v1 +kind: 
ConfigMap +metadata: + name: {{ .Chart.Name }}-conf + namespace: {{ .Values.namespace }} +data: +{{ (.Files.Glob "configs/*").AsConfig | indent 2 }} +{{ end }} \ No newline at end of file diff --git a/kubernetes/helm_charts/sunbird-RC/certificatesign/templates/deployment.yaml b/kubernetes/helm_charts/sunbird-RC/certificatesign/templates/deployment.yaml index 7fa927020a..c271409ef9 100644 --- a/kubernetes/helm_charts/sunbird-RC/certificatesign/templates/deployment.yaml +++ b/kubernetes/helm_charts/sunbird-RC/certificatesign/templates/deployment.yaml @@ -34,6 +34,13 @@ spec: {{ toYaml .Values.resources | indent 10 }} ports: - containerPort: {{ .Values.network.port }} + volumeMounts: + - name: {{ .Chart.Name }}-conf + mountPath: /etc/signer + volumes: + - name: {{ .Chart.Name }}-conf + configMap: + name: {{ .Chart.Name }}-conf --- From 6f9ffe1985f150b2ca095442577fa01ed5572d4e Mon Sep 17 00:00:00 2001 From: saiakhil46 Date: Thu, 1 Dec 2022 11:36:12 +0530 Subject: [PATCH 114/616] updated certificatesign helm_chart --- .../sunbird-RC/certificatesign/configs/config.json | 4 ++-- kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 | 5 ++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/kubernetes/helm_charts/sunbird-RC/certificatesign/configs/config.json b/kubernetes/helm_charts/sunbird-RC/certificatesign/configs/config.json index 0909ace256..98052b982e 100644 --- a/kubernetes/helm_charts/sunbird-RC/certificatesign/configs/config.json +++ b/kubernetes/helm_charts/sunbird-RC/certificatesign/configs/config.json @@ -1,8 +1,8 @@ { "issuers": { "default": { - "publicKey": {{ CERTIFICATE_PUBLIC_KEY | default("") }}, - "privateKey": {{ CERTIFICATE_PRIVATE_KEY | default("") }}, + "publicKey": "{{ CERTIFICATESIGN_PUBLIC_KEY | default('') }}", + "privateKey": "{{ CERTIFICATESIGN_PRIVATE_KEY | default('') }}", "signatureType": "RSA", "verificationMethod": "did:india", "$comment": "The above are test keys and it needs to be replaced before going to production" 
diff --git a/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 b/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 index 58d931b861..9e6beb68d4 100644 --- a/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 +++ b/kubernetes/helm_charts/sunbird-RC/certificatesign/values.j2 @@ -31,8 +31,7 @@ certificatesignenv: PORT: "8079" QR_TYPE: {{ QR_TYPE|default('URL') }} CERTIFICATE_DOMAIN_URL: "https://{{domain_name}}" - CERTIFICATE_PUBLIC_KEY: {{CERTIFICATE_PUBLIC_KEY | default("''")}} - CERTIFICATE_PRIVATE_KEY: |- - {{ CERTIFICATE_PRIVATE_KEY | default("''") | indent(width=4) }} + CERTIFICATE_PUBLIC_KEY: "{{ CERTIFICATESIGN_PUBLIC_KEY | default('') }}" + CERTIFICATE_PRIVATE_KEY: "{{ CERTIFICATESIGN_PRIVATE_KEY | default('') }}" SIGNING_KEY_TYPE: "{{ SIGNING_KEY_TYPE|default('RSA')}}" CACHE_CONTEXT_URLS: "{{ cache_context_urls | default(upstream_url + '/schema/v1_context.json,' + upstream_url + '/schema/sunbird_context.json,' + upstream_url + '/schema/credential_template.json') }}" \ No newline at end of file From 53268216c638206567129ea0269af973139898fc Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 1 Dec 2022 14:00:53 +0530 Subject: [PATCH 115/616] fix: adding verbosity for desktop deploy (#3633) --- pipelines/offlineinstaller/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/offlineinstaller/Jenkinsfile b/pipelines/offlineinstaller/Jenkinsfile index c97c01a9bd..a4e6a8f610 100644 --- a/pipelines/offlineinstaller/Jenkinsfile +++ b/pipelines/offlineinstaller/Jenkinsfile @@ -31,7 +31,7 @@ node() { } stage('Install the offline desktop Application') { ansiblePlaybook = "${currentWs}/ansible/offline-installer.yml --vault-password-file /var/lib/jenkins/secrets/vault-pass" - ansibleExtraArgs = "--extra-vars \"offline_repo_location=$currentWs offline_installer_type=${offline_installer_type}\"" + ansibleExtraArgs = "--extra-vars \"offline_repo_location=$currentWs 
offline_installer_type=${offline_installer_type}\" -v" values.put('ansiblePlaybook', ansiblePlaybook) values.put('ansibleExtraArgs', ansibleExtraArgs) println values From 8648a6183dc08554ee3353f3efc1afa908ed8a8b Mon Sep 17 00:00:00 2001 From: Akash Shah Date: Fri, 2 Dec 2022 14:39:37 +0530 Subject: [PATCH 116/616] Set authentication_enabled as false by default Add authentication_enabled as a new env key for the registry service and set it false by default. --- kubernetes/helm_charts/sunbird-RC/registry/values.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/kubernetes/helm_charts/sunbird-RC/registry/values.j2 b/kubernetes/helm_charts/sunbird-RC/registry/values.j2 index e7e6e6f31c..09181622b7 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/values.j2 +++ b/kubernetes/helm_charts/sunbird-RC/registry/values.j2 @@ -44,6 +44,7 @@ rccoreenv: connectionInfo_maxPoolSize: {{ registry_connectionInfo_maxPoolSize|default('200')}} auditTaskExecutor_queueCapacity: {{ registry_auditTaskExecutor_queueCapacity|default('100')}} taskExecutor_index_queueCapacity: {{ registry_taskExecutor_index_queueCapacity|default('100')}} + authentication_enabled: {{ registry_authentication_enabled|default('false')}} {# The below should get enabled once the service has probes implemented #} {# {{ registry_liveness_readiness | to_nice_yaml }} #} From 4b370d0f9a22cb5152d9dd15cdff7c74e710f584 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Sat, 3 Dec 2022 22:21:04 +0530 Subject: [PATCH 117/616] Release 5.1.0 (#3637) * Update deploy plugins for GCP * Fix looping issue --- ansible/deploy-plugins.yml | 30 +++++++++++++++++-- .../tasks/delete-batch-no-poll.yml | 6 ++++ .../tasks/upload-batch-no-poll.yml | 5 ++++ 3 files changed, 39 insertions(+), 2 deletions(-) create mode 100644 ansible/roles/gcp-cloud-storage/tasks/delete-batch-no-poll.yml create mode 100644 ansible/roles/gcp-cloud-storage/tasks/upload-batch-no-poll.yml diff --git 
a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 8da2bd445e..6f5460809f 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -86,7 +86,8 @@ tags: - plugins when: cloud_service_provider == "azure" - + +### GCP tasks #### - name: this block consists of tasks related to gcloud storage block: - name: set common gcloud variables @@ -135,6 +136,31 @@ local_file_or_folder_path: "{{ source_file_name }}" tags: - preview + + - block: + - name: Authenticate to gcloud + include_role: + name: gcp-cloud-storage + tasks_from: gcloud-auth.yml + + - name: delete and re-upload plugins + include_role: + name: gcp-cloud-storage + tasks_from: "{{ item[0] }}" + vars: + file_delete_pattern: "content-plugins/{{ item[1] }}/*" + dest_folder_path: "content-plugins/{{ item[1] }}" + local_file_or_folder_path: "{{ source_folder }}/{{ item[1] }}" + with_nested: + - ['delete-batch-no-poll.yml', 'upload-batch-no-poll.yml'] + - "{{ lookup('file', plugins_to_delete_and_upload).split('\n') }}" + + - name: Revoke gcloud access + include_role: + name: gcp-cloud-storage + tasks_from: gcloud-revoke.yml + tags: + - plugins when: cloud_service_provider == "gcloud" ################################### AWS tasks ######################### @@ -200,4 +226,4 @@ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}" tags: - plugins - when: cloud_service_provider == "aws" \ No newline at end of file + when: cloud_service_provider == "aws" diff --git a/ansible/roles/gcp-cloud-storage/tasks/delete-batch-no-poll.yml b/ansible/roles/gcp-cloud-storage/tasks/delete-batch-no-poll.yml new file mode 100644 index 0000000000..ca02b8a064 --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/tasks/delete-batch-no-poll.yml @@ -0,0 +1,6 @@ +--- +- name: Delete folder recursively in gcp storage + shell: gsutil rm -r "gs://{{ gcp_bucket_name }}/{{ file_delete_pattern }}" + async: 1800 + poll: 0 + diff --git a/ansible/roles/gcp-cloud-storage/tasks/upload-batch-no-poll.yml 
b/ansible/roles/gcp-cloud-storage/tasks/upload-batch-no-poll.yml new file mode 100644 index 0000000000..0d8755ab26 --- /dev/null +++ b/ansible/roles/gcp-cloud-storage/tasks/upload-batch-no-poll.yml @@ -0,0 +1,5 @@ +--- +- name: Upload files from a local directory gcp storage + shell: gsutil -m cp -r "{{ local_file_or_folder_path }}" "gs://{{ gcp_bucket_name }}/{{ dest_folder_name }}/{{ dest_folder_path }}" + async: 1800 + poll: 0 From b8bffc23589f6715c26b475a602ca39bf51b481b Mon Sep 17 00:00:00 2001 From: Reshmi V Nair <54312456+reshmi-nair@users.noreply.github.com> Date: Tue, 6 Dec 2022 10:13:47 +0530 Subject: [PATCH 118/616] LR-262 CSP related variable - Cloud storage base path added (#3641) --- ansible/inventory/env/group_vars/all.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 1aaa166d5c..62cac6441d 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -132,6 +132,8 @@ cassandra_backup_azure_container_name: cassandra-backup cassandra_backup_dir: /data/cassandra/backup ### Release 5.0.0 ### cassandra_multi_dc_enabled: false +# Release-5.0.1 +cloud_storage_base_url: "{{cloud_storage_base_url}}" keycloak_realm: sunbird sunbird_content_player_url: "http://kong:8000/" From 4e4cd89769a48b356e9fcd066854c6e7d2ffce01 Mon Sep 17 00:00:00 2001 From: Akash Shah Date: Tue, 6 Dec 2022 11:59:53 +0530 Subject: [PATCH 119/616] Add environment keys to enable async certificate Add environment keys to enable async certificate creation --- kubernetes/helm_charts/sunbird-RC/registry/values.j2 | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/kubernetes/helm_charts/sunbird-RC/registry/values.j2 b/kubernetes/helm_charts/sunbird-RC/registry/values.j2 index 09181622b7..7668715b20 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/values.j2 +++ b/kubernetes/helm_charts/sunbird-RC/registry/values.j2 @@ -45,6 +45,16 @@ 
rccoreenv: auditTaskExecutor_queueCapacity: {{ registry_auditTaskExecutor_queueCapacity|default('100')}} taskExecutor_index_queueCapacity: {{ registry_taskExecutor_index_queueCapacity|default('100')}} authentication_enabled: {{ registry_authentication_enabled|default('false')}} + async_enabled: {{ registry_async_enabled|default('true')}} + webhook_enabled: {{ registry_webhook_enabled|default('true')}} + ZOOKEEPER_CLIENT_PORT: {{ registry_zookeeper_client_port|default('2181')}} + ZOOKEEPER_TICK_TIME: {{ registry_zookeeper_tick_time|default('2000')}} + KAFKA_BROKER_ID: {{ registry_kafka_broker_id|default('1')}} + KAFKA_ZOOKEEPER_CONNECT: "{{groups['processing-cluster-zookeepers']|join(':2181,')}}:2181" + KAFKA_ADVERTISED_LISTENERS: "{{groups['processing-cluster-kafka']|join(':9092,')}}:9092" + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: {{ registry_listener_security_protocol_map|default('INTERNAL:PLAINTEXT,OUTSIDE:PLAINTEXT')}} + KAFKA_INTER_BROKER_LISTENER_NAME: {{ registry_inter_broker_listener_name|default('INTERNAL')}} + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: {{ registry_offsets_topic_replication_factor|default('1')}} {# The below should get enabled once the service has probes implemented #} {# {{ registry_liveness_readiness | to_nice_yaml }} #} From 5dc030bf3356a7bb06c7a356506fdafe3adc29bd Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Tue, 6 Dec 2022 14:25:17 +0530 Subject: [PATCH 120/616] Update config.j2 (#3644) --- .../ml-analytics-service/defaults/main.yml | 44 ++++++------- .../roles/ml-analytics-service/tasks/main.yml | 2 +- .../ml-analytics-service/templates/config.j2 | 61 ++++++++++--------- 3 files changed, 52 insertions(+), 55 deletions(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 99e7526e4b..da571d3bed 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -44,10 +44,8 @@ 
ml_analytics_kafka_survey_druid_topic_name: "{{ env_name }}.ml.survey.druid" ml_analytics_observation_log_folder_path: "{{ WORKDIR }}/logs/observation" ml_analytics_project_log_folder_path: "{{ WORKDIR }}/logs/project" ml_analytics_survey_log_folder_path: "{{ WORKDIR }}/logs/survey" -ml_analytics_azure_account_name: "{{ cloud_private_storage_accountname }}" -ml_analytics_azure_container_name: "{{ cloud_storage_telemetry_bucketname }}" ml_analytics_observation_azure_blob_path: "observation/status/" -ml_analytics_project_azure_blob_path: "projects/" +ml_analytics_project_cloud_blob_path: "projects/" ml_analytics_redis_host: "{{ml_redis_host | default(groups['dp-redis'][0])}}" ml_analytics_redis_port: "{{ ml_redis_device_port | default('6379') }}" ml_analytics_redis_db_name: "12" @@ -57,62 +55,60 @@ ml_analytics_api_authorization_key: "{{ml_api_auth_token | default('sunbird_api_ ml_analytics_api_access_token: "{{ml_api_access_token | default('ml_core_internal_access_token')}}" ml_analytics_druid_observation_status_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": "azure","uris": 
["azure://telemetry-data-store/observation/status/sl_observation_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-observation-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"auto"},"dimensionsSpec":{"dimensions":["status","entity_externalId","entity_id","entity_type","solution_id","solution_externalId","submission_id","entity_name","solution_name","role_title","school_name","school_code","school_externalId","state_name","state_code","state_externalId","district_name","district_code","district_externalId","block_name","block_code","block_externalId","cluster_name","cluster_code","cluster_externalId","completedDate","channel","parent_channel","program_id","program_externalId","program_name","app_name","user_id","private_program","solution_type","organisation_name","ecm_marked_na","board_name","updatedAt","organisation_id","user_type","observed_school_name","observed_school_id","observed_school_code","observed_state_name","observed_state_id","observed_state_code","observed_district_name","observed_district_id","observed_district_code","observed_block_name","observed_block_id","observed_block_code","observed_cluster_name","observed_cluster_id","observed_cluster_code"]},"metricsSpec":[]}}}' ml_analytics_druid_project_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": "azure","uris": ["azure://telemetry-data-store/projects/sl_projects.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-project","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"project_updated_date","format":"auto"},"dimensionsSpec":{"dimensions":[]},"metricsSpec":[]}}}' 
-ml_analytics_azure_sas_token: "{{ cloud_private_storage_secret }}" ml_analytics_druid_distinctCnt_obs_injestion_spec: '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount/ml_observation_distinctCount_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"status"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' ml_analytics_druid_distinctCnt_obs_domain_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain/ml_observation_distinctCount_domain.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' ml_analytics_druid_distinctCnt_obs_domain_criteria_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain_criteria/ml_observation_distinctCount_domain_criteria.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain-criteria","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"string","name":"criteria_name"},{"type":"string","name":"criteria_score"},{"type":"string","name":"criteria_id"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' ml_analytics_druid_distinctCnt_projects_status_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/distinctCount/ml_projects_distinctCount.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"project_title"},{"type":"string","name":"solution_id"},{"type":"string","name":"status_of_project"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"private_program"},{"type":"string","name":"project_created_type"},{"type":"string","name":"parent_channel"},{"type":"long","name":"unique_projects"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"long","name":"no_of_imp_with_evidence"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' -ml_analytics_obs_distinctCnt_azure_blob_path: "observation/distinctCount/" -ml_analytics_obs_distinctCnt_domain_azure_blob_path: "observation/distinctCount_domain/" -ml_analytics_obs_distinctCnt_domain_criteria_azure_blob_path: "observation/distinctCount_domain_criteria/" -ml_analytics_projects_distinctCnt_azure_blob_path: "projects/distinctCount/" +ml_analytics_obs_distinctCnt_cloud_blob_path: "observation/distinctCount/" +ml_analytics_obs_distinctCnt_domain_cloud_blob_path: "observation/distinctCount_domain/" 
+ml_analytics_obs_distinctCnt_domain_criteria_cloud_blob_path: "observation/distinctCount_domain_criteria/" +ml_analytics_projects_distinctCnt_cloud_blob_path: "projects/distinctCount/" ml_analytics_obs_distinctCnt_output_dir: "{{ WORKDIR }}/source/observations/distinctCount/output" ml_analytics_obs_distinctCnt_domain_output_dir: "{{ WORKDIR }}/source/observations/distinctCount_domain/output" ml_analytics_obs_distinctCnt_domain_criteria_output_dir: "{{ WORKDIR }}/source/observations/distinctCount_domain_criteria/output" ml_analytics_projects_distinctCnt_output_dir: "{{ WORKDIR }}/source/projects/distinctCount/output" -ml_analytics_survey_rollup_azure_blob_path: "survey/rollup/" +ml_analytics_survey_rollup_cloud_blob_path: "survey/rollup/" ml_analytics_druid_survey_rollup_injestion_spec: '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/survey/rollup/sl_survey_rollup.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-survey-status-rollup","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":true,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"iso"},"dimensionsSpec":{"dimensions":["survey_submission_id", "submission_status", "user_id", "user_sub_type", "user_type", "state_externalId", "block_externalId", "district_externalId", "cluster_externalId", "school_externalId", "state_name", "block_name", "district_name", "cluster_name", "school_name", "board_name", "organisation_id", "organisation_name", "program_externalId", "program_id", "program_name", "survey_name", "survey_id", "survey_externalId", "created_date", "submission_date", "updatedAt", "parent_channel",{"type":"long","name":"status_code"}, "solution_name", 
"solution_id"]},"metricsSpec":[{"name":"count","type":"count"},{"name":"sum___v","type":"longSum","fieldName":"__v"},{"name":"sum_status_code","type":"longMax","fieldName":"status_code"},{"type":"HLLSketchBuild","name":"count_of_user_id","fieldName":"user_id"},{"type":"HLLSketchBuild","name":"count_of_survey_submission_id","fieldName":"survey_submission_id"},{"type":"HLLSketchBuild","name":"count_of_solution_id","fieldName":"solution_id"},{"type":"HLLSketchBuild","name":"count_of_program_id","fieldName":"program_id"}]}}}' ml_analytics_survey_rollup_output_dir: "/opt/sparkjobs/source/survey/output" -ml_analytics_project_rollup_azure_blob_path: "projects/rollup" -ml_analytics_observation_rollup_azure_blob_path: "observation/rollup" +ml_analytics_project_rollup_cloud_blob_path: "projects/rollup" +ml_analytics_observation_rollup_cloud_blob_path: "observation/rollup" ml_analytics_project_rollup_output_dir: "/opt/sparkjobs/source/projects/output_rollup" ml_analytics_observation_status_rollup_output_dir: "/opt/sparkjobs/source/observations/output_rollup" ml_analytics_druid_project_rollup_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/rollup/projects_rollup.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-status-rollup","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"project_updated_date","format":"iso"},"dimensionsSpec":{"dimensions":["project_title","project_goal","area_of_improvement","status_of_project","tasks_name","tasks_status","designation","task_evidence_status","project_id","task_id","project_created_type","parent_channel","program_id","program_name","project_updated_date","createdBy","program_externalId","private_program","task_deleted_flag","project_terms_and_condition","state_externalId","block_externalId","district_externalId","cluster_externalId","school_externalId","state_name","block_name","district_name","cluster_name","school_name","board_name","organisation_name","solution_id","organisation_id",{"name":"status_code","type":"long"}]},"metricsSpec":[{"name":"count","type":"count"},{"name":"sum___v","type":"longSum","fieldName":"__v"},{"name":"sum_status_code","type":"longMax","fieldName":"status_code"},{"type":"HLLSketchBuild","name":"count_of_createBy","fieldName":"createdBy"},{"type":"HLLSketchBuild","name":"count_of_project_id","fieldName":"project_id"},{"type":"HLLSketchBuild","name":"count_of_solution_id","fieldName":"solution_id"},{"type":"HLLSketchBuild","name":"count_of_program_id","fieldName":"program_id"}]}}}' ml_analytics_druid_observation_status_rollup_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/rollup/observation_status_rollup.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-observation-status-rollup","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"iso"},"dimensionsSpec":{"dimensions":["status","user_id","solution_id","submission_id","entity_name","completedDate","program_id","private_program","solution_type","updatedAt","role_title","solution_name","program_name","channel","parent_channel","block_name","district_name","school_name","cluster_name","state_name","organisation_name","board_name","district_externalId","state_externalId","block_externalId","cluster_externalId","school_externalId","organisation_id",{"type":"long","name":"status_code"}]},"metricsSpec":[{"type":"count","name":"count"},{"type":"longSum","name":"sum___v","fieldName":"__v","expression":null},{"type":"HLLSketchBuild","name":"count_distinct_solution","fieldName":"solution_id","lgK":12,"tgtHllType":"HLL_4","round":false},{"type":"HLLSketchBuild","name":"count_distinct_submission_id","fieldName":"submission_id","lgK":12,"tgtHllType":"HLL_4","round":false},{"type":"HLLSketchBuild","name":"count_distinct_user_id","fieldName":"user_id","lgK":12,"tgtHllType":"HLL_4","round":false}]}}}' ml_analytics_druid_rollup_url: "{{groups['druid'][0]}}:8081" -ml_analytics_AWS_access_key: "{{ cloud_private_storage_accountname }}" -ml_analytics_AWS_secret_access_key: "{{ cloud_private_storage_secret }}" -ml_analytics_AWS_region_name: "{{ cloud_private_storage_region }}" -ml_analytics_AWS_bucket_name: "{{ cloud_storage_telemetry_bucketname }}" ml_analytics_druid_distinctCnt_prglevel_projects_status_injestion_spec : 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/distinctCountPrglevel/ml_projects_distinctCount_prgmlevel.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-programLevel-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"status_of_project"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"private_program"},{"type":"string","name":"project_created_type"},{"type":"string","name":"parent_channel"},{"type":"long","name":"unique_projects"},{"type":"long","name":"unique_users"},{"type":"long","name":"no_of_imp_with_evidence"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' ml_analytics_projects_distinctCnt_prglevel_output_dir: "{{ WORKDIR }}/source/projects/distinctCountPrglevel/output" -ml_analytics_projects_distinctCnt_prglevel_azure_blob_path: "projects/distinctCountPrglevel/" +ml_analytics_projects_distinctCnt_prglevel_cloud_blob_path: "projects/distinctCountPrglevel/" ml_analytics_survey_status_output_dir : "{{ WORKDIR }}/source/survey/status/output" -ml_analytics_survey_azure_blob_path : "survey/status/" +ml_analytics_survey_cloud_blob_path : "survey/status/" ml_analytics_druid_survey_status_injestion_spec : 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/survey/status/sl_survey_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-survey-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":true,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"iso"},"dimensionsSpec":{"dimensions":["survey_submission_id", "submission_status", "user_id", "user_sub_type", "user_type", "state_externalId", "block_externalId", "district_externalId", "cluster_externalId", "school_externalId", "state_name", "block_name", "district_name", "cluster_name", "school_name", "board_name", "organisation_id", "organisation_name", "program_externalId", "program_id", "program_name", "survey_name", "survey_id", "survey_externalId", "created_date", "submission_date", "updatedAt", "parent_channel", "solution_name", "solution_id","private_program"]},"metricsSpec":[]}}}' ml_analytics_slack_token: "{{ ml_slack_token | default('') }}" ml_analytics_channel_name: "{{ ml_slack_channel | default('') }}" -ml_analytics_program_dashboard_azure_blob_path: "{{ ml_program_blob_path | default('') }}" +ml_analytics_program_dashboard_cloud_blob_path: "{{ ml_program_blob_path | default('') }}" ml_druid_query_data: "{{ ml_druid_query | default('') }}" ml_program_dashboard_data: "{{ ml_program_data | default('') }}" ml_analytics_druid_query_url: "{{groups['druid'][0]}}:8082" ml_analytics_druid_observation_query_spec: 
'{"queryType":"scan","dataSource":"sl-observation","resultFormat":"list","columns":["completedDate","createdAt","createdBy","criteriaExternalId","criteriaId","criteriaName","entityType","entityTypeId","observationId","observationName","observationSubmissionId","questionAnswer","questionECM","questionExternalId","questionId","questionName","questionResponseLabel","questionResponseType","solutionExternalId","solutionId","solutionName","updatedAt","instanceParentId","instanceId","instanceParentResponsetype","instanceParentQuestion","questionSequenceByEcm","maxScore","minScore","percentageScore","pointsBasedScoreInParent","totalScore","scoreAchieved","totalpercentage","instanceParentExternalId","instanceParentEcmSequence","remarks","total_evidences","evidence_count","school","block","district","cluster","state","schoolName","blockName","districtName","clusterName","stateName","schoolExternalId","blockExternalId","districtExternalId","clusterExternalId","stateExternalId","schoolTypes","administrationTypes","instanceParentCriteriaId","instanceParentCriteriaExternalId","instanceParentCriteriaName","role_title","location_validated_with_geotag","distance_in_meters","entity","entityExternalId","entityName","isAPrivateProgram","programId","programName","programExternalId","questionResponseLabel_number","criteriaLevel","criteriaScore","submissionNumber","submissionTitle","channel","parent_channel","user_districtName","user_blockName","user_clusterName","appName","evidences","user_stateName","domainName","domainExternalId","childName","childType","childExternalid","level","criteriaDescription","programDescription","solutionDescription","label","imp_project_id","imp_project_title","imp_project_goal","imp_project_externalId","ancestorName","scoringSystem","domainLevel","domainScore","criteriaLevelReport","user_schoolName","user_schoolId","user_schoolUDISE_code","solution_type","organisation_name","user_boardName","district_externalId","state_externalId","block_externalId","cluster
_externalId","organisation_id","user_type"],"intervals":["1901-01-01T00:00:00+00:00/2101-01-01T00:00:00+00:00"]}' ml_analytics_druid_observation_batch_ingestion_spec: '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": "azure","uris": ["azure://telemetry-data-store/observation/batchDeletion/druidData.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-observation","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"completedDate","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"completedDate"},{"type":"string","name":"createdAt"},{"type":"string","name":"createdBy"},{"type":"string","name":"criteriaExternalId"},{"type":"string","name":"criteriaId"},{"type":"string","name":"criteriaName"},{"type":"string","name":"entityType"},{"type":"string","name":"entityTypeId"},{"type":"string","name":"observationId"},{"type":"string","name":"observationName"},{"type":"string","name":"observationSubmissionId"},{"type":"string","name":"questionAnswer"},{"type":"string","name":"questionECM"},{"type":"string","name":"questionExternalId"},{"type":"string","name":"questionId"},{"type":"string","name":"questionName"},{"type":"string","name":"questionResponseLabel"},{"type":"string","name":"questionResponseType"},{"type":"string","name":"solutionExternalId"},{"type":"string","name":"solutionId"},{"type":"string","name":"solutionName"},{"type":"string","name":"updatedAt"},{"type":"string","name":"instanceParentId"},{"type":"string","name":"instanceId"},{"type":"string","name":"instanceParentResponsetype"},{"type":"string","name":"instanceParentQuestion"},{"type":"string","name":"questionSequenceByEcm"},{"type":"string","name":"maxScore"},{"type":"string","name":"minScore"},{"type":"string","name":"percentageScore"},{"type":"string","name":"pointsBasedScoreInParent"},{"type":"str
ing","name":"totalScore"},{"type":"string","name":"scoreAchieved"},{"type":"string","name":"totalpercentage"},{"type":"string","name":"instanceParentExternalId"},{"type":"string","name":"instanceParentEcmSequence"},{"type":"string","name":"remarks"},{"type":"string","name":"total_evidences"},{"type":"string","name":"evidence_count"},{"type":"string","name":"school"},{"type":"string","name":"block"},{"type":"string","name":"district"},{"type":"string","name":"cluster"},{"type":"string","name":"state"},{"type":"string","name":"schoolName"},{"type":"string","name":"blockName"},{"type":"string","name":"districtName"},{"type":"string","name":"clusterName"},{"type":"string","name":"stateName"},{"type":"string","name":"schoolExternalId"},{"type":"string","name":"blockExternalId"},{"type":"string","name":"districtExternalId"},{"type":"string","name":"clusterExternalId"},{"type":"string","name":"stateExternalId"},{"type":"string","name":"schoolTypes"},{"type":"string","name":"administrationTypes"},{"type":"string","name":"instanceParentCriteriaId"},{"type":"string","name":"instanceParentCriteriaExternalId"},{"type":"string","name":"instanceParentCriteriaName"},{"type":"string","name":"role_title"},{"type":"string","name":"location_validated_with_geotag"},{"type":"string","name":"distance_in_meters"},{"type":"string","name":"entity"},{"type":"string","name":"entityExternalId"},{"type":"string","name":"entityName"},{"type":"string","name":"isAPrivateProgram"},{"type":"string","name":"programId"},{"type":"string","name":"programName"},{"type":"string","name":"programExternalId"},{"name":"questionResponseLabel_number","type":"float"},{"type":"string","name":"criteriaLevel"},{"type":"string","name":"criteriaScore"},{"type":"string","name":"submissionNumber"},{"type":"string","name":"submissionTitle"},{"type":"string","name":"channel"},{"type":"string","name":"parent_channel"},{"type":"string","name":"user_districtName"},{"type":"string","name":"user_blockName"},{"type":"string","
name":"user_clusterName"},{"type":"string","name":"appName"},{"type":"string","name":"evidences"},{"type":"string","name":"user_stateName"},{"type":"string","name":"domainName"},{"type":"string","name":"domainExternalId"},{"type":"string","name":"childName"},{"type":"string","name":"childType"},{"type":"string","name":"childExternalid"},{"type":"string","name":"level"},{"type":"string","name":"criteriaDescription"},{"type":"string","name":"programDescription"},{"type":"string","name":"solutionDescription"},{"type":"string","name":"label"},{"type":"string","name":"imp_project_id"},{"type":"string","name":"imp_project_title"},{"type":"string","name":"imp_project_goal"},{"type":"string","name":"imp_project_externalId"},{"type":"string","name":"ancestorName"},{"type":"string","name":"scoringSystem"},{"type":"string","name":"domainLevel"},{"type":"string","name":"domainScore"},{"name":"criteriaLevelReport","type":"boolean"},{"type":"string","name":"user_schoolName"},{"type":"string","name":"user_schoolId"},{"type":"string","name":"user_schoolUDISE_code"},{"type":"string","name":"solution_type"},{"type":"string","name":"organisation_name"},{"type":"string","name":"user_boardName"},{"type":"string","name":"district_externalId"},{"type":"string","name":"state_externalId"},{"type":"string","name":"block_externalId"},{"type":"string","name":"cluster_externalId"},{"type":"string","name":"organisation_id"},{"type":"string","name":"user_type"},{"type":"string","name":"isSubmissionDeleted"}]},"metricsSpec":[{"type":"floatSum","name":"question_response_number","fieldName":"questionResponseLabel_number"}]}}}' -ml_analytics_observation_batchupdate_azure_blob_path: "observation/batchDeletion" +ml_analytics_observation_batchupdate_cloud_blob_path: "observation/batchDeletion" ml_analytics_observation_submission_id_filepath: "{{ WORKDIR }}/ml-analytics-service/observations/submissions.csv" ml_analytics_observation_batchupdate_output_dir: "{{ WORKDIR }}/source/observations/" 
ml_analytics_druid_survey_query_spec : '{"queryType":"scan","dataSource":"sl-survey","resultFormat":"list","columns":["completedDate","createdAt","createdBy","criteriaExternalId","criteriaId","criteriaName","surveyId","surveyName","surveySubmissionId","questionAnswer","questionECM","questionExternalId","questionId","questionName","questionResponseLabel","questionResponseType","solutionExternalId","solutionId","solutionName","updatedAt","instanceParentId","instanceId","instanceParentResponsetype","instanceParentQuestion","questionSequenceByEcm","maxScore","minScore","percentageScore","pointsBasedScoreInParent","totalScore","scoreAchieved","totalpercentage","instanceParentExternalId","instanceParentEcmSequence","remarks","total_evidences","evidence_count","instanceParentCriteriaId","instanceParentCriteriaExternalId","instanceParentCriteriaName","isAPrivateProgram","programId","programName","programExternalId","questionResponseLabel_number","channel","parent_channel","appName","organisation_name","user_subtype","user_type","board_name","district_code","district_name","district_externalId","block_code","block_name","block_externalId","school_code","school_name","school_externalId","cluster_code","cluster_name","cluster_externalId","state_code","state_name","state_externalId","organisation_id","evidences"],"intervals":["1901-01-01T00:00:00+00:00/2101-01-01T00:00:00+00:00"]}' ml_analytics_druid_survey_batch_ingestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris": 
["azure://telemetry-data-store/survey/batchDeletion/druidData.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-survey","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"completedDate","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"completedDate"},{"type":"string","name":"createdAt"},{"type":"string","name":"createdBy"},{"type":"string","name":"criteriaExternalId"},{"type":"string","name":"criteriaId"},{"type":"string","name":"criteriaName"},{"type":"string","name":"surveyId"},{"type":"string","name":"surveyName"},{"type":"string","name":"surveySubmissionId"},{"type":"string","name":"questionAnswer"},{"type":"string","name":"questionECM"},{"type":"string","name":"questionExternalId"},{"type":"string","name":"questionId"},{"type":"string","name":"questionName"},{"type":"string","name":"questionResponseLabel"},{"type":"string","name":"questionResponseType"},{"type":"string","name":"solutionExternalId"},{"type":"string","name":"solutionId"},{"type":"string","name":"solutionName"},{"type":"string","name":"updatedAt"},{"type":"string","name":"instanceParentId"},{"type":"string","name":"instanceId"},{"type":"string","name":"instanceParentResponsetype"},{"type":"string","name":"instanceParentQuestion"},{"type":"string","name":"questionSequenceByEcm"},{"type":"string","name":"maxScore"},{"type":"string","name":"minScore"},{"type":"string","name":"percentageScore"},{"type":"string","name":"pointsBasedScoreInParent"},{"type":"string","name":"totalScore"},{"type":"string","name":"scoreAchieved"},{"type":"string","name":"totalpercentage"},{"type":"string","name":"instanceParentExternalId"},{"type":"string","name":"instanceParentEcmSequence"},{"type":"string","name":"remarks"},{"type":"string","name":"total_evidences"},{"type":"string","name":"evidence_count"},{"type":"string","name":"e
vidences"},{"type":"string","name":"instanceParentCriteriaId"},{"type":"string","name":"instanceParentCriteriaExternalId"},{"type":"string","name":"instanceParentCriteriaName"},{"type":"string","name":"isAPrivateProgram"},{"type":"string","name":"programId"},{"type":"string","name":"programName"},{"type":"string","name":"programExternalId"},{"name":"questionResponseLabel_number","type":"float"},{"type":"string","name":"channel"},{"type":"string","name":"parent_channel"},{"type":"string","name":"appName"},{"type":"string","name":"organisation_name"},{"type":"string","name":"user_subtype"},{"type":"string","name":"user_type"},{"type":"string","name":"board_name"},{"type":"string","name":"district_code"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_code"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"school_code"},{"type":"string","name":"school_name"},{"type":"string","name":"school_externalId"},{"type":"string","name":"cluster_code"},{"type":"string","name":"cluster_name"},{"type":"string","name":"cluster_externalId"},{"type":"string","name":"state_code"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"organisation_id"},{"type":"string","name":"isSubmissionDeleted"}]},"metricsSpec":[{"type":"floatSum","name":"question_response_number","fieldName":"questionResponseLabel_number"}]}}}' -ml_analytics_survey_batchupdate_azure_blob_path : "survey/batchDeletion" +ml_analytics_survey_batchupdate_cloud_blob_path : "survey/batchDeletion" ml_analytics_survey_submission_id_filepath : "{{ WORKDIR }}/ml-analytics-service/survey/submissions.csv" ml_analytics_survey_batchupdate_output_dir : "{{ WORKDIR }}/source/survey/" ml_analytics_druid_interval_list: 
'["1901-01-01T00:00:00+00:00/2020-01-01T00:00:00+00:00","2020-01-01T00:00:00+00:00/2020-06-01T00:00:00+00:00","2020-06-01T00:00:00+00:00/2021-01-01T00:00:00+00:00","2021-01-01T00:00:00+00:00/2021-06-01T00:00:00+00:00","2021-06-01T00:00:00+00:00/2022-01-01T00:00:00+00:00","2022-01-01T00:00:00+00:00/2022-03-01T00:00:00+00:00","2022-03-01T00:00:00+00:00/2022-06-01T00:00:00+00:00","2022-06-01T00:00:00+00:00/2022-09-01T00:00:00+00:00","2022-09-01T00:00:00+00:00/2023-01-01T00:00:00+00:00"]' -ml_analytics_azure_account_key: "{{ cloud_private_storage_secret }}" ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## Valid options - ORACLE, gcloud, aws & azure ml_analytics_cloud_package_path: "{{ config_path }}/cloud_storage" ml_analytics_cname_url: "https://{{ cloud_private_storage_accountname }}.blob.core.windows.net/{{ cloud_storage_telemetry_bucketname }}" -ml_GCP_secret_json_file: "gcp_secrets.json" -ml_GCP_bucket_name: "{{ cloud_storage_telemetry_bucketname }}" -ml_GCP_Secrets: +ml_Cloud_secret_json_file: "cloud_secrets.json" +ml_Cloud_Secrets: account_name: "{{ cloud_private_storage_accountname }}" account_key: "{{ cloud_private_storage_secret }}" +cloud_private_storage_accountname: "{{ cloud_private_storage_accountname }}" +cloud_storage_telemetry_bucketname: "{{ cloud_storage_telemetry_bucketname }}" +cloud_private_storage_secret: "{{ cloud_private_storage_secret }}" +cloud_private_storage_region: "{{ cloud_private_storage_region }}" +cloud_private_storage_endpoint: "{{ cloud_private_storage_endpoint }}" diff --git a/ansible/roles/ml-analytics-service/tasks/main.yml b/ansible/roles/ml-analytics-service/tasks/main.yml index dfa015c99c..ee609b8806 100755 --- a/ansible/roles/ml-analytics-service/tasks/main.yml +++ b/ansible/roles/ml-analytics-service/tasks/main.yml @@ -106,7 +106,7 @@ backup: yes - name: Copy GCP Secrets to JSON file - copy: dest="{{config_path}}/{{ml_GCP_secret_json_file}}" content="{{ ml_GCP_Secrets | to_nice_json}}" 
mode=0400 owner="{{ USER }}" + copy: dest="{{config_path}}/{{ml_Cloud_secret_json_file}}" content="{{ ml_Cloud_Secrets | to_nice_json}}" mode=0400 owner="{{ USER }}" when: ML_Cloud_Service_Provider == 'gcloud' - name: Templating the shell_script_config.j2 to shell_script_config diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index 770de394cb..70160c64c0 100644 --- a/ansible/roles/ml-analytics-service/templates/config.j2 +++ b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -154,50 +154,51 @@ survey_streaming_error = {{ ml_analytics_survey_log_folder_path }}/error.log [ORACLE] -endpoint_url = {{ ml_ORACLE_endpoint_url }} +endpoint_url = {{ cloud_private_storage_endpoint }} -access_key = {{ ml_ORACLE_access_key }} +access_key = {{ cloud_private_storage_accountname }} -secret_access_key = {{ ml_ORACLE_secret_access_key }} +secret_access_key = {{ cloud_private_storage_secret }} -region_name = {{ ml_ORACLE_region_name }} +region_name = {{ cloud_private_storage_region }} -bucket_name = {{ ml_ORACLE_bucket_name }} +bucket_name = {{ cloud_storage_telemetry_bucketname }} {% elif ML_Cloud_Service_Provider is eq 'gcloud' %} [GCP] -secret_data = {{ ml_GCP_secret_json_file }} +secret_data = {{ ml_Cloud_secret_json_file }} -bucket_name = {{ ml_GCP_bucket_name }} +bucket_name = {{ cloud_storage_telemetry_bucketname }} {% elif ML_Cloud_Service_Provider is eq 'aws' %} [AWS] -service_name = {{ ml_AWS_service_name }} +service_name = S3 -access_key = {{ ml_AWS_access_key }} +access_key = {{ cloud_private_storage_accountname }} -secret_access_key = {{ ml_AWS_secret_access_key }} +secret_access_key = {{ cloud_private_storage_secret }} -region_name = {{ ml_AWS_region_name }} +region_name = {{ cloud_private_storage_region }} -bucket_name = {{ ml_AWS_bucket_name }} +bucket_name = {{ cloud_storage_telemetry_bucketname }} {% else %} [AZURE] -account_name = {{ ml_analytics_azure_account_name }} 
+account_name = {{ cloud_private_storage_accountname }} -sas_token = {{ ml_analytics_azure_sas_token }} +sas_token = {{ cloud_private_storage_secret }} -container_name = {{ ml_analytics_azure_container_name }} +container_name = {{ cloud_storage_telemetry_bucketname }} -account_key = {{ ml_analytics_azure_account_key }} +account_key = {{ cloud_private_storage_secret }} +{% endif %} [OUTPUT_DIR] @@ -250,32 +251,32 @@ program_dashboard_data = {{ ml_program_dashboard_data }} cloud_module_path = {{ ml_analytics_cloud_package_path }} -observation_blob_path = {{ ml_analytics_observation_azure_blob_path }} +observation_blob_path = {{ ml_analytics_observation_cloud_blob_path }} -projects_blob_path = {{ ml_analytics_project_azure_blob_path }} +projects_blob_path = {{ ml_analytics_project_cloud_blob_path }} -observation_distinctCount_blob_path = {{ ml_analytics_obs_distinctCnt_azure_blob_path }} +observation_distinctCount_blob_path = {{ ml_analytics_obs_distinctCnt_cloud_blob_path }} -observation_distinctCount_domain_blob_path = {{ ml_analytics_obs_distinctCnt_domain_azure_blob_path }} +observation_distinctCount_domain_blob_path = {{ ml_analytics_obs_distinctCnt_domain_cloud_blob_path }} -observation_distinctCount_domain_criteria_blob_path = {{ ml_analytics_obs_distinctCnt_domain_criteria_azure_blob_path }} +observation_distinctCount_domain_criteria_blob_path = {{ ml_analytics_obs_distinctCnt_domain_criteria_cloud_blob_path }} -projects_distinctCnt_blob_path = {{ ml_analytics_projects_distinctCnt_azure_blob_path }} +projects_distinctCnt_blob_path = {{ ml_analytics_projects_distinctCnt_cloud_blob_path }} -projects_distinctCnt_prgmlevel_blob_path = {{ ml_analytics_projects_distinctCnt_prglevel_azure_blob_path }} +projects_distinctCnt_prgmlevel_blob_path = {{ ml_analytics_projects_distinctCnt_prglevel_cloud_blob_path }} -projects_rollup_blob_path = {{ ml_analytics_project_rollup_azure_blob_path }} +projects_rollup_blob_path = {{ ml_analytics_project_rollup_cloud_blob_path }} 
-observation_rollup_blob_path = {{ ml_analytics_observation_rollup_azure_blob_path }} +observation_rollup_blob_path = {{ ml_analytics_observation_rollup_cloud_blob_path }} -survey_rollup_blob_path = {{ ml_analytics_survey_rollup_azure_blob_path }} +survey_rollup_blob_path = {{ ml_analytics_survey_rollup_cloud_blob_path }} -survey_blob_path = {{ ml_analytics_survey_azure_blob_path }} +survey_blob_path = {{ ml_analytics_survey_cloud_blob_path }} -projects_program_csv = {{ ml_analytics_program_dashboard_azure_blob_path }} +projects_program_csv = {{ ml_analytics_program_dashboard_cloud_blob_path }} -observation_batch_ingestion_data_del = {{ ml_analytics_observation_batchupdate_azure_blob_path }} +observation_batch_ingestion_data_del = {{ ml_analytics_observation_batchupdate_cloud_blob_path }} -survey_batch_ingestion_data_del = {{ ml_analytics_survey_batchupdate_azure_blob_path}} +survey_batch_ingestion_data_del = {{ ml_analytics_survey_batchupdate_cloud_blob_path}} cname_url = {{ ml_analytics_cname_url }} From e942e5df1f6b47b64c1c3f404952da9d31b11136 Mon Sep 17 00:00:00 2001 From: Akash Shah Date: Tue, 6 Dec 2022 14:26:47 +0530 Subject: [PATCH 121/616] Add kafka_bootstrap_address env key Add kafka_bootstrap_address env key --- kubernetes/helm_charts/sunbird-RC/registry/values.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/kubernetes/helm_charts/sunbird-RC/registry/values.j2 b/kubernetes/helm_charts/sunbird-RC/registry/values.j2 index 7668715b20..c582ae8941 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/values.j2 +++ b/kubernetes/helm_charts/sunbird-RC/registry/values.j2 @@ -52,6 +52,7 @@ rccoreenv: KAFKA_BROKER_ID: {{ registry_kafka_broker_id|default('1')}} KAFKA_ZOOKEEPER_CONNECT: "{{groups['processing-cluster-zookeepers']|join(':2181,')}}:2181" KAFKA_ADVERTISED_LISTENERS: "{{groups['processing-cluster-kafka']|join(':9092,')}}:9092" + kafka_bootstrap_address: "{{groups['processing-cluster-kafka']|join(':9092,')}}:9092" 
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: {{ registry_listener_security_protocol_map|default('INTERNAL:PLAINTEXT,OUTSIDE:PLAINTEXT')}} KAFKA_INTER_BROKER_LISTENER_NAME: {{ registry_inter_broker_listener_name|default('INTERNAL')}} KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: {{ registry_offsets_topic_replication_factor|default('1')}} From e7f3b0f70e86af5bbc42322bb3ebfb8c3009c956 Mon Sep 17 00:00:00 2001 From: Rajesh Kumaravel Date: Tue, 6 Dec 2022 14:44:53 +0530 Subject: [PATCH 122/616] Issue #ED-536 feat: Added fetch all API for form --- ansible/roles/kong-api/defaults/main.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/ansible/roles/kong-api/defaults/main.yml b/ansible/roles/kong-api/defaults/main.yml index ff4323914e..2e431aa0cb 100644 --- a/ansible/roles/kong-api/defaults/main.yml +++ b/ansible/roles/kong-api/defaults/main.yml @@ -9815,3 +9815,24 @@ kong_apis: - name: opa-checks config.required: true config.enabled: true + +- name: fetchAllForm + uris: "{{ data_service_prefix }}/v1/form/fetchAll" + upstream_url: "{{ player_service_url }}/plugin/v1/form/fetchAll" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - appAdmin + - name: rate-limiting + config.policy: local + config.hour: "{{ small_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ medium_request_size_limit }}" + - name: opa-checks + config.required: false + config.enabled: false From 6930cd7ec16b5bdef9be97d499f70528bdc2dca4 Mon Sep 17 00:00:00 2001 From: Rajesh Kumaravel Date: Tue, 6 Dec 2022 17:34:02 +0530 Subject: [PATCH 123/616] Issue #ED-536 fix: fetch all api added --- ansible/roles/kong-api/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/kong-api/defaults/main.yml b/ansible/roles/kong-api/defaults/main.yml index 2e431aa0cb..bb6e0c7c56 100644 --- a/ansible/roles/kong-api/defaults/main.yml 
+++ b/ansible/roles/kong-api/defaults/main.yml @@ -9826,7 +9826,7 @@ kong_apis: - "{{ statsd_pulgin }}" - name: acl config.whitelist: - - appAdmin + - formUpdate - name: rate-limiting config.policy: local config.hour: "{{ small_rate_limit_per_hour }}" From 9aef1be4b81af6927643fa8acff6c30075bb5d79 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Wed, 7 Dec 2022 11:09:30 +0530 Subject: [PATCH 124/616] Update main.yml (#3649) --- ansible/roles/ml-analytics-service/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index da571d3bed..a480b01ccd 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -44,7 +44,7 @@ ml_analytics_kafka_survey_druid_topic_name: "{{ env_name }}.ml.survey.druid" ml_analytics_observation_log_folder_path: "{{ WORKDIR }}/logs/observation" ml_analytics_project_log_folder_path: "{{ WORKDIR }}/logs/project" ml_analytics_survey_log_folder_path: "{{ WORKDIR }}/logs/survey" -ml_analytics_observation_azure_blob_path: "observation/status/" +ml_analytics_observation_cloud_blob_path: "observation/status/" ml_analytics_project_cloud_blob_path: "projects/" ml_analytics_redis_host: "{{ml_redis_host | default(groups['dp-redis'][0])}}" ml_analytics_redis_port: "{{ ml_redis_device_port | default('6379') }}" From 2bbfe4eca53e631db698d37128992434660f61a4 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Wed, 7 Dec 2022 11:25:32 +0530 Subject: [PATCH 125/616] Release 5.1.0 vars change (#3650) --- ansible/artifacts-download.yml | 11 +++++------ ansible/artifacts-upload.yml | 11 +++++------ ansible/assets-upload.yml | 4 ++-- ansible/deploy-plugins.yml | 17 +++++++++-------- ansible/desktop-faq-upload.yml | 12 ++++++------ ansible/dial_upload-schema.yml | 9 ++++----- ansible/kp_upload-schema.yml | 9 ++++----- 
ansible/plugins.yml | 10 +++++----- .../roles/cassandra-backup/defaults/main.yml | 6 +++++- ansible/roles/cassandra-backup/tasks/main.yml | 9 ++++----- .../roles/cassandra-restore/defaults/main.yml | 5 ++++- ansible/roles/cassandra-restore/tasks/main.yml | 7 +++---- ansible/roles/cert-templates/defaults/main.yml | 2 +- ansible/roles/cert-templates/tasks/main.yml | 9 ++++----- ansible/roles/desktop-deploy/defaults/main.yml | 2 +- ansible/roles/desktop-deploy/tasks/main.yml | 12 ++++++------ ansible/roles/es-azure-snapshot/tasks/main.yml | 4 ++-- .../es6/tasks/plugins/repository-azure.yml | 6 +++--- .../roles/gcp-cloud-storage/defaults/main.yml | 6 +++--- .../roles/gcp-cloud-storage/tasks/download.yml | 4 ++-- .../tasks/upload-batch-no-poll.yml | 2 +- .../gcp-cloud-storage/tasks/upload-batch.yml | 2 +- .../roles/gcp-cloud-storage/tasks/upload.yml | 2 +- ansible/roles/grafana-backup/defaults/main.yml | 6 ++++-- ansible/roles/grafana-backup/tasks/main.yml | 7 +++---- .../jenkins-backup-upload/defaults/main.yml | 5 ++++- .../roles/jenkins-backup-upload/tasks/main.yml | 7 +++---- .../log-es6/tasks/plugins/repository-azure.yml | 6 +++--- ansible/roles/mongodb-backup/defaults/main.yml | 5 ++++- ansible/roles/mongodb-backup/tasks/main.yml | 7 +++---- .../defaults/main.yml | 5 ++++- .../tasks/main.yml | 7 +++---- .../defaults/main.yml | 6 +++++- .../tasks/main.yml | 7 +++---- .../roles/postgresql-backup/defaults/main.yml | 3 +++ ansible/roles/postgresql-backup/tasks/main.yml | 9 ++++----- .../roles/postgresql-restore/defaults/main.yml | 5 ++++- ansible/roles/postgresql-restore/tasks/main.yml | 7 +++---- .../prometheus-backup-v2/defaults/main.yml | 5 ++++- .../roles/prometheus-backup-v2/tasks/main.yml | 7 +++---- .../roles/prometheus-backup/defaults/main.yml | 5 ++++- ansible/roles/prometheus-backup/tasks/main.yml | 7 +++---- .../roles/prometheus-restore/defaults/main.yml | 5 ++++- ansible/roles/prometheus-restore/tasks/main.yml | 7 +++---- 
ansible/roles/redis-backup/defaults/main.yml | 5 ++++- ansible/roles/redis-backup/tasks/main.yml | 7 +++---- ansible/uploadFAQs.yml | 4 ++-- 47 files changed, 164 insertions(+), 141 deletions(-) diff --git a/ansible/artifacts-download.yml b/ansible/artifacts-download.yml index 043446554d..25869f80a3 100644 --- a/ansible/artifacts-download.yml +++ b/ansible/artifacts-download.yml @@ -12,8 +12,8 @@ blob_container_name: "{{ artifacts_container }}" blob_file_name: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" - storage_account_name: "{{ azure_artifact_storage_account_name }}" - storage_account_key: "{{ azure_artifact_storage_account_key }}" + storage_account_name: "{{ cloud_artifact_storage_accountname }}" + storage_account_key: "{{ cloud_artifact_storage_secret }}" when: cloud_service_provider == "azure" - name: download artifact from gcloud storage @@ -21,9 +21,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: - gcp_bucket_name: "{{ gcloud_artifact_bucket_name }}" - dest_folder_name: "{{ artifacts_container }}" - dest_file_name: "{{ artifact }}" + gcp_bucket_name: "{{ cloud_storage_artifacts_bucketname }}" + gcp_path: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" when: cloud_service_provider == "gcloud" @@ -38,4 +37,4 @@ aws_default_region: "{{ aws_region }}" aws_access_key_id: "{{ aws_artifact_bucket_access_key }}" aws_secret_access_key: "{{ aws_artifact_bucket_secret_access_key }}" - when: cloud_service_provider == "aws" \ No newline at end of file + when: cloud_service_provider == "aws" diff --git a/ansible/artifacts-upload.yml b/ansible/artifacts-upload.yml index 32e866808c..31f2589a68 100644 --- a/ansible/artifacts-upload.yml +++ b/ansible/artifacts-upload.yml @@ -13,8 +13,8 @@ container_public_access: "off" blob_file_name: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" - storage_account_name: "{{ azure_artifact_storage_account_name }}" - storage_account_key: "{{ 
azure_artifact_storage_account_key }}" + storage_account_name: "{{ cloud_artifact_storage_accountname }}" + storage_account_key: "{{ cloud_artifact_storage_secret }}" when: cloud_service_provider == "azure" - name: upload artifact to gcloud storage @@ -22,9 +22,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_artifact_bucket_name }}" - dest_folder_name: "{{ artifacts_container }}" - dest_file_name: "{{ artifact }}" + gcp_bucket_name: "{{ cloud_storage_artifacts_bucketname }}" + gcp_path: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" when: cloud_service_provider == "gcloud" @@ -39,4 +38,4 @@ aws_default_region: "{{ aws_region }}" aws_access_key_id: "{{ aws_artifact_bucket_access_key }}" aws_secret_access_key: "{{ aws_artifact_bucket_secret_access_key }}" - when: cloud_service_provider == "aws" \ No newline at end of file + when: cloud_service_provider == "aws" diff --git a/ansible/assets-upload.yml b/ansible/assets-upload.yml index 12021680fe..8adf3cae74 100644 --- a/ansible/assets-upload.yml +++ b/ansible/assets-upload.yml @@ -18,8 +18,8 @@ blob_container_name: "{{ player_cdn_storage }}" container_public_access: "container" blob_container_folder_path: "" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ azure_public_storage_account_key }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" storage_account_sas_token: "{{ azure_public_storage_account_sas }}" - name: delete files and folders from azure storage using azcopy diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 6f5460809f..ae7f21637a 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -24,8 +24,8 @@ set_fact: blob_container_name: "{{ plugin_storage }}" container_public_access: "container" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ 
azure_public_storage_account_key }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" storage_account_sas_token: "{{ azure_public_storage_account_sas }}" tags: - always @@ -92,8 +92,9 @@ block: - name: set common gcloud variables set_fact: - dest_folder_name: "{{ plugin_storage }}" - gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + gcp_bucket_name: "{{ cloud_storage_content_bucketname }}" + gcp_path: "" + tags: - always @@ -103,7 +104,7 @@ name: gcp-cloud-storage tasks_from: delete-batch.yml vars: - file_delete_pattern: "{{ dest_folder_name }}/{{ folder_name }}" + file_delete_pattern: "{{ folder_name }}" tags: - content-editor - collection-editor @@ -116,7 +117,7 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: - dest_folder_path: "{{ folder_name }}" + gcp_path: "{{ folder_name }}" local_file_or_folder_path: "{{ source_name }}" tags: - content-editor @@ -132,7 +133,7 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - dest_file_name: "artefacts/content-player/content-player-{{ player_version_number }}.zip" + gcp_path: "artefacts/content-player/content-player-{{ player_version_number }}.zip" local_file_or_folder_path: "{{ source_file_name }}" tags: - preview @@ -149,7 +150,7 @@ tasks_from: "{{ item[0] }}" vars: file_delete_pattern: "content-plugins/{{ item[1] }}/*" - dest_folder_path: "content-plugins/{{ item[1] }}" + gcp_path: "content-plugins/{{ item[1] }}" local_file_or_folder_path: "{{ source_folder }}/{{ item[1] }}" with_nested: - ['delete-batch-no-poll.yml', 'upload-batch-no-poll.yml'] diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index 911153576b..d36b0e3721 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -20,8 +20,8 @@ tasks_from: blob-upload.yml vars: container_public_access: "container" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ 
azure_public_storage_account_key }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" tags: - upload-desktop-faq @@ -32,8 +32,8 @@ tasks_from: blob-upload.yml vars: container_public_access: "off" - storage_account_name: "{{ azure_private_storage_account_name }}" - storage_account_key: "{{ azure_private_storage_account_key }}" + storage_account_name: "{{ cloud_private_storage_accountname }}" + storage_account_key: "{{ cloud_private_storage_secret }}" tags: - upload-label @@ -44,8 +44,8 @@ tasks_from: blob-upload-batch.yml vars: container_public_access: "container" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ azure_public_storage_account_key }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" tags: - upload-chatbot-config - upload-batch diff --git a/ansible/dial_upload-schema.yml b/ansible/dial_upload-schema.yml index f046e63462..6572c12e55 100644 --- a/ansible/dial_upload-schema.yml +++ b/ansible/dial_upload-schema.yml @@ -35,8 +35,8 @@ container_public_access: "blob" blob_container_folder_path: "/schemas/local" local_file_or_folder_path: "dial_schema_template_files" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ azure_public_storage_account_key }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" when: cloud_service_provider == "azure" - name: upload batch of files to aws s3 @@ -57,9 +57,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: - dest_folder_name: "{{ dial_plugin_storage }}" - dest_folder_path: "schemas/local" + gcp_bucket_name: "{{ cloud_storage_dial_bucketname }}" + gcp_path: "schemas/local" local_file_or_folder_path: "dial_schema_template_files" - gcp_bucket_name: "{{ gcloud_public_bucket_name }}" when: cloud_service_provider 
== "gcloud" diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index aecdab077a..2b09dac310 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -19,8 +19,8 @@ container_public_access: "container" blob_container_folder_path: "/schemas/local" local_file_or_folder_path: "{{ source_name }}" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ azure_public_storage_account_key }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" when: cloud_service_provider == "azure" - name: upload batch of files to aws s3 @@ -41,8 +41,7 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: - dest_folder_name: "{{ plugin_storage }}" - dest_folder_path: "schemas/local" + gcp_bucket_name: "{{ cloud_storage_content_bucketname }}" + gcp_path: "schemas/local" local_file_or_folder_path: "{{ source_name }}" - gcp_bucket_name: "{{ gcloud_public_bucket_name }}" when: cloud_service_provider == "gcloud" diff --git a/ansible/plugins.yml b/ansible/plugins.yml index ab32d9f756..fa5967b462 100644 --- a/ansible/plugins.yml +++ b/ansible/plugins.yml @@ -20,8 +20,8 @@ blob_delete_pattern: "content-plugins/{{ plugins_name }}" blob_container_folder_path: "/content-plugins/{{ plugins_name }}" local_file_or_folder_path: "{{ source_file }}" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ azure_public_storage_account_key }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" - name: delete batch of files from azure storage include_role: @@ -34,14 +34,14 @@ tasks_from: blob-upload-batch.yml when: cloud_service_provider == "azure" +### GCP tasks ### - name: this block consists of tasks related to gcloud storage block: - name: set common gcloud variables set_fact: - dest_folder_name: "{{ plugin_storage }}" - gcp_bucket_name:
"{{ gcloud_public_bucket_name }}" + gcp_bucket_name: "{{ cloud_storage_content_bucketname }}" + gcp_path: "/content-plugins/{{ plugins_name }}" file_delete_pattern: "content-plugins/{{ plugins_name }}" - dest_folder_path: "/content-plugins/{{ plugins_name }}" local_file_or_folder_path: "{{ source_file }}" - name: delete files and folders from gcloud storage diff --git a/ansible/roles/cassandra-backup/defaults/main.yml b/ansible/roles/cassandra-backup/defaults/main.yml index 139fd1d810..dffec63096 100644 --- a/ansible/roles/cassandra-backup/defaults/main.yml +++ b/ansible/roles/cassandra-backup/defaults/main.yml @@ -1,5 +1,6 @@ cassandra_root_dir: '/etc/cassandra' data_dir: '/var/lib/cassandra/data' + cassandra_backup_azure_container_name: core-cassandra # This variable is added for the below reason - @@ -7,4 +8,7 @@ cassandra_backup_azure_container_name: core-cassandra # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -cassandra_backup_storage: "{{ cassandra_backup_azure_container_name }}" \ No newline at end of file +cassandra_backup_storage: "{{ cassandra_backup_azure_container_name }}" + +cloud_storage_cassandrabackup_bucketname: "{{cloud_storage_management_bucketname}}" +cloud_storage_cassandrabackup_foldername: 'cassandra-backup' diff --git a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml index 507aeb190b..304385515c 100755 --- a/ansible/roles/cassandra-backup/tasks/main.yml +++ b/ansible/roles/cassandra-backup/tasks/main.yml @@ -37,11 +37,11 @@ name: azure-cloud-storage tasks_from: upload-using-azcopy.yml vars: - blob_container_name: "{{ cassandra_backup_storage }}" + blob_container_name: "{{ cloud_storage_cassandrabackup_bucketname }}" container_public_access: "off" blob_container_folder_path: "" local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_sas_token: "{{ azure_management_storage_account_sas }}" when: cloud_service_provider == "azure" @@ -63,9 +63,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ cassandra_backup_storage }}" - dest_folder_path: "" + gcp_bucket_name: "{{ cloud_storage_cassandrabackup_bucketname }}" + gcp_path: "{{ cloud_storage_cassandrabackup_foldername }}" local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/cassandra-restore/defaults/main.yml b/ansible/roles/cassandra-restore/defaults/main.yml index 4a4828144e..834c103d58 100644 --- a/ansible/roles/cassandra-restore/defaults/main.yml +++ 
b/ansible/roles/cassandra-restore/defaults/main.yml @@ -5,4 +5,7 @@ user_home: "/home/{{ ansible_ssh_user }}/" # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -cassandra_backup_storage: "{{ cassandra_backup_azure_container_name }}" \ No newline at end of file +cassandra_backup_storage: "{{ cassandra_backup_azure_container_name }}" + +cloud_storage_cassandrabackup_bucketname: "{{cloud_storage_management_bucketname}}" +cloud_storage_cassandrabackup_foldername: 'cassandra-backup' diff --git a/ansible/roles/cassandra-restore/tasks/main.yml b/ansible/roles/cassandra-restore/tasks/main.yml index 8a47ab7089..304c9b8b09 100755 --- a/ansible/roles/cassandra-restore/tasks/main.yml +++ b/ansible/roles/cassandra-restore/tasks/main.yml @@ -14,7 +14,7 @@ blob_container_name: "{{ cassandra_backup_storage }}" blob_file_name: "{{ cassandra_restore_gzip_file_name }}" local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -37,9 +37,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ cassandra_backup_storage }}" - dest_file_name: "{{ cassandra_restore_gzip_file_name }}" + gcp_bucket_name: "{{ cloud_storage_cassandrabackup_bucketname }}" + gcp_path: "{{ cloud_storage_cassandrabackup_foldername }}/{{ cassandra_restore_gzip_file_name }}" local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}" when: cloud_service_provider == "gcloud" 
diff --git a/ansible/roles/cert-templates/defaults/main.yml b/ansible/roles/cert-templates/defaults/main.yml index c621d6ddb8..1ca7f44958 100644 --- a/ansible/roles/cert-templates/defaults/main.yml +++ b/ansible/roles/cert-templates/defaults/main.yml @@ -8,4 +8,4 @@ certs_badge_key_id: "" # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -cert_service_storage: "{{ cert_service_container_name }}" \ No newline at end of file +cert_service_storage: "{{ cert_service_container_name }}" diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index 78f1f769b3..0700f1e61a 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -40,8 +40,8 @@ container_public_access: "off" blob_container_folder_path: "" local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" - storage_account_name: "{{ azure_private_storage_account_name }}" - storage_account_key: "{{ azure_private_storage_account_key }}" + storage_account_name: "{{ cloud_private_storage_accountname }}" + storage_account_key: "{{ cloud_private_storage_secret }}" when: cloud_service_provider == "azure" - name: upload batch of files to aws s3 @@ -62,8 +62,7 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: - dest_folder_name: "{{ cert_service_storage }}" - dest_folder_path: "" + gcp_bucket_name: "{{ cloud_storage_certservice_bucketname }}" + gcp_path: "" local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" - gcp_bucket_name: "{{ gcloud_private_bucket_name }}" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/desktop-deploy/defaults/main.yml 
b/ansible/roles/desktop-deploy/defaults/main.yml index 3010db2349..06bdd6fe1f 100644 --- a/ansible/roles/desktop-deploy/defaults/main.yml +++ b/ansible/roles/desktop-deploy/defaults/main.yml @@ -7,4 +7,4 @@ offline_installer_container_name: "{{env}}-offlineinstaller" # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -offline_installer_storage: "{{ offline_installer_container_name }}" \ No newline at end of file +offline_installer_storage: "{{ offline_installer_container_name }}" diff --git a/ansible/roles/desktop-deploy/tasks/main.yml b/ansible/roles/desktop-deploy/tasks/main.yml index 09c41300ef..70fa94cb1d 100644 --- a/ansible/roles/desktop-deploy/tasks/main.yml +++ b/ansible/roles/desktop-deploy/tasks/main.yml @@ -55,8 +55,8 @@ set_fact: blob_container_name: "{{ offline_installer_storage }}" container_public_access: "blob" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ azure_public_storage_account_key }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" - name: upload batch of files to azure storage include_role: @@ -103,19 +103,19 @@ local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets/{{ time }}/" when: cloud_service_provider == "aws" +### GCP Tasks ### - name: this block consists of tasks related to gcloud storage block: - name: set common gcloud variables set_fact: - dest_folder_name: "{{ offline_installer_storage }}" - gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + gcp_bucket_name: "{{ cloud_storage_offlineinstaller_bucketname }}" - name: upload batch of files to gcloud storage include_role: name: 
gcp-cloud-storage tasks_from: upload-batch.yml vars: - dest_folder_path: "" + gcp_path: "" local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets" - name: upload batch of files to gcloud storage @@ -123,6 +123,6 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: - dest_folder_path: "latest" + gcp_path: "latest" local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets/{{ time }}/" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/es-azure-snapshot/tasks/main.yml b/ansible/roles/es-azure-snapshot/tasks/main.yml index 8ce0fcd267..23be535db9 100644 --- a/ansible/roles/es-azure-snapshot/tasks/main.yml +++ b/ansible/roles/es-azure-snapshot/tasks/main.yml @@ -13,8 +13,8 @@ vars: blob_container_name: "{{ es_backup_storage }}" container_public_access: "off" - storage_account_name: "{{ azure_management_storage_account_name }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" + storage_account_key: "{{ cloud_management_storage_secret }}" - name: Create Azure Repository uri: diff --git a/ansible/roles/es6/tasks/plugins/repository-azure.yml b/ansible/roles/es6/tasks/plugins/repository-azure.yml index 170a84000e..dd7fcc3a20 100644 --- a/ansible/roles/es6/tasks/plugins/repository-azure.yml +++ b/ansible/roles/es6/tasks/plugins/repository-azure.yml @@ -1,7 +1,7 @@ --- - name: Add default azure account name for backups become: yes - shell: echo "{{ azure_management_storage_account_name }}" | {{ es_home }}/bin/elasticsearch-keystore add -f azure.client.default.account + shell: echo "{{ cloud_management_storage_accountname }}" | {{ es_home }}/bin/elasticsearch-keystore add -f azure.client.default.account no_log: True environment: ES_PATH_CONF: "{{ conf_dir }}" @@ -9,7 +9,7 @@ - name: Add default azure account key for backups become: yes - shell: echo "{{ azure_management_storage_account_key }}" | {{ es_home 
}}/bin/elasticsearch-keystore add -f azure.client.default.key + shell: echo "{{ cloud_management_storage_secret }}" | {{ es_home }}/bin/elasticsearch-keystore add -f azure.client.default.key no_log: True environment: - ES_PATH_CONF: "{{ conf_dir }}" \ No newline at end of file + ES_PATH_CONF: "{{ conf_dir }}" diff --git a/ansible/roles/gcp-cloud-storage/defaults/main.yml b/ansible/roles/gcp-cloud-storage/defaults/main.yml index 086cf9c50d..b0fd847b26 100644 --- a/ansible/roles/gcp-cloud-storage/defaults/main.yml +++ b/ansible/roles/gcp-cloud-storage/defaults/main.yml @@ -10,8 +10,8 @@ gcp_storage_key_file: "" # Folder name in GCP bucket # Example - -# dest_folder_name: "my-destination-folder" -dest_folder_name: "" +# gcp_path: "my-destination-folder" +gcp_path: "" # The delete pattern to delete files and folder # Example - @@ -36,7 +36,7 @@ dest_file_name: "" # The folder path in gcloud storage to upload the files starting from the root of the bucket # This path should start with / if we provide a value for this variable since we are going to append this path as below -# {{ bucket_name }}{{ dest_folder_name }} +# {{ bucket_name }}{{ gcp_path }} # The above translates to "my-bucket/my-folder-path" # Example - # dest_folder_path: "/my-folder/json-files-folder" diff --git a/ansible/roles/gcp-cloud-storage/tasks/download.yml b/ansible/roles/gcp-cloud-storage/tasks/download.yml index c8c6e956ad..73bf76bb04 100644 --- a/ansible/roles/gcp-cloud-storage/tasks/download.yml +++ b/ansible/roles/gcp-cloud-storage/tasks/download.yml @@ -3,9 +3,9 @@ include_tasks: gcloud-auth.yml - name: Download from gcloud storage - shell: gsutil cp "gs://{{ gcp_bucket_name }}/{{ dest_folder_name }}/{{ dest_file_name }}" "{{ local_file_or_folder_path }}" + shell: gsutil cp "gs://{{ gcp_bucket_name }}/{{ gcp_path }}" "{{ local_file_or_folder_path }}" async: 3600 poll: 10 - name: Revoke gcloud access - include_tasks: gcloud-revoke.yml \ No newline at end of file + include_tasks: 
gcloud-revoke.yml diff --git a/ansible/roles/gcp-cloud-storage/tasks/upload-batch-no-poll.yml b/ansible/roles/gcp-cloud-storage/tasks/upload-batch-no-poll.yml index 0d8755ab26..40e9b8a66a 100644 --- a/ansible/roles/gcp-cloud-storage/tasks/upload-batch-no-poll.yml +++ b/ansible/roles/gcp-cloud-storage/tasks/upload-batch-no-poll.yml @@ -1,5 +1,5 @@ --- - name: Upload files from a local directory gcp storage - shell: gsutil -m cp -r "{{ local_file_or_folder_path }}" "gs://{{ gcp_bucket_name }}/{{ dest_folder_name }}/{{ dest_folder_path }}" + shell: gsutil -m cp -r "{{ local_file_or_folder_path }}" "gs://{{ gcp_bucket_name }}/{{ gcp_path }}" async: 1800 poll: 0 diff --git a/ansible/roles/gcp-cloud-storage/tasks/upload-batch.yml b/ansible/roles/gcp-cloud-storage/tasks/upload-batch.yml index 49abd5b822..dc103969aa 100644 --- a/ansible/roles/gcp-cloud-storage/tasks/upload-batch.yml +++ b/ansible/roles/gcp-cloud-storage/tasks/upload-batch.yml @@ -3,7 +3,7 @@ include_tasks: gcloud-auth.yml - name: Upload files from a local directory gcp storage - shell: gsutil -m cp -r "{{ local_file_or_folder_path }}" "gs://{{ gcp_bucket_name }}/{{ dest_folder_name }}/{{ dest_folder_path }}" + shell: gsutil -m cp -r "{{ local_file_or_folder_path }}" "gs://{{ gcp_bucket_name }}/{{ gcp_path}}" async: 3600 poll: 10 diff --git a/ansible/roles/gcp-cloud-storage/tasks/upload.yml b/ansible/roles/gcp-cloud-storage/tasks/upload.yml index 2f88d9407f..de766a94c7 100644 --- a/ansible/roles/gcp-cloud-storage/tasks/upload.yml +++ b/ansible/roles/gcp-cloud-storage/tasks/upload.yml @@ -3,7 +3,7 @@ include_tasks: gcloud-auth.yml - name: Upload to gcloud storage - shell: gsutil cp "{{ local_file_or_folder_path }}" "gs://{{ gcp_bucket_name }}/{{ dest_folder_name }}/{{ dest_file_name }}" + shell: gsutil cp "{{ local_file_or_folder_path }}" "gs://{{ gcp_bucket_name }}/{{ gcp_path }}" async: 3600 poll: 10 diff --git a/ansible/roles/grafana-backup/defaults/main.yml 
b/ansible/roles/grafana-backup/defaults/main.yml index fc62843964..b6850bee97 100644 --- a/ansible/roles/grafana-backup/defaults/main.yml +++ b/ansible/roles/grafana-backup/defaults/main.yml @@ -5,10 +5,12 @@ grafana_data_dir: /var/dockerdata/grafana/grafana.db sunbird_management_storage_account_name: sunbird_management_storage_account_key: '' grafana_backup_azure_container_name: grafana-backup - # This variable is added for the below reason - # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -grafana_backup_storage: "{{ grafana_backup_azure_container_name }}" \ No newline at end of file +grafana_backup_storage: "{{ grafana_backup_azure_container_name }}" + +cloud_storage_grafanabackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_grafanabackup_foldername: 'grafana-backup' diff --git a/ansible/roles/grafana-backup/tasks/main.yml b/ansible/roles/grafana-backup/tasks/main.yml index 2c8520030c..c309d409fb 100644 --- a/ansible/roles/grafana-backup/tasks/main.yml +++ b/ansible/roles/grafana-backup/tasks/main.yml @@ -28,7 +28,7 @@ container_public_access: "off" blob_file_name: "{{ grafana_backup_gzip_file_name }}" local_file_or_folder_path: "{{ grafana_backup_gzip_file_path }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -50,9 +50,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: 
"{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ grafana_backup_storage }}" - dest_file_name: "{{ grafana_backup_gzip_file_name }}" + gcp_bucket_name: "{{ cloud_storage_grafanabackup_bucketname }}" + gcp_path: "{{ cloud_storage_grafanabackup_foldername }}/{{ grafana_backup_gzip_file_name }}" local_file_or_folder_path: "{{ grafana_backup_gzip_file_path }}" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/jenkins-backup-upload/defaults/main.yml b/ansible/roles/jenkins-backup-upload/defaults/main.yml index 40a231d3d5..d9c5c9f58a 100644 --- a/ansible/roles/jenkins-backup-upload/defaults/main.yml +++ b/ansible/roles/jenkins-backup-upload/defaults/main.yml @@ -9,4 +9,7 @@ jenkins_backup_max_delay_in_days: 1 # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -jenkins_backup_storage: "{{ jenkins_backup_azure_container_name }}" \ No newline at end of file +jenkins_backup_storage: "{{ jenkins_backup_azure_container_name }}" + +cloud_storage_jenkinsbackup_bucketname: "{{cloud_storage_management_bucketname}}" +cloud_storage_jenkinsbackup_foldername: 'jenkins-backup' diff --git a/ansible/roles/jenkins-backup-upload/tasks/main.yml b/ansible/roles/jenkins-backup-upload/tasks/main.yml index a94e57fe4a..018a9498f1 100644 --- a/ansible/roles/jenkins-backup-upload/tasks/main.yml +++ b/ansible/roles/jenkins-backup-upload/tasks/main.yml @@ -21,7 +21,7 @@ container_public_access: "off" blob_file_name: "{{ LATEST_BACKUP_DIR.stdout }}.zip" local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" 
storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -43,9 +43,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ jenkins_backup_storage }}" - dest_file_name: "{{ LATEST_BACKUP_DIR.stdout }}.zip" + gcp_bucket_name: "{{ cloud_storage_jenkinsbackup_bucketname }}" + gcp_path: "{{ cloud_storage_jenkinsbackup_foldername }}/{{ LATEST_BACKUP_DIR.stdout }}.zip" local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/log-es6/tasks/plugins/repository-azure.yml b/ansible/roles/log-es6/tasks/plugins/repository-azure.yml index 9c3b9d3774..43d512803f 100644 --- a/ansible/roles/log-es6/tasks/plugins/repository-azure.yml +++ b/ansible/roles/log-es6/tasks/plugins/repository-azure.yml @@ -1,7 +1,7 @@ --- - name: Add default azure account name for backups become: yes - shell: echo "{{ azure_management_storage_account_name }}" | {{ es_home }}/bin/elasticsearch-keystore add -f azure.client.default.account + shell: echo "{{ cloud_management_storage_accountname }}" | {{ es_home }}/bin/elasticsearch-keystore add -f azure.client.default.account no_log: True environment: ES_PATH_CONF: "{{ es_conf_dir }}" @@ -9,7 +9,7 @@ - name: Add default azure account key for backups become: yes - shell: echo "{{ azure_management_storage_account_key }}" | {{ es_home }}/bin/elasticsearch-keystore add -f azure.client.default.key + shell: echo "{{ cloud_management_storage_secret }}" | {{ es_home }}/bin/elasticsearch-keystore add -f azure.client.default.key no_log: True environment: - ES_PATH_CONF: "{{ es_conf_dir }}" \ No newline at end of file + ES_PATH_CONF: "{{ es_conf_dir }}" diff --git a/ansible/roles/mongodb-backup/defaults/main.yml b/ansible/roles/mongodb-backup/defaults/main.yml index da5a0f710f..1d54a69541 100644 --- a/ansible/roles/mongodb-backup/defaults/main.yml +++ 
b/ansible/roles/mongodb-backup/defaults/main.yml @@ -6,4 +6,7 @@ mongo_backup_azure_container_name: "mongodb-backup" # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -mongo_backup_storage: "{{ mongo_backup_azure_container_name }}" \ No newline at end of file +mongo_backup_storage: "{{ mongo_backup_azure_container_name }}" + +cloud_storage_mongodbbackup_bucketname: "{{cloud_storage_management_bucketname}}" +cloud_storage_mongodbbackup_foldername: 'mongodb-backup' diff --git a/ansible/roles/mongodb-backup/tasks/main.yml b/ansible/roles/mongodb-backup/tasks/main.yml index 0762f2754f..94b157648a 100644 --- a/ansible/roles/mongodb-backup/tasks/main.yml +++ b/ansible/roles/mongodb-backup/tasks/main.yml @@ -23,7 +23,7 @@ container_public_access: "off" blob_file_name: "{{ mongo_backup_file_name }}.tar.gz" local_file_or_folder_path: "{{ mongo_backup_file_path }}.tar.gz" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -45,9 +45,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ mongo_backup_storage }}" - dest_file_name: "{{ mongo_backup_file_name }}.tar.gz" + gcp_bucket_name: "{{ cloud_storage_mongodbbackup_bucketname }}" + gcp_path: "{{ cloud_storage_mongodbbackup_foldername }}/{{ mongo_backup_file_name }}.tar.gz" local_file_or_folder_path: "{{ mongo_backup_file_path }}.tar.gz" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/postgres-managed-service-backup/defaults/main.yml 
b/ansible/roles/postgres-managed-service-backup/defaults/main.yml index 6e637bf3ce..6af37d7f96 100644 --- a/ansible/roles/postgres-managed-service-backup/defaults/main.yml +++ b/ansible/roles/postgres-managed-service-backup/defaults/main.yml @@ -14,4 +14,7 @@ postgres_password: "{{postgres_password}}" # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -postgresql_backup_storage: "{{ postgresql_backup_azure_container_name }}" \ No newline at end of file +postgresql_backup_storage: "{{ postgresql_backup_azure_container_name }}" + +cloud_storage_postgresqlbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_postgresqlbackup_foldername: postgresql-backup diff --git a/ansible/roles/postgres-managed-service-backup/tasks/main.yml b/ansible/roles/postgres-managed-service-backup/tasks/main.yml index ea206146b3..660814d9bd 100644 --- a/ansible/roles/postgres-managed-service-backup/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-backup/tasks/main.yml @@ -50,7 +50,7 @@ container_public_access: "off" blob_file_name: "{{ postgresql_backup_gzip_file_name }}.zip" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -72,9 +72,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ postgresql_backup_storage }}" - dest_file_name: "{{ postgresql_backup_gzip_file_name }}.zip" + gcp_bucket_name: "{{ 
cloud_storage_postgresqlbackup_bucketname }}" + gcp_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ postgresql_backup_gzip_file_name }}.zip" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/postgres-managed-service-restore/defaults/main.yml b/ansible/roles/postgres-managed-service-restore/defaults/main.yml index 4ac0d62151..41164b445c 100644 --- a/ansible/roles/postgres-managed-service-restore/defaults/main.yml +++ b/ansible/roles/postgres-managed-service-restore/defaults/main.yml @@ -18,4 +18,8 @@ postgres_env: # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -postgres_backup_storage: "{{ postgres_backup_azure_container_name }}" \ No newline at end of file +postgres_backup_storage: "{{ postgres_backup_azure_container_name }}" + + +cloud_storage_postgresqlbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_postgresqlbackup_foldername: postgresql-backup diff --git a/ansible/roles/postgres-managed-service-restore/tasks/main.yml b/ansible/roles/postgres-managed-service-restore/tasks/main.yml index 0299ff3f73..8efa3dd561 100644 --- a/ansible/roles/postgres-managed-service-restore/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-restore/tasks/main.yml @@ -17,7 +17,7 @@ blob_container_name: "{{ postgres_backup_storage }}" blob_file_name: "{{ postgres_backup_filename }}" local_file_or_folder_path: "{{ postgres_restore_dir }}/{{ postgres_backup_filepath }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ 
azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -39,9 +39,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ postgres_backup_storage }}" - dest_file_name: "{{ postgres_backup_filename }}" + gcp_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" + gcp_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ postgres_backup_filename }}" local_file_or_folder_path: "{{ postgres_restore_dir }}/{{ postgres_backup_filepath }}" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/postgresql-backup/defaults/main.yml b/ansible/roles/postgresql-backup/defaults/main.yml index 0b6a9bca4a..30902eac17 100644 --- a/ansible/roles/postgresql-backup/defaults/main.yml +++ b/ansible/roles/postgresql-backup/defaults/main.yml @@ -8,3 +8,6 @@ postgresql_backup_azure_container_name: postgresql-backup # or other default files and just assign the value to the newly introduced common variable # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos postgresql_backup_storage: "{{ postgresql_backup_azure_container_name }}" + +cloud_storage_postgresqlbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_postgresqlbackup_foldername: postgresql-backup diff --git a/ansible/roles/postgresql-backup/tasks/main.yml b/ansible/roles/postgresql-backup/tasks/main.yml index 65116bede0..6710e49503 100644 --- a/ansible/roles/postgresql-backup/tasks/main.yml +++ b/ansible/roles/postgresql-backup/tasks/main.yml @@ -22,7 +22,7 @@ container_public_access: "off" blob_file_name: "{{ postgresql_backup_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -44,11 +44,10 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ postgresql_backup_storage }}" - dest_file_name: "{{ postgresql_backup_gzip_file_name }}" + gcp_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" + gcp_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ postgresql_backup_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" when: cloud_service_provider == "gcloud" - name: clean up backup dir after upload - file: path="{{ postgresql_backup_dir }}" state=absent \ No newline at end of file + file: path="{{ postgresql_backup_dir }}" state=absent diff --git a/ansible/roles/postgresql-restore/defaults/main.yml b/ansible/roles/postgresql-restore/defaults/main.yml index 5f0708ed34..2bcc525469 100644 --- a/ansible/roles/postgresql-restore/defaults/main.yml +++ b/ansible/roles/postgresql-restore/defaults/main.yml @@ -10,4 +10,7 @@ 
postgresql_restore_azure_container_name: postgresql-backup # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -postgresql_restore_storage: "{{ postgresql_restore_azure_container_name }}" \ No newline at end of file +postgresql_restore_storage: "{{ postgresql_restore_azure_container_name }}" + +cloud_storage_postgresqlbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_postgresqlbackup_foldername: postgresql-backup diff --git a/ansible/roles/postgresql-restore/tasks/main.yml b/ansible/roles/postgresql-restore/tasks/main.yml index 877e178987..b3411c5445 100644 --- a/ansible/roles/postgresql-restore/tasks/main.yml +++ b/ansible/roles/postgresql-restore/tasks/main.yml @@ -12,7 +12,7 @@ blob_container_name: "{{ postgresql_restore_storage }}" blob_file_name: "{{ postgresql_restore_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_restore_gzip_file_path }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -34,9 +34,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ postgresql_restore_storage }}" - dest_file_name: "{{ postgresql_restore_gzip_file_name }}" + gcp_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" + gcp_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ postgresql_restore_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_restore_gzip_file_path }}" when: cloud_service_provider == "gcloud" diff --git 
a/ansible/roles/prometheus-backup-v2/defaults/main.yml b/ansible/roles/prometheus-backup-v2/defaults/main.yml index e3752a693f..430c9df4cb 100644 --- a/ansible/roles/prometheus-backup-v2/defaults/main.yml +++ b/ansible/roles/prometheus-backup-v2/defaults/main.yml @@ -7,4 +7,7 @@ prometheus_backup_azure_container_name: prometheus-backup # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" \ No newline at end of file +prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" + +cloud_storage_prometheusbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_prometheusbackup_foldername: prometheus-backup diff --git a/ansible/roles/prometheus-backup-v2/tasks/main.yml b/ansible/roles/prometheus-backup-v2/tasks/main.yml index 3831080dbc..9b25fc465c 100644 --- a/ansible/roles/prometheus-backup-v2/tasks/main.yml +++ b/ansible/roles/prometheus-backup-v2/tasks/main.yml @@ -25,7 +25,7 @@ container_public_access: "off" blob_file_name: "{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" local_file_or_folder_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -47,9 +47,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ prometheus_backup_storage }}" - dest_file_name: "{{ prometheus_backup_prefix }}_{{ snapshot_name 
}}.tar.gz" + gcp_bucket_name: "{{ cloud_storage_prometheusbackup_bucketname }}" + gcp_path: "{{ cloud_storage_prometheusbackup_foldername }}/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" local_file_or_folder_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/prometheus-backup/defaults/main.yml b/ansible/roles/prometheus-backup/defaults/main.yml index 17425092ee..3bba75124c 100644 --- a/ansible/roles/prometheus-backup/defaults/main.yml +++ b/ansible/roles/prometheus-backup/defaults/main.yml @@ -11,4 +11,7 @@ backup_storage_key: '' # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" \ No newline at end of file +prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" + +cloud_storage_prometheusbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_prometheusbackup_foldername: prometheus-backup diff --git a/ansible/roles/prometheus-backup/tasks/main.yml b/ansible/roles/prometheus-backup/tasks/main.yml index 55a51287ae..14cc74a41a 100644 --- a/ansible/roles/prometheus-backup/tasks/main.yml +++ b/ansible/roles/prometheus-backup/tasks/main.yml @@ -34,7 +34,7 @@ container_public_access: "off" blob_file_name: "{{ prometheus_backup_gzip_file_name }}" local_file_or_folder_path: "{{ prometheus_backup_gzip_file_path }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -56,9 
+56,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ prometheus_backup_storage }}" - dest_file_name: "{{ prometheus_backup_gzip_file_name }}" + gcp_bucket_name: "{{ cloud_storage_prometheusbackup_bucketname }}" + gcp_path: "{{ cloud_storage_prometheusbackup_foldername }}/{{ prometheus_backup_gzip_file_name }}" local_file_or_folder_path: "{{ prometheus_backup_gzip_file_path }}" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/prometheus-restore/defaults/main.yml b/ansible/roles/prometheus-restore/defaults/main.yml index bee405457a..580e865060 100644 --- a/ansible/roles/prometheus-restore/defaults/main.yml +++ b/ansible/roles/prometheus-restore/defaults/main.yml @@ -6,4 +6,7 @@ prometheus_backup_azure_container_name: prometheus-backup # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" \ No newline at end of file +prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" + +cloud_storage_prometheusbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_prometheusbackup_foldername: prometheus-backup diff --git a/ansible/roles/prometheus-restore/tasks/main.yml b/ansible/roles/prometheus-restore/tasks/main.yml index 2232770fdd..0c9b0749a9 100644 --- a/ansible/roles/prometheus-restore/tasks/main.yml +++ b/ansible/roles/prometheus-restore/tasks/main.yml @@ -10,7 +10,7 @@ blob_container_name: "{{ prometheus_backup_storage }}" blob_file_name: "{{ prometheus_backup_filename }}" local_file_or_folder_path: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filepath }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -32,9 +32,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ prometheus_backup_storage }}" - dest_file_name: "{{ prometheus_backup_filename }}" + gcp_bucket_name: "{{ cloud_storage_prometheusbackup_bucketname }}" + gcp_path: "{{ cloud_storage_prometheusbackup_foldername }}/{{ prometheus_backup_filename }}" local_file_or_folder_path: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filepath }}" when: cloud_service_provider == "gcloud" diff --git a/ansible/roles/redis-backup/defaults/main.yml b/ansible/roles/redis-backup/defaults/main.yml index 9f6055682a..6aacb354d6 100644 --- a/ansible/roles/redis-backup/defaults/main.yml +++ b/ansible/roles/redis-backup/defaults/main.yml @@ -7,4 +7,7 @@ learner_user: learning # 2. 
We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo # or other default files and just assign the value to the newly introduced common variable # 3. After few releases, we will remove the older variables and use only the new variables across the repos -nodebb_redis_backup_storage: "{{ nodebb_redis_backup_azure_container_name }}" \ No newline at end of file +nodebb_redis_backup_storage: "{{ nodebb_redis_backup_azure_container_name }}" + +cloud_storage_redisbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_redisbackup_foldername: nodebb-redis-backup diff --git a/ansible/roles/redis-backup/tasks/main.yml b/ansible/roles/redis-backup/tasks/main.yml index 5359a362c8..9f0c15a815 100644 --- a/ansible/roles/redis-backup/tasks/main.yml +++ b/ansible/roles/redis-backup/tasks/main.yml @@ -22,7 +22,7 @@ container_public_access: "off" blob_file_name: "{{ redis_backup_file_name }}" local_file_or_folder_path: "{{ redis_backup_file_path }}" - storage_account_name: "{{ azure_management_storage_account_name }}" + storage_account_name: "{{ cloud_management_storage_accountname }}" storage_account_key: "{{ azure_management_storage_account_key }}" when: cloud_service_provider == "azure" @@ -44,9 +44,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_management_bucket_name }}" - dest_folder_name: "{{ nodebb_redis_backup_storage }}" - dest_file_name: "{{ redis_backup_file_name }}" + gcp_bucket_name: "{{ cloud_storage_redisbackup_bucketname }}" + gcp_path: "{{ cloud_storage_redisbackup_foldername }}/{{ redis_backup_file_name }}" local_file_or_folder_path: "{{ redis_backup_file_path }}" when: cloud_service_provider == "gcloud" diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index b37398b874..88d17aba81 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -24,8 +24,8 @@ container_public_access: "container" 
blob_container_folder_path: "" local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" - storage_account_name: "{{ azure_public_storage_account_name }}" - storage_account_key: "{{ azure_public_storage_account_key }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" with_items: - "{{ source_folder.split(',') }}" when: cloud_service_provider == "azure" From 146c793dbbb3119dbfd4c887d819d04b117fe1f4 Mon Sep 17 00:00:00 2001 From: G33tha Date: Thu, 8 Dec 2022 13:42:45 +0530 Subject: [PATCH 126/616] updated knowlgbb service changes --- ansible/roles/stack-sunbird/defaults/main.yml | 5 +++++ pipelines/deploy/ContentFramework/Jenkinsfile | 1 + pipelines/upload/schema/dial/Jenkinsfile | 4 ++-- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 8b600f04a1..0c8c86242d 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -1048,3 +1048,8 @@ kong_desktop_device_consumer_names_for_opa: '["desktop"]' # Audience claim check is disabled as of now # List of keycloak clients as these can come in audience field of a JWT token # keycloak_allowed_aud: '"{{ keycloak_auth_server_url }}/realms/{{ keycloak_realm }}", "account", "realm-management"' + + +cloudstorage_relative_path_prefix_content: "CONTENT_STORAGE_BASE_PATH" +cloudstorage_relative_path_prefix_dial: "DIAL_STORAGE_BASE_PATH" +cloudstorage_metadata_list: '["appIcon", "artifactUrl", "posterImage", "previewUrl", "thumbnail", "assetsMap", "certTemplate", "itemSetPreviewUrl", "grayScaleAppIcon", "sourceURL", "variants", "downloadUrl", "streamingUrl", "toc_url", "data", "question", "solutions", "editorState", "media", "pdfUrl"]' \ No newline at end of file diff --git a/pipelines/deploy/ContentFramework/Jenkinsfile b/pipelines/deploy/ContentFramework/Jenkinsfile index 
c495bce266..a02c72eb69 100644 --- a/pipelines/deploy/ContentFramework/Jenkinsfile +++ b/pipelines/deploy/ContentFramework/Jenkinsfile @@ -44,6 +44,7 @@ node() { sh """ zip -r content-editor-artifact.zip ansible/content-editor cd ansible/content-editor + sudo npm install -g gulp npm install npm install promise gulp minifyJs diff --git a/pipelines/upload/schema/dial/Jenkinsfile b/pipelines/upload/schema/dial/Jenkinsfile index dd74b2f23f..553dcfd840 100644 --- a/pipelines/upload/schema/dial/Jenkinsfile +++ b/pipelines/upload/schema/dial/Jenkinsfile @@ -28,8 +28,8 @@ node() { rm -rf sunbird-dial-service git clone https://github.com/project-sunbird/sunbird-dial-service.git -b ${params.dial_branch_or_tag} """ - ansiblePlaybook = "${currentWs}/ansible/dial_upload-schema.yml" - ansibleExtraArgs = "--extra-vars \" source_name=${currentWs}/sunbird-dial-service/schemas\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansiblePlaybook = "${currentWs}/ansible/dial_upload-schema.yml" + ansibleExtraArgs = "--extra-vars \" source_name=${currentWs}/sunbird-dial-service/jsonld-schema \" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) From 012961fa2520435d977ed27a8e58ff98139e82af Mon Sep 17 00:00:00 2001 From: G33tha Date: Thu, 8 Dec 2022 13:45:41 +0530 Subject: [PATCH 127/616] updated knowlgbb service changes --- pipelines/upload/schema/dial/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/upload/schema/dial/Jenkinsfile b/pipelines/upload/schema/dial/Jenkinsfile index 553dcfd840..a91956eaf1 100644 --- a/pipelines/upload/schema/dial/Jenkinsfile +++ b/pipelines/upload/schema/dial/Jenkinsfile @@ -28,7 +28,7 @@ node() { rm -rf sunbird-dial-service git clone https://github.com/project-sunbird/sunbird-dial-service.git -b ${params.dial_branch_or_tag} """ - ansiblePlaybook = "${currentWs}/ansible/dial_upload-schema.yml" + ansiblePlaybook = 
"${currentWs}/ansible/dial_upload-schema.yml" ansibleExtraArgs = "--extra-vars \" source_name=${currentWs}/sunbird-dial-service/jsonld-schema \" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) From 5db7065a368ddd6aeb0df17500d177cea76376a0 Mon Sep 17 00:00:00 2001 From: anilgupta Date: Thu, 8 Dec 2022 13:55:03 +0530 Subject: [PATCH 128/616] Issue #KN-439 chore: Mering the changes from release-5.2.0-knowlg to release-5.2.0 --- ansible/roles/kong-api/defaults/main.yml | 167 +++++++++++++++++- .../content-service_application.conf | 11 +- .../templates/dial-service_application.conf | 10 ++ .../taxonomy-service_application.conf | 8 + 4 files changed, 194 insertions(+), 2 deletions(-) diff --git a/ansible/roles/kong-api/defaults/main.yml b/ansible/roles/kong-api/defaults/main.yml index bb6e0c7c56..d7589ae22e 100644 --- a/ansible/roles/kong-api/defaults/main.yml +++ b/ansible/roles/kong-api/defaults/main.yml @@ -127,6 +127,7 @@ assessment_prefix: /assessment # Service URLs knowledge_mw_service_url: "http://knowledge-mw-service:5000" learning_service_url: "http://learner-service:9000" +dial_service_url: "http://dial-service:9000" vm_learning_service_url: "http://{{learningservice_ip}}:8080/learning-service" telemetry_service_url: "http://telemetry-service:9001" player_service_url: "http://player:3000" @@ -2494,7 +2495,7 @@ kong_apis: - name: publishContent uris: "{{ content_prefix }}/v1/publish" - upstream_url: "{{ knowledge_mw_service_url }}/v1/content/publish" + upstream_url: "{{ content_service_url }}/content/v3/publish" strip_uri: true plugins: - name: jwt @@ -9836,3 +9837,167 @@ kong_apis: - name: opa-checks config.required: false config.enabled: false + +- name: releaseDialcodeContentV2 + uris: "{{ content_prefix }}/v2/dialcode/release" + upstream_url: "{{ content_service_url }}/content/v4/dialcode/release" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - 
name: acl + config.whitelist: + - contentUpdate + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: opa-checks + config.required: true + config.enabled: true + +- name: releaseDialcodeCollection + uris: "{{ collection_prefix }}/v1/dialcode/release" + upstream_url: "{{ content_service_url }}/collection/v4/dialcode/release" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - contentUpdate + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: opa-checks + config.required: true + config.enabled: true + +- name: publishContentV2 + uris: "{{ content_prefix }}/v2/publish" + upstream_url: "{{ content_service_url }}/content/v4/publish" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - contentAdmin + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: opa-checks + config.required: true + config.enabled: true + +- name: unlistedPublishContent + uris: "{{ content_prefix }}/v1/unlisted/publish" + upstream_url: "{{ content_service_url }}/content/v3/unlisted/publish" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - contentAdmin + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ 
small_request_size_limit }}" + - name: opa-checks + config.required: true + config.enabled: true + +- name: unlistedPublishContentV2 + uris: "{{ content_prefix }}/v2/unlisted/publish" + upstream_url: "{{ content_service_url }}/content/v4/unlisted/publish" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - contentAdmin + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: opa-checks + config.required: true + config.enabled: true + +- name: publishCollection + uris: "{{ collection_prefix }}/v1/publish" + upstream_url: "{{ content_service_url }}/collection/v4/publish" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - contentAdmin + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: opa-checks + config.required: true + config.enabled: true + +- name: unlistedPublishCollection + uris: "{{ collection_prefix }}/v1/unlisted/publish" + upstream_url: "{{ content_service_url }}/collection/v4/unlisted/publish" + strip_uri: true + plugins: + - name: jwt + - name: cors + - "{{ statsd_pulgin }}" + - name: acl + config.whitelist: + - contentAdmin + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: credential + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: opa-checks + config.required: true + config.enabled: true + +- name: readDIALCodesBatchInfo + uris: "{{ dialcode_service_prefix }}/v2/read/batch" + upstream_url: "{{ dial_service_url }}/dialcode/v4/batch/read" + 
strip_uri: true + plugins: + - name: cors + - "{{ statsd_pulgin }}" + - name: rate-limiting + config.policy: local + config.hour: "{{ medium_rate_limit_per_hour }}" + config.limit_by: ip + - name: request-size-limiting + config.allowed_payload_size: "{{ small_request_size_limit }}" + - name: opa-checks + config.required: true + config.enabled: true \ No newline at end of file diff --git a/ansible/roles/stack-sunbird/templates/content-service_application.conf b/ansible/roles/stack-sunbird/templates/content-service_application.conf index fb5a2e7667..bb44a71828 100644 --- a/ansible/roles/stack-sunbird/templates/content-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/content-service_application.conf @@ -382,7 +382,7 @@ content { } h5p { library { - path: "{{ h5p_library_path }}" + path: "{{ h5p_library_path | default('https://sunbirddev.blob.core.windows.net/sunbird-content-dev/h5p-standalone-1.3.4.zip') }}" } } copy { @@ -494,6 +494,7 @@ kafka { urls : "{{ kafka_urls }}" topic.send.enable : true topics.instruction : "{{ env_name }}.learning.job.request" + publish.request.topic : "{{ env_name }}.publish.job.request" } # DIAL Link Config @@ -637,3 +638,11 @@ collection { } plugin.media.base.url="{{ plugin_media_base_url }}" + +cloudstorage { + metadata.replace_absolute_path={{ cloudstorage_replace_absolute_path | default('false') }} + relative_path_prefix={{ cloudstorage_relative_path_prefix_content }} + metadata.list={{ cloudstorage_metadata_list }} + read_base_path="{{ cloudstorage_base_path }}" + write_base_path={{ valid_cloudstorage_base_urls }} +} diff --git a/ansible/roles/stack-sunbird/templates/dial-service_application.conf b/ansible/roles/stack-sunbird/templates/dial-service_application.conf index 745a8b9bfe..dd7b11dbeb 100644 --- a/ansible/roles/stack-sunbird/templates/dial-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/dial-service_application.conf @@ -150,6 +150,10 @@ system.config.table="system_config" 
publisher.keyspace.name="{{ env_name }}_dialcode_store" publisher.keyspace.table="publisher" +#QRCodes Configuration +qrcodes.keyspace.name="dialcodes" +qrcodes.keyspace.table="dialcode_batch" + #DIAL Code Generator Configuration dialcode.strip.chars="0" dialcode.length=6.0 @@ -191,3 +195,9 @@ jsonld { sb_schema = ["http://store.knowlg.sunbird.org/dial/specs/sb/schema.jsonld"] } +cloudstorage { + metadata.replace_absolute_path="{{ cloudstorage_replace_absolute_path | default('false') }}" + relative_path_prefix="{{ cloudstorage_relative_path_prefix_dial | default('DIAL_STORAGE_BASE_PATH') }}" + read_base_path="{{ cloudstorage_base_path }}" +} +cloud_storage_container="{{ cloud_storage_dial_bucketname | default('dial') }}" diff --git a/ansible/roles/stack-sunbird/templates/taxonomy-service_application.conf b/ansible/roles/stack-sunbird/templates/taxonomy-service_application.conf index e1298a1b92..332206c502 100644 --- a/ansible/roles/stack-sunbird/templates/taxonomy-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/taxonomy-service_application.conf @@ -398,3 +398,11 @@ objectcategorydefinition.keyspace="{{ lp_cassandra_keyspace_prefix }}_category_s # Framework master category validation Supported values are Yes/No master.category.validation.enabled="{{ master_category_validation_enabled | default('Yes') }}" + +cloudstorage { + metadata.replace_absolute_path={{ cloudstorage_replace_absolute_path | default('false') }} + relative_path_prefix={{ cloudstorage_relative_path_prefix_content }} + metadata.list={{ cloudstorage_metadata_list }} + read_base_path="{{ cloudstorage_base_path }}" + write_base_path={{ valid_cloudstorage_base_urls }} +} From 484731bcdbe3106a1c9f83a971369e9046267dce Mon Sep 17 00:00:00 2001 From: swayangjit Date: Thu, 8 Dec 2022 15:52:41 +0530 Subject: [PATCH 129/616] Issue #ED-354 chore: Update aab firebase upload logic. 
--- ansible/roles/firebase_deploy/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/firebase_deploy/tasks/main.yml b/ansible/roles/firebase_deploy/tasks/main.yml index 0403ff7dab..8ebd281250 100644 --- a/ansible/roles/firebase_deploy/tasks/main.yml +++ b/ansible/roles/firebase_deploy/tasks/main.yml @@ -9,7 +9,7 @@ src: "uploadToGdrive.sh" dest: "./uploadToGdrive.sh" - name: Uploading build to {{ env_name }} firebase - shell: find ../ -maxdepth 1 -iregex ".*{{env_name}}.apk" -exec bash deployToFirebase.sh {} \; + shell: find ../ -maxdepth 1 -iregex ".*{{env_name}}.*.aab" -exec bash deployToFirebase.sh {} \; when: env_name!='production' - name: Uploading build to {{ env_name }} Gdrive shell: find ../ -maxdepth 1 -iregex ".*[0-9].apk" -exec bash uploadToGdrive.sh -v -r {} \; From b14d6de817d42373514f6013cdf83f8b4108ad17 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Thu, 8 Dec 2022 20:45:54 +0530 Subject: [PATCH 130/616] Release 5.1.0 vars change (#3658) --- ansible/artifacts-download.yml | 12 +- ansible/artifacts-upload.yml | 12 +- ansible/assets-upload.yml | 27 ++-- ansible/deploy-plugins.yml | 25 ++-- ansible/desktop-faq-upload.yml | 120 ++++++++++++++---- ansible/dial_upload-schema.yml | 19 +-- ansible/inventory/env/group_vars/all.yml | 1 - ansible/kp_upload-schema.yml | 19 +-- .../roles/cassandra-backup/defaults/main.yml | 10 -- ansible/roles/cassandra-backup/tasks/main.yml | 12 +- .../roles/cassandra-restore/defaults/main.yml | 7 - .../roles/cassandra-restore/tasks/main.yml | 14 +- .../roles/cert-templates/defaults/main.yml | 7 - ansible/roles/cert-templates/tasks/main.yml | 12 +- .../roles/desktop-deploy/defaults/main.yml | 7 - ansible/roles/desktop-deploy/tasks/main.yml | 10 +- .../roles/grafana-backup/defaults/main.yml | 7 - ansible/roles/grafana-backup/tasks/main.yml | 14 +- .../jenkins-backup-upload/defaults/main.yml | 8 -- .../jenkins-backup-upload/tasks/main.yml 
| 14 +- .../roles/mongodb-backup/defaults/main.yml | 8 -- ansible/roles/mongodb-backup/tasks/main.yml | 12 +- .../defaults/main.yml | 9 -- .../tasks/main.yml | 14 +- .../defaults/main.yml | 10 -- .../tasks/main.yml | 14 +- .../roles/postgresql-backup/defaults/main.yml | 8 -- .../roles/postgresql-backup/tasks/main.yml | 14 +- .../postgresql-restore/defaults/main.yml | 8 -- .../roles/postgresql-restore/tasks/main.yml | 14 +- .../prometheus-backup-v2/defaults/main.yml | 8 -- .../roles/prometheus-backup-v2/tasks/main.yml | 12 +- .../roles/prometheus-backup/defaults/main.yml | 9 -- .../roles/prometheus-backup/tasks/main.yml | 14 +- .../prometheus-restore/defaults/main.yml | 9 -- .../roles/prometheus-restore/tasks/main.yml | 14 +- ansible/roles/redis-backup/defaults/main.yml | 8 -- ansible/roles/redis-backup/tasks/main.yml | 14 +- ansible/uploadFAQs.yml | 62 +++++++-- .../UploadCollectionHierarchyCSV/config.xml | 11 +- .../jobs/UploadChatbotConfig/config.xml | 11 +- .../UploadCollectionHierarchyCSV/config.xml | 13 +- .../jobs/UploadDiscussionUIDocs/config.xml | 11 +- .../Kubernetes/jobs/UploadFAQs/config.xml | 9 -- .../jobs/UploadPortalLabel/config.xml | 9 -- .../Sunbird-RC/jobs/UploadRCSchema/config.xml | 9 -- pipelines/deploy/desktop-faq/Jenkinsfile | 2 +- pipelines/upload/chatbot/Jenkinsfile | 2 +- pipelines/upload/discussion-UI/Jenkinsfile | 2 +- pipelines/upload/faqs/Jenkinsfile | 2 +- pipelines/upload/portal-csv/Jenkinsfile | 2 +- 51 files changed, 300 insertions(+), 401 deletions(-) diff --git a/ansible/artifacts-download.yml b/ansible/artifacts-download.yml index 25869f80a3..46167180e4 100644 --- a/ansible/artifacts-download.yml +++ b/ansible/artifacts-download.yml @@ -9,7 +9,7 @@ name: azure-cloud-storage tasks_from: blob-download.yml vars: - blob_container_name: "{{ artifacts_container }}" + blob_container_name: "{{ cloud_storage_artifacts_bucketname }}" blob_file_name: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" storage_account_name: "{{ 
cloud_artifact_storage_accountname }}" @@ -32,9 +32,9 @@ tasks_from: download.yml vars: local_file_or_folder_path: "{{ artifact_path }}" - s3_bucket_name: "{{ aws_artifact_s3_bucket_name }}" - s3_path: "{{ artifacts_container }}/{{ artifact }}" - aws_default_region: "{{ aws_region }}" - aws_access_key_id: "{{ aws_artifact_bucket_access_key }}" - aws_secret_access_key: "{{ aws_artifact_bucket_secret_access_key }}" + s3_bucket_name: "{{ cloud_storage_artifacts_bucketname }}" + s3_path: "{{ artifact }}" + aws_default_region: "{{ cloud_public_storage_region }}" + aws_access_key_id: "{{ cloud_artifact_storage_accountname }}" + aws_secret_access_key: "{{ cloud_artifact_storage_secret }}" when: cloud_service_provider == "aws" diff --git a/ansible/artifacts-upload.yml b/ansible/artifacts-upload.yml index 31f2589a68..3bdbe73017 100644 --- a/ansible/artifacts-upload.yml +++ b/ansible/artifacts-upload.yml @@ -9,7 +9,7 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ artifacts_container }}" + blob_container_name: "{{ cloud_storage_artifacts_bucketname }}" container_public_access: "off" blob_file_name: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" @@ -33,9 +33,9 @@ tasks_from: upload.yml vars: local_file_or_folder_path: "{{ artifact_path }}" - s3_bucket_name: "{{ aws_artifact_s3_bucket_name }}" - s3_path: "{{ artifacts_container }}/{{ artifact }}" - aws_default_region: "{{ aws_region }}" - aws_access_key_id: "{{ aws_artifact_bucket_access_key }}" - aws_secret_access_key: "{{ aws_artifact_bucket_secret_access_key }}" + s3_bucket_name: "{{ cloud_storage_artifacts_bucketname }}" + s3_path: "{{ artifact }}" + aws_default_region: "{{ cloud_public_storage_region }}" + aws_access_key_id: "{{ cloud_artifact_storage_accountname }}" + aws_secret_access_key: "{{ cloud_artifact_storage_secret }}" when: cloud_service_provider == "aws" diff --git a/ansible/assets-upload.yml b/ansible/assets-upload.yml index 8adf3cae74..2d8d4b1bc2 
100644 --- a/ansible/assets-upload.yml +++ b/ansible/assets-upload.yml @@ -2,20 +2,13 @@ - hosts: localhost vars_files: - ['{{inventory_dir}}/secrets.yml', 'secrets/{{env}}.yml'] - # The vars: section is added for the below reason - # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name - # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo - # or other default files and just assign the value to the newly introduced common variable - # 3. After few releases, we will remove the older variables and use only the new variables across the repos - vars: - player_cdn_storage: "{{ player_cdn_container }}" # Azure tasks: - name: this block consists of tasks related to azure storage block: - name: set common azure variables set_fact: - blob_container_name: "{{ player_cdn_storage }}" + blob_container_name: "{{ cloud_storage_playercdn_bucketname }}" container_public_access: "container" blob_container_folder_path: "" storage_account_name: "{{ cloud_public_storage_accountname }}" @@ -40,11 +33,11 @@ block: - name: set common aws variables set_fact: - s3_bucket_name: "{{ aws_public_s3_bucket_name }}" - s3_path: "{{ player_cdn_storage }}" - aws_default_region: "{{ aws_region }}" - aws_access_key_id: "{{ aws_public_bucket_access_key }}" - aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + s3_bucket_name: "{{ cloud_storage_playercdn_bucketname }}" + s3_path: "" + aws_default_region: "{{ cloud_public_storage_region }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" - name: delete files and folders from s3 include_role: @@ -64,10 +57,10 @@ block: - name: set common gcloud variables set_fact: - dest_folder_name: "{{ player_cdn_storage }}" - dest_folder_path: "" - file_delete_pattern: "{{ 
player_cdn_storage }}/" - gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + gcp_bucket_name: "{{ cloud_storage_playercdn_bucketname }}" + gcp_path: "" + file_delete_pattern: "" + - name: delete files and folders from gcloud storage include_role: diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index ae7f21637a..6d048b18c4 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -2,13 +2,6 @@ gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" - # The vars: section is added for the below reason - # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name - # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo - # or other default files and just assign the value to the newly introduced common variable - # 3. After few releases, we will remove the older variables and use only the new variables across the repos - vars: - plugin_storage: "{{ plugin_container_name }}" tasks: - name: rename env_domain in preview_cdn.html for CDN shell: | @@ -22,7 +15,7 @@ block: - name: set common azure variables set_fact: - blob_container_name: "{{ plugin_storage }}" + blob_container_name: "{{ cloud_storage_content_bucketname }}" container_public_access: "container" storage_account_name: "{{ cloud_public_storage_accountname }}" storage_account_key: "{{ cloud_public_storage_secret }}" @@ -169,10 +162,10 @@ block: - name: set common aws variables set_fact: - aws_default_region: "{{ aws_region }}" - s3_bucket_name: "{{ aws_public_s3_bucket_name }}" - aws_access_key_id: "{{ aws_public_bucket_access_key }}" - aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + aws_default_region: "{{ cloud_public_storage_region }}" + s3_bucket_name: "{{ cloud_storage_content_bucketname }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + 
aws_secret_access_key: "{{ cloud_public_storage_secret }}" tags: - always @@ -182,7 +175,7 @@ name: aws-cloud-storage tasks_from: delete-folder.yml vars: - s3_path: "{{ plugin_storage }}/{{ folder_name }}" + s3_path: "{{ folder_name }}" tags: - content-editor - collection-editor @@ -195,7 +188,7 @@ name: aws-cloud-storage tasks_from: upload-folder.yml vars: - s3_path: "{{ plugin_storage }}/{{ folder_name }}" + s3_path: "{{ folder_name }}" local_file_or_folder_path: "{{ source_name }}" tags: - content-editor @@ -211,14 +204,14 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_path: "{{ plugin_storage }}/artefacts/content-player/content-player-{{ player_version_number }}.zip" + s3_path: "artefacts/content-player/content-player-{{ player_version_number }}.zip" local_file_or_folder_path: "{{ source_file_name }}" tags: - preview - block: - name: run the s3_copy.sh script - shell: "bash {{ s3_file_path }} {{ plugin_storage }} {{ source_file }} {{ aws_public_s3_bucket_name }}" + shell: "bash {{ s3_file_path }} {{ source_file }} {{ cloud_public_storage_accountname }}" async: 3600 poll: 10 environment: diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index d36b0e3721..3683202043 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -6,7 +6,7 @@ block: - name: set common azure variables set_fact: - blob_container_name: "{{ upload_storage }}" + blob_container_name: "" blob_file_name: "{{ destination_path }}" blob_container_folder_path: "/{{ destination_path }}" local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" @@ -20,6 +20,7 @@ tasks_from: blob-upload.yml vars: container_public_access: "container" + blob_container_name: "{{ cloud_storage_public_bucketname }}" storage_account_name: "{{ cloud_public_storage_accountname }}" storage_account_key: "{{ cloud_public_storage_secret }}" tags: @@ -32,32 +33,58 @@ tasks_from: blob-upload.yml vars: container_public_access: "off" + 
blob_container_name: "{{ cloud_storage_label_bucketname }}" storage_account_name: "{{ cloud_private_storage_accountname }}" storage_account_key: "{{ cloud_private_storage_secret }}" tags: - upload-label - block: - - name: upload batch of files to azure storage + - name: upload batch of files to azure storage - chatbot include_role: name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: container_public_access: "container" + blob_container_name: "{{ cloud_storage_chatbot_bucketname }}" storage_account_name: "{{ cloud_public_storage_accountname }}" storage_account_key: "{{ cloud_public_storage_secret }}" tags: - upload-chatbot-config - - upload-batch + + - block: + - name: upload batch of files to azure storage - csv-template + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + container_public_access: "container" + blob_container_name: "{{ cloud_storage_sourcing_bucketname }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" + tags: + - upload-csv-template + + - block: + - name: upload batch of files to azure storage - discussion-ui + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + container_public_access: "container" + blob_container_name: "{{ cloud_storage_discussionui_bucketname }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" + tags: + - upload-discussion-ui when: cloud_service_provider == "azure" +### GCP tasks ### - name: this block consists of tasks related to gcloud storage block: - name: set common gcloud variables set_fact: - dest_folder_name: "{{ upload_storage }}" - dest_file_name: "{{ destination_path }}" - dest_folder_path: "{{ destination_path }}" + gcp_path: "{{ destination_path }}" local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" tags: - always @@ -68,7 +95,7 @@ name: gcp-cloud-storage 
tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + gcp_bucket_name: "{{ cloud_storage_public_bucketname }}" tags: - upload-desktop-faq @@ -78,20 +105,39 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: - gcp_bucket_name: "{{ gcloud_private_bucket_name }}" + gcp_bucket_name: "{{ cloud_storage_label_bucketname }}" tags: - upload-label - block: - - name: upload batch of files to gcloud storage + - name: upload batch of files to gcloud storage - chatbot include_role: name: gcp-cloud-storage tasks_from: upload-batch.yml vars: - gcp_bucket_name: "{{ gcloud_public_bucket_name }}" + gcp_bucket_name: "{{ cloud_storage_chatbot_bucketname }}" tags: - upload-chatbot-config - - upload-batch + + - block: + - name: upload batch of files to gcloud storage - csv-template + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + gcp_bucket_name: "{{ cloud_storage_sourcing_bucketname }}" + tags: + - upload-csv-template + + - block: + - name: upload batch of files to gcloud storage - discussion-ui + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + gcp_bucket_name: "{{ cloud_storage_discussionui_bucketname }}" + tags: + - upload-discussion-ui when: cloud_service_provider == "gcloud" ######################## AWS tasks ######################################### @@ -100,9 +146,9 @@ block: - name: set common aws variables set_fact: - aws_default_region: "{{ aws_region }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" - s3_path: "{{ upload_storage }}/{{ destination_path }}" + s3_path: "{{ destination_path }}" tags: - always @@ -112,9 +158,9 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_bucket_name: "{{ aws_public_s3_bucket_name }}" - aws_access_key_id: "{{ aws_public_bucket_access_key }}" - aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + s3_bucket_name: "{{ 
cloud_storage_public_bucketname }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" tags: - upload-desktop-faq @@ -124,23 +170,45 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_bucket_name: "{{ aws_private_s3_bucket_name }}" - aws_access_key_id: "{{ aws_private_bucket_access_key }}" - aws_secret_access_key: "{{ aws_private_bucket_secret_access_key }}" + s3_bucket_name: "{{ cloud_storage_label_bucketname }}" + aws_access_key_id: "{{ cloud_private_storage_accountname }}" + aws_secret_access_key: "{{ cloud_private_storage_secret }}" tags: - upload-label - block: - - name: upload folder to aws s3 + - name: upload folder to aws s3 - chatbot include_role: name: aws-cloud-storage tasks_from: upload-folder.yml vars: - s3_bucket_name: "{{ aws_public_s3_bucket_name }}" - aws_access_key_id: "{{ aws_public_bucket_access_key }}" - aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + s3_bucket_name: "{{ cloud_storage_chatbot_bucketname }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" tags: - upload-chatbot-config - - upload-batch - when: cloud_service_provider == "aws" - \ No newline at end of file + + - block: + - name: upload folder to aws s3 - csv-template + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_bucket_name: "{{ cloud_storage_sourcing_bucketname }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" + tags: + - upload-csv-template + + - block: + - name: upload folder to aws s3 - discussion-ui + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_bucket_name: "{{ cloud_storage_discussionui_bucketname }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" + tags: + - 
upload-discussion-ui + when: cloud_service_provider == "aws" diff --git a/ansible/dial_upload-schema.yml b/ansible/dial_upload-schema.yml index 6572c12e55..757a80f6e5 100644 --- a/ansible/dial_upload-schema.yml +++ b/ansible/dial_upload-schema.yml @@ -2,13 +2,6 @@ gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" - # The vars: section is added for the below reason - # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name - # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo - # or other default files and just assign the value to the newly introduced common variable - # 3. After few releases, we will remove the older variables and use only the new variables across the repos - vars: - dial_plugin_storage: "{{ dial_plugin_container_name }}" tasks: - name: Create directories file: @@ -31,7 +24,7 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ dial_plugin_storage }}" + blob_container_name: "{{ cloud_storage_dial_bucketname }}" container_public_access: "blob" blob_container_folder_path: "/schemas/local" local_file_or_folder_path: "dial_schema_template_files" @@ -44,12 +37,12 @@ name: aws-cloud-storage tasks_from: upload-folder.yml vars: - s3_bucket_name: "{{ aws_public_s3_bucket_name }}" - aws_access_key_id: "{{ aws_public_bucket_access_key }}" - aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_dial_bucketname }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "dial_schema_template_files" - s3_path: "{{ dial_plugin_storage }}/schemas/local" + s3_path: "schemas/local" 
when: cloud_service_provider == "aws" - name: upload batch of files to gcloud storage diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 62cac6441d..8dfdd8a43d 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -128,7 +128,6 @@ cassandra_version: '3.9' cassandra_port: 9042 cassandra_rpc_address: 0.0.0.0 cassandra_restore_dir: "/home/{{ ansible_ssh_user }}/" -cassandra_backup_azure_container_name: cassandra-backup cassandra_backup_dir: /data/cassandra/backup ### Release 5.0.0 ### cassandra_multi_dc_enabled: false diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index 2b09dac310..d12b74433d 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -2,20 +2,13 @@ gather_facts: no vars_files: - "{{inventory_dir}}/secrets.yml" - # The vars: section is added for the below reason - # 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name - # 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo - # or other default files and just assign the value to the newly introduced common variable - # 3. 
After few releases, we will remove the older variables and use only the new variables across the repos - vars: - plugin_storage: "{{ plugin_container_name }}" tasks: - name: upload batch of files to azure storage include_role: name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ plugin_storage }}" + blob_container_name: "{{ cloud_storage_content_bucketname }}" container_public_access: "container" blob_container_folder_path: "/schemas/local" local_file_or_folder_path: "{{ source_name }}" @@ -28,12 +21,12 @@ name: aws-cloud-storage tasks_from: upload-folder.yml vars: - s3_bucket_name: "{{ aws_public_s3_bucket_name }}" - aws_access_key_id: "{{ aws_public_bucket_access_key }}" - aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_content_bucketname }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ source_name }}" - s3_path: "{{ plugin_storage }}/schemas/local" + s3_path: "schemas/local" when: cloud_service_provider == "aws" - name: upload batch of files to gcloud storage diff --git a/ansible/roles/cassandra-backup/defaults/main.yml b/ansible/roles/cassandra-backup/defaults/main.yml index dffec63096..4481570cc6 100644 --- a/ansible/roles/cassandra-backup/defaults/main.yml +++ b/ansible/roles/cassandra-backup/defaults/main.yml @@ -1,14 +1,4 @@ cassandra_root_dir: '/etc/cassandra' data_dir: '/var/lib/cassandra/data' - -cassandra_backup_azure_container_name: core-cassandra - -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. 
Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. After few releases, we will remove the older variables and use only the new variables across the repos -cassandra_backup_storage: "{{ cassandra_backup_azure_container_name }}" - cloud_storage_cassandrabackup_bucketname: "{{cloud_storage_management_bucketname}}" cloud_storage_cassandrabackup_foldername: 'cassandra-backup' diff --git a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml index 304385515c..ce0e646662 100755 --- a/ansible/roles/cassandra-backup/tasks/main.yml +++ b/ansible/roles/cassandra-backup/tasks/main.yml @@ -37,7 +37,7 @@ name: azure-cloud-storage tasks_from: upload-using-azcopy.yml vars: - blob_container_name: "{{ cloud_storage_cassandrabackup_bucketname }}" + blob_container_name: "{{ cloud_storage_cassandrabackup_foldername }}" container_public_access: "off" blob_container_folder_path: "" local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}" @@ -51,11 +51,11 @@ tasks_from: upload-folder.yml vars: local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}" - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - s3_path: "{{ cassandra_backup_storage }}" - aws_default_region: "{{ aws_region }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" + s3_bucket_name: "{{ cloud_storage_cassandrabackup_bucketname }}" + s3_path: "{{ cloud_storage_cassandrabackup_foldername }}" + aws_default_region: "{{ cloud_public_storage_region }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "aws" - name: upload file to gcloud storage diff --git a/ansible/roles/cassandra-restore/defaults/main.yml 
b/ansible/roles/cassandra-restore/defaults/main.yml index 834c103d58..9ac0c38f95 100644 --- a/ansible/roles/cassandra-restore/defaults/main.yml +++ b/ansible/roles/cassandra-restore/defaults/main.yml @@ -1,11 +1,4 @@ user_home: "/home/{{ ansible_ssh_user }}/" -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. After few releases, we will remove the older variables and use only the new variables across the repos -cassandra_backup_storage: "{{ cassandra_backup_azure_container_name }}" - cloud_storage_cassandrabackup_bucketname: "{{cloud_storage_management_bucketname}}" cloud_storage_cassandrabackup_foldername: 'cassandra-backup' diff --git a/ansible/roles/cassandra-restore/tasks/main.yml b/ansible/roles/cassandra-restore/tasks/main.yml index 304c9b8b09..4bd8c05991 100755 --- a/ansible/roles/cassandra-restore/tasks/main.yml +++ b/ansible/roles/cassandra-restore/tasks/main.yml @@ -11,11 +11,11 @@ name: azure-cloud-storage tasks_from: blob-download.yml vars: - blob_container_name: "{{ cassandra_backup_storage }}" + blob_container_name: "{{ cloud_storage_cassandrabackup_foldername }}" blob_file_name: "{{ cassandra_restore_gzip_file_name }}" local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: download a file from aws s3 @@ -24,12 +24,12 @@ name: aws-cloud-storage tasks_from: download.yml vars: - s3_bucket_name: "{{ 
aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_cassandrabackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}" - s3_path: "{{ cassandra_backup_storage }}/{{ cassandra_restore_gzip_file_name }}" + s3_path: "{{ cloud_storage_cassandrabackup_foldername }}/{{ cassandra_restore_gzip_file_name }}" when: cloud_service_provider == "aws" - name: download file from gcloud storage diff --git a/ansible/roles/cert-templates/defaults/main.yml b/ansible/roles/cert-templates/defaults/main.yml index 1ca7f44958..c8710dd9d9 100644 --- a/ansible/roles/cert-templates/defaults/main.yml +++ b/ansible/roles/cert-templates/defaults/main.yml @@ -2,10 +2,3 @@ certs_badge_upload_retry_count: 3 certs_badge_criteria: "" certs_badge_batch_id: "" certs_badge_key_id: "" - -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -cert_service_storage: "{{ cert_service_container_name }}" diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index 0700f1e61a..0caf2b1bfe 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -36,7 +36,7 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ cert_service_storage }}" + blob_container_name: "{{ cloud_storage_certservice_bucketname }}" container_public_access: "off" blob_container_folder_path: "" local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" @@ -49,12 +49,12 @@ name: aws-cloud-storage tasks_from: upload-folder.yml vars: - s3_bucket_name: "{{ aws_private_s3_bucket_name }}" - aws_access_key_id: "{{ aws_private_bucket_access_key }}" - aws_secret_access_key: "{{ aws_private_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_certservice_bucketname }}" + aws_access_key_id: "{{ cloud_private_storage_accountname }}" + aws_secret_access_key: "{{ cloud_private_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" - s3_path: "{{ cert_service_storage }}" + s3_path: "" when: cloud_service_provider == "aws" - name: upload batch of files to gcloud storage diff --git a/ansible/roles/desktop-deploy/defaults/main.yml b/ansible/roles/desktop-deploy/defaults/main.yml index 06bdd6fe1f..2cff6657c7 100644 --- a/ansible/roles/desktop-deploy/defaults/main.yml +++ b/ansible/roles/desktop-deploy/defaults/main.yml @@ -1,10 +1,3 @@ --- time: "YEAR-MONTH-DATE-HOUR-MINUTE-SECOND-INSTALLERTYPE" offline_installer_container_name: "{{env}}-offlineinstaller" - -# This variable is added for the below reason - -# 1. 
Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. After few releases, we will remove the older variables and use only the new variables across the repos -offline_installer_storage: "{{ offline_installer_container_name }}" diff --git a/ansible/roles/desktop-deploy/tasks/main.yml b/ansible/roles/desktop-deploy/tasks/main.yml index 70fa94cb1d..ba077b778f 100644 --- a/ansible/roles/desktop-deploy/tasks/main.yml +++ b/ansible/roles/desktop-deploy/tasks/main.yml @@ -53,7 +53,7 @@ block: - name: set common azure variables set_fact: - blob_container_name: "{{ offline_installer_storage }}" + blob_container_name: "{{ cloud_storage_offlineinstaller_bucketname }}" container_public_access: "blob" storage_account_name: "{{ cloud_public_storage_accountname }}" storage_account_key: "{{ cloud_public_storage_secret }}" @@ -81,10 +81,10 @@ block: - name: set common aws variables set_fact: - s3_bucket_name: "{{ aws_public_s3_bucket_name }}" - aws_access_key_id: "{{ aws_public_bucket_access_key }}" - aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_offlineinstaller_bucketname }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" - name: upload batch of files to aws s3 include_role: diff --git a/ansible/roles/grafana-backup/defaults/main.yml b/ansible/roles/grafana-backup/defaults/main.yml index b6850bee97..70bd76ff82 100644 --- a/ansible/roles/grafana-backup/defaults/main.yml +++ 
b/ansible/roles/grafana-backup/defaults/main.yml @@ -4,13 +4,6 @@ grafana_data_dir: /var/dockerdata/grafana/grafana.db # Override these values in group_vars sunbird_management_storage_account_name: sunbird_management_storage_account_key: '' -grafana_backup_azure_container_name: grafana-backup -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. After few releases, we will remove the older variables and use only the new variables across the repos -grafana_backup_storage: "{{ grafana_backup_azure_container_name }}" cloud_storage_grafanabackup_bucketname: "{{ cloud_storage_management_bucketname }}" cloud_storage_grafanabackup_foldername: 'grafana-backup' diff --git a/ansible/roles/grafana-backup/tasks/main.yml b/ansible/roles/grafana-backup/tasks/main.yml index c309d409fb..90dc3526ca 100644 --- a/ansible/roles/grafana-backup/tasks/main.yml +++ b/ansible/roles/grafana-backup/tasks/main.yml @@ -24,12 +24,12 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ grafana_backup_storage }}" + blob_container_name: "{{ cloud_storage_grafanabackup_foldername }}" container_public_access: "off" blob_file_name: "{{ grafana_backup_gzip_file_name }}" local_file_or_folder_path: "{{ grafana_backup_gzip_file_path }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: upload file to aws s3 @@ -37,12 +37,12 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - 
s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_grafanabackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ grafana_backup_gzip_file_path }}" - s3_path: "{{ grafana_backup_storage }}/{{ grafana_backup_gzip_file_name }}" + s3_path: "{{ cloud_storage_grafanabackup_foldername }}/{{ grafana_backup_gzip_file_name }}" when: cloud_service_provider == "aws" - name: upload file to gcloud storage diff --git a/ansible/roles/jenkins-backup-upload/defaults/main.yml b/ansible/roles/jenkins-backup-upload/defaults/main.yml index d9c5c9f58a..9fd90050bf 100644 --- a/ansible/roles/jenkins-backup-upload/defaults/main.yml +++ b/ansible/roles/jenkins-backup-upload/defaults/main.yml @@ -1,15 +1,7 @@ jenkins_user: jenkins jenkins_group: jenkins jenkins_backup_base_dir: /var/lib/jenkins/jenkins-backup -jenkins_backup_azure_container_name: jenkins-backup jenkins_backup_max_delay_in_days: 1 -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -jenkins_backup_storage: "{{ jenkins_backup_azure_container_name }}" - cloud_storage_jenkinsbackup_bucketname: "{{cloud_storage_management_bucketname}}" cloud_storage_jenkinsbackup_foldername: 'jenkins-backup' diff --git a/ansible/roles/jenkins-backup-upload/tasks/main.yml b/ansible/roles/jenkins-backup-upload/tasks/main.yml index 018a9498f1..89d8f3e29c 100644 --- a/ansible/roles/jenkins-backup-upload/tasks/main.yml +++ b/ansible/roles/jenkins-backup-upload/tasks/main.yml @@ -17,12 +17,12 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ jenkins_backup_storage }}" + blob_container_name: "{{ cloud_storage_jenkinsbackup_foldername }}" container_public_access: "off" blob_file_name: "{{ LATEST_BACKUP_DIR.stdout }}.zip" local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: upload file to aws s3 @@ -30,12 +30,12 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_jenkinsbackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" - s3_path: "{{ jenkins_backup_storage }}/{{ LATEST_BACKUP_DIR.stdout }}.zip" + s3_path: "{{ cloud_storage_jenkinsbackup_foldername }}/{{ LATEST_BACKUP_DIR.stdout 
}}.zip" when: cloud_service_provider == "aws" - name: upload file to gcloud storage diff --git a/ansible/roles/mongodb-backup/defaults/main.yml b/ansible/roles/mongodb-backup/defaults/main.yml index 1d54a69541..547137f0ca 100644 --- a/ansible/roles/mongodb-backup/defaults/main.yml +++ b/ansible/roles/mongodb-backup/defaults/main.yml @@ -1,12 +1,4 @@ mongo_backup_dir: '/tmp/mongo-backup' -mongo_backup_azure_container_name: "mongodb-backup" - -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. After few releases, we will remove the older variables and use only the new variables across the repos -mongo_backup_storage: "{{ mongo_backup_azure_container_name }}" cloud_storage_mongodbbackup_bucketname: "{{cloud_storage_management_bucketname}}" cloud_storage_mongodbbackup_foldername: 'mongodb-backup' diff --git a/ansible/roles/mongodb-backup/tasks/main.yml b/ansible/roles/mongodb-backup/tasks/main.yml index 94b157648a..f51216b14f 100644 --- a/ansible/roles/mongodb-backup/tasks/main.yml +++ b/ansible/roles/mongodb-backup/tasks/main.yml @@ -19,12 +19,12 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ mongo_backup_storage }}" + blob_container_name: "{{ cloud_storage_mongodbbackup_foldername }}" container_public_access: "off" blob_file_name: "{{ mongo_backup_file_name }}.tar.gz" local_file_or_folder_path: "{{ mongo_backup_file_path }}.tar.gz" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: 
cloud_service_provider == "azure" - name: upload file to aws s3 @@ -32,10 +32,10 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_bucket_name: "{{ aws_public_s3_bucket_name }}" - aws_access_key_id: "{{ aws_public_bucket_access_key }}" - aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_mongodbbackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ mongo_backup_file_path }}.tar.gz" s3_path: "{{ mongo_backup_storage }}/{{ mongo_backup_file_name }}.tar.gz" when: cloud_service_provider == "aws" diff --git a/ansible/roles/postgres-managed-service-backup/defaults/main.yml b/ansible/roles/postgres-managed-service-backup/defaults/main.yml index 6af37d7f96..ed62efd66b 100644 --- a/ansible/roles/postgres-managed-service-backup/defaults/main.yml +++ b/ansible/roles/postgres-managed-service-backup/defaults/main.yml @@ -1,7 +1,5 @@ postgresql_user: postgres postgresql_backup_dir: /tmp/postgres -postgresql_backup_azure_container_name: postgresql-backup - db_name: db: ['keycloak', 'api_manager_{{ postgres_env }}', 'quartz'] @@ -9,12 +7,5 @@ postgres_admin_user: "{{sunbird_pg_user}}" postgres_hostname: "{{groups['postgresql-master-1'][0]}}" postgres_password: "{{postgres_password}}" -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -postgresql_backup_storage: "{{ postgresql_backup_azure_container_name }}" - cloud_storage_postgresqlbackup_bucketname: "{{ cloud_storage_management_bucketname }}" cloud_storage_postgresqlbackup_foldername: postgresql-backup diff --git a/ansible/roles/postgres-managed-service-backup/tasks/main.yml b/ansible/roles/postgres-managed-service-backup/tasks/main.yml index 660814d9bd..ba101e2509 100644 --- a/ansible/roles/postgres-managed-service-backup/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-backup/tasks/main.yml @@ -46,12 +46,12 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ postgresql_backup_storage }}" + blob_container_name: "{{ cloud_storage_postgresqlbackup_foldername }}" container_public_access: "off" blob_file_name: "{{ postgresql_backup_gzip_file_name }}.zip" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: upload file to aws s3 @@ -59,12 +59,12 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" - s3_path: "{{ postgresql_backup_storage }}/{{ postgresql_backup_gzip_file_name }}.zip" + 
s3_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ postgresql_backup_gzip_file_name }}.zip" when: cloud_service_provider == "aws" - name: upload file to gcloud storage diff --git a/ansible/roles/postgres-managed-service-restore/defaults/main.yml b/ansible/roles/postgres-managed-service-restore/defaults/main.yml index 41164b445c..8893425000 100644 --- a/ansible/roles/postgres-managed-service-restore/defaults/main.yml +++ b/ansible/roles/postgres-managed-service-restore/defaults/main.yml @@ -1,6 +1,4 @@ postgresql_restore_dir: /tmp/postgres-restore -postgres_backup_azure_container_name: postgresql-backup - db: name: ['keycloak', 'api_manager_{{ postgres_env }}', 'quartz'] role: ['keycloak', 'api_manager_{{ postgres_env }}', 'quartz'] @@ -13,13 +11,5 @@ postgres_password: postgres_hostname: postgres_env: -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -postgres_backup_storage: "{{ postgres_backup_azure_container_name }}" - - cloud_storage_postgresqlbackup_bucketname: "{{ cloud_storage_management_bucketname }}" cloud_storage_postgresqlbackup_foldername: postgresql-backup diff --git a/ansible/roles/postgres-managed-service-restore/tasks/main.yml b/ansible/roles/postgres-managed-service-restore/tasks/main.yml index 8efa3dd561..c3d518db56 100644 --- a/ansible/roles/postgres-managed-service-restore/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-restore/tasks/main.yml @@ -14,11 +14,11 @@ name: azure-cloud-storage tasks_from: blob-download.yml vars: - blob_container_name: "{{ postgres_backup_storage }}" + blob_container_name: "{{ cloud_storage_postgresqlbackup_foldername }}" blob_file_name: "{{ postgres_backup_filename }}" local_file_or_folder_path: "{{ postgres_restore_dir }}/{{ postgres_backup_filepath }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: download a file from aws s3 @@ -26,12 +26,12 @@ name: aws-cloud-storage tasks_from: download.yml vars: - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_management_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ postgres_restore_dir }}/{{ postgres_backup_filepath }}" - s3_path: "{{ postgres_backup_storage }}/{{ postgres_backup_filename }}" + s3_path: "{{ 
cloud_storage_postgresqlbackup_foldername }}/{{ postgres_backup_filename }}" when: cloud_service_provider == "aws" - name: download file from gcloud storage diff --git a/ansible/roles/postgresql-backup/defaults/main.yml b/ansible/roles/postgresql-backup/defaults/main.yml index 30902eac17..341b1c23ed 100644 --- a/ansible/roles/postgresql-backup/defaults/main.yml +++ b/ansible/roles/postgresql-backup/defaults/main.yml @@ -1,13 +1,5 @@ postgresql_backup_dir: /tmp/postgresql-backup postgresql_user: postgres -postgresql_backup_azure_container_name: postgresql-backup - -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -postgresql_backup_storage: "{{ postgresql_backup_azure_container_name }}" cloud_storage_postgresqlbackup_bucketname: "{{ cloud_storage_management_bucketname }}" cloud_storage_postgresqlbackup_foldername: postgresql-backup diff --git a/ansible/roles/postgresql-backup/tasks/main.yml b/ansible/roles/postgresql-backup/tasks/main.yml index 6710e49503..5b3303bf97 100644 --- a/ansible/roles/postgresql-backup/tasks/main.yml +++ b/ansible/roles/postgresql-backup/tasks/main.yml @@ -18,12 +18,12 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ postgresql_backup_storage }}" + blob_container_name: "{{ cloud_storage_postgresqlbackup_foldername }}" container_public_access: "off" blob_file_name: "{{ postgresql_backup_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: upload file to aws s3 @@ -31,12 +31,12 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" - s3_path: "{{ postgresql_backup_storage }}/{{ postgresql_backup_gzip_file_name }}" + s3_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ 
postgresql_backup_gzip_file_name }}" when: cloud_service_provider == "aws" - name: upload file to gcloud storage diff --git a/ansible/roles/postgresql-restore/defaults/main.yml b/ansible/roles/postgresql-restore/defaults/main.yml index 2bcc525469..feeed7d6bb 100644 --- a/ansible/roles/postgresql-restore/defaults/main.yml +++ b/ansible/roles/postgresql-restore/defaults/main.yml @@ -3,14 +3,6 @@ postgresql_user: postgres postgresql_port: 5432 postgresql_cluster_version: 9.5 postgresql_cluster_name: main -postgresql_restore_azure_container_name: postgresql-backup - -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -postgresql_restore_storage: "{{ postgresql_restore_azure_container_name }}" cloud_storage_postgresqlbackup_bucketname: "{{ cloud_storage_management_bucketname }}" cloud_storage_postgresqlbackup_foldername: postgresql-backup diff --git a/ansible/roles/postgresql-restore/tasks/main.yml b/ansible/roles/postgresql-restore/tasks/main.yml index b3411c5445..e076590f23 100644 --- a/ansible/roles/postgresql-restore/tasks/main.yml +++ b/ansible/roles/postgresql-restore/tasks/main.yml @@ -9,11 +9,11 @@ name: azure-cloud-storage tasks_from: blob-download.yml vars: - blob_container_name: "{{ postgresql_restore_storage }}" + blob_container_name: "{{ cloud_storage_postgresqlbackup_foldername }}" blob_file_name: "{{ postgresql_restore_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_restore_gzip_file_path }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: download a file from aws s3 @@ -21,12 +21,12 @@ name: aws-cloud-storage tasks_from: download.yml vars: - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ postgresql_restore_gzip_file_path }}" - s3_path: "{{ postgres_backup_storage }}/{{ postgresql_restore_gzip_file_name }}" + s3_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ 
postgresql_restore_gzip_file_name }}" when: cloud_service_provider == "aws" - name: download file from gcloud storage diff --git a/ansible/roles/prometheus-backup-v2/defaults/main.yml b/ansible/roles/prometheus-backup-v2/defaults/main.yml index 430c9df4cb..919dcd82d9 100644 --- a/ansible/roles/prometheus-backup-v2/defaults/main.yml +++ b/ansible/roles/prometheus-backup-v2/defaults/main.yml @@ -1,13 +1,5 @@ --- # defaults file for ansible/roles/prometheus-backup-v2 -prometheus_backup_azure_container_name: prometheus-backup - -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. After few releases, we will remove the older variables and use only the new variables across the repos -prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" cloud_storage_prometheusbackup_bucketname: "{{ cloud_storage_management_bucketname }}" cloud_storage_prometheusbackup_foldername: prometheus-backup diff --git a/ansible/roles/prometheus-backup-v2/tasks/main.yml b/ansible/roles/prometheus-backup-v2/tasks/main.yml index 9b25fc465c..4a65bb6f8f 100644 --- a/ansible/roles/prometheus-backup-v2/tasks/main.yml +++ b/ansible/roles/prometheus-backup-v2/tasks/main.yml @@ -26,7 +26,7 @@ blob_file_name: "{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" local_file_or_folder_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: 
upload file to aws s3 @@ -34,12 +34,12 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_prometheusbackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" - s3_path: "{{ prometheus_backup_storage }}/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" + s3_path: "{{ cloud_storage_prometheusbackup_foldername }}/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" when: cloud_service_provider == "aws" - name: upload file to gcloud storage diff --git a/ansible/roles/prometheus-backup/defaults/main.yml b/ansible/roles/prometheus-backup/defaults/main.yml index 3bba75124c..e5a4ecdcb3 100644 --- a/ansible/roles/prometheus-backup/defaults/main.yml +++ b/ansible/roles/prometheus-backup/defaults/main.yml @@ -1,17 +1,8 @@ prometheus_backup_dir: /tmp/prometheus-backup -prometheus_backup_azure_container_name: prometheus-backup - # Set these vars per environment as show in example below # Override these values in group_vars backup_storage_name: backups backup_storage_key: '' -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" - cloud_storage_prometheusbackup_bucketname: "{{ cloud_storage_management_bucketname }}" cloud_storage_prometheusbackup_foldername: prometheus-backup diff --git a/ansible/roles/prometheus-backup/tasks/main.yml b/ansible/roles/prometheus-backup/tasks/main.yml index 14cc74a41a..10d8e2fb3b 100644 --- a/ansible/roles/prometheus-backup/tasks/main.yml +++ b/ansible/roles/prometheus-backup/tasks/main.yml @@ -30,12 +30,12 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ prometheus_backup_storage }}" + blob_container_name: "{{ cloud_storage_prometheusbackup_foldername }}" container_public_access: "off" blob_file_name: "{{ prometheus_backup_gzip_file_name }}" local_file_or_folder_path: "{{ prometheus_backup_gzip_file_path }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: upload file to aws s3 @@ -43,12 +43,12 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_prometheusbackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ prometheus_backup_gzip_file_path }}" - s3_path: "{{ prometheus_backup_storage }}/{{ prometheus_backup_gzip_file_name }}" + s3_path: "{{ cloud_storage_prometheusbackup_foldername }}/{{ 
prometheus_backup_gzip_file_name }}" when: cloud_service_provider == "aws" - name: upload file to gcloud storage diff --git a/ansible/roles/prometheus-restore/defaults/main.yml b/ansible/roles/prometheus-restore/defaults/main.yml index 580e865060..f5f1511216 100644 --- a/ansible/roles/prometheus-restore/defaults/main.yml +++ b/ansible/roles/prometheus-restore/defaults/main.yml @@ -1,12 +1,3 @@ prometheus_backup_dir: /tmp/prometheus-backup -prometheus_backup_azure_container_name: prometheus-backup - -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. After few releases, we will remove the older variables and use only the new variables across the repos -prometheus_backup_storage: "{{ prometheus_backup_azure_container_name }}" - cloud_storage_prometheusbackup_bucketname: "{{ cloud_storage_management_bucketname }}" cloud_storage_prometheusbackup_foldername: prometheus-backup diff --git a/ansible/roles/prometheus-restore/tasks/main.yml b/ansible/roles/prometheus-restore/tasks/main.yml index 0c9b0749a9..440b777fe4 100644 --- a/ansible/roles/prometheus-restore/tasks/main.yml +++ b/ansible/roles/prometheus-restore/tasks/main.yml @@ -7,11 +7,11 @@ name: azure-cloud-storage tasks_from: blob-download.yml vars: - blob_container_name: "{{ prometheus_backup_storage }}" + blob_container_name: "{{ cloud_storage_prometheusbackup_foldername }}" blob_file_name: "{{ prometheus_backup_filename }}" local_file_or_folder_path: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filepath }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ 
azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: download a file from aws s3 @@ -19,12 +19,12 @@ name: aws-cloud-storage tasks_from: download.yml vars: - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_prometheusbackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filepath }}" - s3_path: "{{ prometheus_backup_storage }}/{{ prometheus_backup_filename }}" + s3_path: "{{ cloud_storage_prometheusbackup_foldername }}/{{ prometheus_backup_filename }}" when: cloud_service_provider == "aws" - name: download file from gcloud storage diff --git a/ansible/roles/redis-backup/defaults/main.yml b/ansible/roles/redis-backup/defaults/main.yml index 6aacb354d6..54b7c60a89 100644 --- a/ansible/roles/redis-backup/defaults/main.yml +++ b/ansible/roles/redis-backup/defaults/main.yml @@ -1,13 +1,5 @@ redis_backup_dir: /tmp/redis-backup -nodebb_redis_backup_azure_container_name: nodebb-redis-backup learner_user: learning -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -nodebb_redis_backup_storage: "{{ nodebb_redis_backup_azure_container_name }}" - cloud_storage_redisbackup_bucketname: "{{ cloud_storage_management_bucketname }}" cloud_storage_redisbackup_foldername: nodebb-redis-backup diff --git a/ansible/roles/redis-backup/tasks/main.yml b/ansible/roles/redis-backup/tasks/main.yml index 9f0c15a815..f1cf35622f 100644 --- a/ansible/roles/redis-backup/tasks/main.yml +++ b/ansible/roles/redis-backup/tasks/main.yml @@ -18,12 +18,12 @@ name: azure-cloud-storage tasks_from: blob-upload.yml vars: - blob_container_name: "{{ nodebb_redis_backup_storage }}" + blob_container_name: "{{ cloud_storage_redisbackup_foldername }}" container_public_access: "off" blob_file_name: "{{ redis_backup_file_name }}" local_file_or_folder_path: "{{ redis_backup_file_path }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - storage_account_key: "{{ azure_management_storage_account_key }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: upload file to aws s3 @@ -31,12 +31,12 @@ name: aws-cloud-storage tasks_from: upload.yml vars: - s3_bucket_name: "{{ aws_management_s3_bucket_name }}" - aws_access_key_id: "{{ aws_management_bucket_access_key }}" - aws_secret_access_key: "{{ aws_management_bucket_secret_access_key }}" - aws_default_region: "{{ aws_region }}" + s3_bucket_name: "{{ cloud_storage_redisbackup_bucketname }}" + aws_access_key_id: "{{ cloud_management_storage_accountname }}" + aws_secret_access_key: "{{ cloud_management_storage_secret }}" + aws_default_region: "{{ cloud_public_storage_region }}" local_file_or_folder_path: "{{ redis_backup_file_path }}" - s3_path: "{{ nodebb_redis_backup_storage }}/{{ redis_backup_file_name }}" + s3_path: "{{ cloud_storage_redisbackup_foldername }}/{{ redis_backup_file_name }}" when: cloud_service_provider == "aws" - name: upload file 
to gcloud storage diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index 88d17aba81..cf90e343d1 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -20,7 +20,7 @@ name: azure-cloud-storage tasks_from: blob-upload-batch.yml vars: - blob_container_name: "{{ upload_storage }}" + blob_container_name: "{{ cloud_storage_public_bucketname }}" container_public_access: "container" blob_container_folder_path: "" local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" @@ -35,12 +35,12 @@ name: aws-cloud-storage tasks_from: upload-folder.yml vars: - s3_bucket_name: "{{ aws_public_s3_bucket_name }}" - aws_default_region: "{{ aws_region }}" - aws_access_key_id: "{{ aws_public_bucket_access_key }}" - aws_secret_access_key: "{{ aws_public_bucket_secret_access_key }}" + s3_bucket_name: "{{ cloud_storage_public_bucketname }}" + aws_default_region: "{{ cloud_public_storage_region }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" - s3_path: "{{ upload_storage }}" + s3_path: "" with_items: - "{{ source_folder.split(',') }}" when: cloud_service_provider == "aws" @@ -50,13 +50,59 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: - dest_folder_name: "{{ upload_storage }}" + gcp_bucket_name: "{{ cloud_storage_public_bucketname }}" dest_folder_path: "" local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" - gcp_bucket_name: "{{ gcloud_public_bucket_name }}" with_items: - "{{ source_folder.split(',') }}" when: cloud_service_provider == "gcloud" tags: - upload-faqs + +- hosts: localhost + vars_files: + - "{{inventory_dir}}/secrets.yml" + tasks: + - name: upload batch of files to azure storage + include_role: + name: azure-cloud-storage + tasks_from: blob-upload-batch.yml + vars: + blob_container_name: "{{ cloud_storage_content_bucketname }}" + container_public_access: "container" + 
blob_container_folder_path: "" + local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" + storage_account_name: "{{ cloud_public_storage_accountname }}" + storage_account_key: "{{ cloud_public_storage_secret }}" + with_items: + - "{{ source_folder.split(',') }}" + when: cloud_service_provider == "azure" + + - name: upload batch of files to s3 + include_role: + name: aws-cloud-storage + tasks_from: upload-folder.yml + vars: + s3_bucket_name: "{{ cloud_storage_content_bucketname }}" + aws_default_region: "{{ cloud_public_storage_region }}" + aws_access_key_id: "{{ cloud_public_storage_accountname }}" + aws_secret_access_key: "{{ cloud_public_storage_secret }}" + local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" + s3_path: "" + with_items: + - "{{ source_folder.split(',') }}" + when: cloud_service_provider == "aws" + + - name: upload batch of files to gcloud storage + include_role: + name: gcp-cloud-storage + tasks_from: upload-batch.yml + vars: + gcp_bucket_name: "{{ cloud_storage_content_bucketname }}" + dest_folder_path: "" + local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" + with_items: + - "{{ source_folder.split(',') }}" + when: cloud_service_provider == "gcloud" + tags: - upload-RC-schema diff --git a/deploy/jenkins/jobs/Deploy/jobs/Vidyadaan/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml b/deploy/jenkins/jobs/Deploy/jobs/Vidyadaan/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml index 0236cab0eb..1363bd3fcf 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/Vidyadaan/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/Vidyadaan/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml @@ -83,7 +83,7 @@ return """<b>This parameter is not used</b>""" - upload-batch + upload-csv-template @@ -102,15 +102,6 @@ return """<b>This parameter is not used</b>""" false - - upload_storage - - - - content-service - - - source_path diff --git 
a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml index defc3a0ddd..79d963a0e7 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadChatbotConfig/config.xml @@ -84,15 +84,6 @@ return """<b>This parameter is not used</b>""" master false - - upload_storage - - - - chatbot - - - source_path @@ -169,4 +160,4 @@ return """<b>This parameter is not used</b>""" false - \ No newline at end of file + diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml index d87aac4ee3..72d310489e 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadCollectionHierarchyCSV/config.xml @@ -83,7 +83,7 @@ return """<b>This parameter is not used</b>""" - upload-batch + upload-csv-template @@ -102,15 +102,6 @@ return """<b>This parameter is not used</b>""" false - - upload_storage - - - - sourcing - - - source_path @@ -175,4 +166,4 @@ return """<b>This parameter is not used</b>""" false - \ No newline at end of file + diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml index a801645925..66d749e86a 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadDiscussionUIDocs/config.xml @@ -159,15 +159,6 @@ return """<b>This parameter is not used</b>""" ET_FORMATTED_HTML true - - upload_storage - - - - discussion-ui - - - 
source_path @@ -191,7 +182,7 @@ return """<b>This parameter is not used</b>""" - upload-batch + upload-discussion-ui diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml index 85b7c81efb..9a6fccc4b5 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadFAQs/config.xml @@ -71,15 +71,6 @@ return """<b>This parameter is not used</b>""" ET_FORMATTED_HTML true - - upload_storage - - - - public - - - tag diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadPortalLabel/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadPortalLabel/config.xml index a75d9ee220..a1b8680986 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadPortalLabel/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Kubernetes/jobs/UploadPortalLabel/config.xml @@ -71,15 +71,6 @@ return """<b>This parameter is not used</b>""" ET_FORMATTED_HTML true - - upload_storage - - - - label - - - destination_path diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/UploadRCSchema/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/UploadRCSchema/config.xml index ea47b8d14e..1ff2974d6d 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/UploadRCSchema/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Sunbird-RC/jobs/UploadRCSchema/config.xml @@ -71,15 +71,6 @@ return """<b>This parameter is not used</b>""" ET_FORMATTED_HTML true - - upload_storage - - - - sunbird-content-dev - - - tag diff --git a/pipelines/deploy/desktop-faq/Jenkinsfile b/pipelines/deploy/desktop-faq/Jenkinsfile index d282ec2884..1b1a8d7f0d 100644 --- a/pipelines/deploy/desktop-faq/Jenkinsfile +++ b/pipelines/deploy/desktop-faq/Jenkinsfile @@ -25,7 +25,7 @@ node() { jobName = 
sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/desktop-faq-upload.yml" - ansibleExtraArgs = "--extra-vars \" upload_storage=${params.upload_storage} src_file_path=${params.src_file_path} destination_path=${params.destination_path} env_name=$envDir\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" + ansibleExtraArgs = "--extra-vars \" src_file_path=${params.src_file_path} destination_path=${params.destination_path} env_name=$envDir\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) diff --git a/pipelines/upload/chatbot/Jenkinsfile b/pipelines/upload/chatbot/Jenkinsfile index c97597c44c..da0774f382 100644 --- a/pipelines/upload/chatbot/Jenkinsfile +++ b/pipelines/upload/chatbot/Jenkinsfile @@ -38,7 +38,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/desktop-faq-upload.yml" - ansibleExtraArgs = "--extra-vars \" upload_storage=${params.upload_storage} src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" + ansibleExtraArgs = "--extra-vars \" src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) diff --git a/pipelines/upload/discussion-UI/Jenkinsfile b/pipelines/upload/discussion-UI/Jenkinsfile index c4d794fb3e..067158e445 100644 --- a/pipelines/upload/discussion-UI/Jenkinsfile +++ b/pipelines/upload/discussion-UI/Jenkinsfile @@ -30,7 +30,7 @@ node() { unzip 
${artifact} """ ansiblePlaybook = "${currentWs}/ansible/desktop-faq-upload.yml" - ansibleExtraArgs = "--extra-vars \" upload_storage=${params.upload_storage} src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" + ansibleExtraArgs = "--extra-vars \" src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" values.put('ansiblePlaybook', ansiblePlaybook) values.put('ansibleExtraArgs', ansibleExtraArgs) println values diff --git a/pipelines/upload/faqs/Jenkinsfile b/pipelines/upload/faqs/Jenkinsfile index 4f18801b4e..f44c1b5020 100644 --- a/pipelines/upload/faqs/Jenkinsfile +++ b/pipelines/upload/faqs/Jenkinsfile @@ -25,7 +25,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/uploadFAQs.yml" - ansibleExtraArgs = "--tags ${params.tag} --extra-vars \"upload_storage=${params.upload_storage} source_folder=${params.source_folder}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" + ansibleExtraArgs = "--tags ${params.tag} --extra-vars \" source_folder=${params.source_folder}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) diff --git a/pipelines/upload/portal-csv/Jenkinsfile b/pipelines/upload/portal-csv/Jenkinsfile index 6e8453d3e2..502fadcdbb 100644 --- a/pipelines/upload/portal-csv/Jenkinsfile +++ b/pipelines/upload/portal-csv/Jenkinsfile @@ -27,7 +27,7 @@ node() { jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() ansiblePlaybook = "${currentWs}/ansible/desktop-faq-upload.yml" - ansibleExtraArgs = "--extra-vars \" 
upload_storage=${params.upload_storage} src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" + ansibleExtraArgs = "--extra-vars \" src_file_path=${params.source_path} destination_path=${params.destination_path}\" --vault-password-file /var/lib/jenkins/secrets/vault-pass --tags ${params.tag}" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) From 8f5efcd39ec159cd90593895d679bcf25c9d85ec Mon Sep 17 00:00:00 2001 From: Kumar Gauraw Date: Mon, 12 Dec 2022 15:22:04 +0530 Subject: [PATCH 131/616] Issue #IQ-193 feat: updated config of assessment --- ansible/roles/stack-sunbird/defaults/main.yml | 12 +++++- .../assessment-service_application.conf | 39 ++++++++++--------- 2 files changed, 32 insertions(+), 19 deletions(-) diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 0c8c86242d..036fda51bd 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -1052,4 +1052,14 @@ kong_desktop_device_consumer_names_for_opa: '["desktop"]' cloudstorage_relative_path_prefix_content: "CONTENT_STORAGE_BASE_PATH" cloudstorage_relative_path_prefix_dial: "DIAL_STORAGE_BASE_PATH" -cloudstorage_metadata_list: '["appIcon", "artifactUrl", "posterImage", "previewUrl", "thumbnail", "assetsMap", "certTemplate", "itemSetPreviewUrl", "grayScaleAppIcon", "sourceURL", "variants", "downloadUrl", "streamingUrl", "toc_url", "data", "question", "solutions", "editorState", "media", "pdfUrl"]' \ No newline at end of file +cloudstorage_metadata_list: '["appIcon", "artifactUrl", "posterImage", "previewUrl", "thumbnail", "assetsMap", "certTemplate", "itemSetPreviewUrl", "grayScaleAppIcon", "sourceURL", "variants", "downloadUrl", "streamingUrl", "toc_url", "data", "question", "solutions", "editorState", "media", "pdfUrl"]' + +### inQuiry assessment 
service default values +inquiry_schema_path: "{{ kp_schema_base_path }}" +inquiry_kafka_urls: "{{ kafka_urls }}" +inquiry_assessment_import_kafka_topic_name: "{{ env_name }}.object.import.request" +inquiry_assessment_publish_kafka_topic_name: "{{ env_name }}.assessment.publish.request" +inquiry_cassandra_connection: "{{ lp_cassandra_connection }}" +inquiry_cassandra_keyspace_prefix: "{{ lp_cassandra_keyspace_prefix }}" +inquiry_redis_host: "{{ sunbird_lp_redis_host }}" +inquiry_search_service_base_url: "{{ sunbird_search_service_api_base_url }}/v3/search" \ No newline at end of file diff --git a/ansible/roles/stack-sunbird/templates/assessment-service_application.conf b/ansible/roles/stack-sunbird/templates/assessment-service_application.conf index 60d129907a..c06a44f2d8 100644 --- a/ansible/roles/stack-sunbird/templates/assessment-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/assessment-service_application.conf @@ -336,14 +336,13 @@ play.filters { play.http.parser.maxMemoryBuffer = 50MB akka.http.parsing.max-content-length = 50MB -schema.base_path="{{ kp_schema_base_path | default('/home/sunbird/assessment-service-1.0-SNAPSHOT/schemas')}}" +schema.base_path="{{ inquiry_schema_path | default('/home/sunbird/assessment-service-1.0-SNAPSHOT/schemas')}}" # Cassandra Configuration -cassandra.lp.connection="{{ lp_cassandra_connection }}" -content.keyspace = "{{ lp_cassandra_keyspace_prefix }}_content_store" +cassandra.lp.connection="{{ inquiry_cassandra_connection }}" # Redis Configuration -redis.host="{{ sunbird_lp_redis_host }}" +redis.host="{{ inquiry_redis_host }}" redis.port=6379 redis.maxConnections=128 @@ -383,30 +382,24 @@ languageCode { telugu : "te" } -cloud_storage_type: "{{ cloud_service_provider }}" -cloud_storage_key: "{{ cloud_public_storage_accountname }}" -cloud_storage_secret: "{{ cloud_public_storage_secret }}" -cloud_storage_endpoint: "{{ cloud_public_storage_endpoint }}" -cloud_storage_container: "{{ 
cloud_storage_content_bucketname }}" - kafka { - urls : "{{ kafka_urls }}" + urls : "{{ inquiry_kafka_urls }}" topic.send.enable : true - topics.instruction : "{{ env_name }}.assessment.publish.request" + topics.instruction : "{{ inquiry_assessment_publish_kafka_topic_name }}" } -objectcategorydefinition.keyspace="{{ lp_cassandra_keyspace_prefix }}_category_store" -question.keyspace="{{ lp_cassandra_keyspace_prefix }}_question_store" -questionset.keyspace="{{ lp_cassandra_keyspace_prefix }}_hierarchy_store" +objectcategorydefinition.keyspace="{{ inquiry_cassandra_keyspace_prefix }}_category_store" +question.keyspace="{{ inquiry_cassandra_keyspace_prefix }}_question_store" +questionset.keyspace="{{ inquiry_cassandra_keyspace_prefix }}_hierarchy_store" composite { search { - url : "{{ sunbird_search_service_api_base_url }}/v3/search" + url : "{{ inquiry_search_service_base_url }}" } } import { request_size_limit : 300 - output_topic_name : "{{ env_name }}.object.import.request" + output_topic_name : "{{ inquiry_assessment_import_kafka_topic_name }}" required_props { question : ["name", "code", "mimeType", "framework", "channel"] questionset : ["name", "code", "mimeType", "framework", "channel"] @@ -426,4 +419,14 @@ assessment.copy.props_to_remove=["downloadUrl", "artifactUrl", "variants", "LastPublishedBy", "rejectReasons", "rejectComment", "gradeLevel", "subject", "medium", "board", "topic", "purpose", "subtopic", "contentCredits", "owner", "collaborators", "creators", "contributors", "badgeAssertions", "dialcodes", - "concepts", "keywords", "reservedDialcodes", "dialcodeRequired", "leafNodes", "sYS_INTERNAL_LAST_UPDATED_ON", "prevStatus", "lastPublishedBy", "streamingUrl"] \ No newline at end of file + "concepts", "keywords", "reservedDialcodes", "dialcodeRequired", "leafNodes", "sYS_INTERNAL_LAST_UPDATED_ON", "prevStatus", "lastPublishedBy", "streamingUrl"] + +cloud_storage_container: "{{ cloud_storage_content_bucketname }}" + +cloudstorage { + 
metadata.replace_absolute_path={{ cloudstorage_replace_absolute_path | default('false') }} + metadata.list={{ cloudstorage_metadata_list }} + relative_path_prefix="{{ cloudstorage_relative_path_prefix | default('CLOUD_STORAGE_BASE_PATH') }}" + read_base_path="{{ cloudstorage_base_path }}" + write_base_path={{ valid_cloudstorage_base_urls }} +} \ No newline at end of file From 08abafc3e0ae706d7ad8b3715cbac627f3579271 Mon Sep 17 00:00:00 2001 From: Ashwiniev95 <52481775+Ashwiniev95@users.noreply.github.com> Date: Mon, 12 Dec 2022 18:03:09 +0530 Subject: [PATCH 132/616] Add a new variable (#3661) --- ansible/roles/ml-analytics-service/defaults/main.yml | 1 + ansible/roles/ml-analytics-service/templates/config.j2 | 1 + 2 files changed, 2 insertions(+) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index a480b01ccd..6c9ea27e79 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -112,3 +112,4 @@ cloud_storage_telemetry_bucketname: "{{ cloud_storage_telemetry_bucketname }}" cloud_private_storage_secret: "{{ cloud_private_storage_secret }}" cloud_private_storage_region: "{{ cloud_private_storage_region }}" cloud_private_storage_endpoint: "{{ cloud_private_storage_endpoint }}" +ml_analytics_project_program : "{{ WORKDIR }}/ml-analytics-service/projects/program_ids.txt" diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index 70160c64c0..aa365dbb0c 100644 --- a/ansible/roles/ml-analytics-service/templates/config.j2 +++ b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -232,6 +232,7 @@ survey_sub_ids = {{ml_analytics_survey_submission_id_filepath}} survey_druid_data = {{ml_analytics_survey_batchupdate_output_dir}} +program_text_file = {{ml_analytics_project_program}} [SLACK] From 0c14ae91c89dec4ce69ec3e7cdb728d992c14db4 Mon Sep 17 00:00:00 2001 
From: Raghupathi Date: Tue, 13 Dec 2022 16:22:16 +0530 Subject: [PATCH 133/616] Update main.yml --- ansible/roles/ml-analytics-service/defaults/main.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 6c9ea27e79..6c40b8b6c0 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -102,7 +102,7 @@ ml_analytics_survey_batchupdate_output_dir : "{{ WORKDIR }}/source/survey/" ml_analytics_druid_interval_list: '["1901-01-01T00:00:00+00:00/2020-01-01T00:00:00+00:00","2020-01-01T00:00:00+00:00/2020-06-01T00:00:00+00:00","2020-06-01T00:00:00+00:00/2021-01-01T00:00:00+00:00","2021-01-01T00:00:00+00:00/2021-06-01T00:00:00+00:00","2021-06-01T00:00:00+00:00/2022-01-01T00:00:00+00:00","2022-01-01T00:00:00+00:00/2022-03-01T00:00:00+00:00","2022-03-01T00:00:00+00:00/2022-06-01T00:00:00+00:00","2022-06-01T00:00:00+00:00/2022-09-01T00:00:00+00:00","2022-09-01T00:00:00+00:00/2023-01-01T00:00:00+00:00"]' ML_Cloud_Service_Provider: "{{ cloud_service_provider | default('azure') }}" ## Valid options - ORACLE, gcloud, aws & azure ml_analytics_cloud_package_path: "{{ config_path }}/cloud_storage" -ml_analytics_cname_url: "https://{{ cloud_private_storage_accountname }}.blob.core.windows.net/{{ cloud_storage_telemetry_bucketname }}" +ml_analytics_cname_url: "{{ cloud_storage_url }}/{{ cloud_storage_samiksha_bucketname }}" ml_Cloud_secret_json_file: "cloud_secrets.json" ml_Cloud_Secrets: account_name: "{{ cloud_private_storage_accountname }}" @@ -113,3 +113,4 @@ cloud_private_storage_secret: "{{ cloud_private_storage_secret }}" cloud_private_storage_region: "{{ cloud_private_storage_region }}" cloud_private_storage_endpoint: "{{ cloud_private_storage_endpoint }}" ml_analytics_project_program : "{{ WORKDIR }}/ml-analytics-service/projects/program_ids.txt" 
+ml_analytics_projects_program_filename: "{{ config_path }}/projects/program_ids.txt" From f4f545de049adfdf53501fd59669c17acc6cf73b Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Tue, 13 Dec 2022 16:23:11 +0530 Subject: [PATCH 134/616] Update shell_script_config.j2 --- .../ml-analytics-service/templates/shell_script_config.j2 | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ansible/roles/ml-analytics-service/templates/shell_script_config.j2 b/ansible/roles/ml-analytics-service/templates/shell_script_config.j2 index 6ecdeba31a..97e35a4db0 100644 --- a/ansible/roles/ml-analytics-service/templates/shell_script_config.j2 +++ b/ansible/roles/ml-analytics-service/templates/shell_script_config.j2 @@ -1,2 +1 @@ -mongo_url={{ ml_analytics_mongodb_url }} -mongo_db_name={{ ml_analytics_mongo_db_name }} +projects_program_filename={{ ml_analytics_projects_program_filename }} From ddeeff6721ef6460b7de5fd40d4f4c299c755aa7 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Tue, 13 Dec 2022 17:01:35 +0530 Subject: [PATCH 135/616] Update Ingestion Spec --- ansible/roles/ml-analytics-service/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 6c40b8b6c0..06fb010cdf 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -82,7 +82,7 @@ ml_analytics_projects_distinctCnt_prglevel_output_dir: "{{ WORKDIR }}/source/pro ml_analytics_projects_distinctCnt_prglevel_cloud_blob_path: "projects/distinctCountPrglevel/" ml_analytics_survey_status_output_dir : "{{ WORKDIR }}/source/survey/status/output" ml_analytics_survey_cloud_blob_path : "survey/status/" -ml_analytics_druid_survey_status_injestion_spec : 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/survey/status/sl_survey_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-survey-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":true,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"iso"},"dimensionsSpec":{"dimensions":["survey_submission_id", "submission_status", "user_id", "user_sub_type", "user_type", "state_externalId", "block_externalId", "district_externalId", "cluster_externalId", "school_externalId", "state_name", "block_name", "district_name", "cluster_name", "school_name", "board_name", "organisation_id", "organisation_name", "program_externalId", "program_id", "program_name", "survey_name", "survey_id", "survey_externalId", "created_date", "submission_date", "updatedAt", "parent_channel", "solution_name", "solution_id","private_program"]},"metricsSpec":[]}}}' +ml_analytics_druid_survey_status_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/survey/status/sl_survey_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-survey-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":true,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"iso"},"dimensionsSpec":{"dimensions":["survey_submission_id", "submission_status", "user_id", "user_sub_type", "user_type", "state_externalId", "block_externalId", "district_externalId", "cluster_externalId", "school_externalId", "state_name", "block_name", "district_name", "cluster_name", "school_name", "board_name", "organisation_id", "organisation_name", "program_externalId", "program_id", "program_name", "survey_name", "survey_id", 
"survey_externalId", "created_date", "submission_date", "updatedAt", "parent_channel", "solution_name", "solution_id","private_program","state_code","school_code","district_code","block_code","cluster_code"]},"metricsSpec":[]}}}' ml_analytics_slack_token: "{{ ml_slack_token | default('') }}" ml_analytics_channel_name: "{{ ml_slack_channel | default('') }}" ml_analytics_program_dashboard_cloud_blob_path: "{{ ml_program_blob_path | default('') }}" From fb320160c4fa076197b974d9d6308d21239bb813 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Wed, 14 Dec 2022 09:36:22 +0530 Subject: [PATCH 136/616] Remove SAS token --- ansible/roles/ml-analytics-service/templates/config.j2 | 2 -- 1 file changed, 2 deletions(-) diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index aa365dbb0c..c0ec68fd3e 100644 --- a/ansible/roles/ml-analytics-service/templates/config.j2 +++ b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -192,8 +192,6 @@ bucket_name = {{ cloud_storage_telemetry_bucketname }} account_name = {{ cloud_private_storage_accountname }} -sas_token = {{ cloud_private_storage_secret }} - container_name = {{ cloud_storage_telemetry_bucketname }} account_key = {{ cloud_private_storage_secret }} From c0873da4e7fbcba65ca315179f98e5dd26c39c00 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Wed, 14 Dec 2022 13:06:34 +0530 Subject: [PATCH 137/616] Missing vars update (#3666) --- ansible/roles/ml-analytics-service/defaults/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 06fb010cdf..33d37d02a6 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -83,8 +83,8 @@ ml_analytics_projects_distinctCnt_prglevel_cloud_blob_path: "projects/distinctCo ml_analytics_survey_status_output_dir : "{{ 
WORKDIR }}/source/survey/status/output" ml_analytics_survey_cloud_blob_path : "survey/status/" ml_analytics_druid_survey_status_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/survey/status/sl_survey_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-survey-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":true,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"iso"},"dimensionsSpec":{"dimensions":["survey_submission_id", "submission_status", "user_id", "user_sub_type", "user_type", "state_externalId", "block_externalId", "district_externalId", "cluster_externalId", "school_externalId", "state_name", "block_name", "district_name", "cluster_name", "school_name", "board_name", "organisation_id", "organisation_name", "program_externalId", "program_id", "program_name", "survey_name", "survey_id", "survey_externalId", "created_date", "submission_date", "updatedAt", "parent_channel", "solution_name", "solution_id","private_program","state_code","school_code","district_code","block_code","cluster_code"]},"metricsSpec":[]}}}' -ml_analytics_slack_token: "{{ ml_slack_token | default('') }}" -ml_analytics_channel_name: "{{ ml_slack_channel | default('') }}" +ml_slack_token: "{{ ml_analytics_slack_token | default('') }}" +ml_slack_channel: "{{ ml_analytics_slack_channel | default('') }}" ml_analytics_program_dashboard_cloud_blob_path: "{{ ml_program_blob_path | default('') }}" ml_druid_query_data: "{{ ml_druid_query | default('') }}" ml_program_dashboard_data: "{{ ml_program_data | default('') }}" From c8dd939c932f8989e7874f4d70601ca85d944111 Mon Sep 17 00:00:00 2001 From: PrasadMoka Date: Wed, 14 Dec 2022 14:32:50 +0530 Subject: [PATCH 138/616] LR-110 removed un-necessary prefix --- .../sunbird-RC/registry/schemas/TrainingCertificate.json | 2 
+- utils/sunbird-RC/schema/credential_template.json | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kubernetes/helm_charts/sunbird-RC/registry/schemas/TrainingCertificate.json b/kubernetes/helm_charts/sunbird-RC/registry/schemas/TrainingCertificate.json index 5187b08e81..84dc1d5429 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/schemas/TrainingCertificate.json +++ b/kubernetes/helm_charts/sunbird-RC/registry/schemas/TrainingCertificate.json @@ -69,6 +69,6 @@ ], "systemFields": ["osCreatedAt", "osUpdatedAt", "osCreatedBy", "osUpdatedBy"], "enableLogin": false, - "credentialTemplate": "https://{{ upstream_url }}/schema/credential_template.json" + "credentialTemplate": "{{ upstream_url }}/schema/credential_template.json" } } diff --git a/utils/sunbird-RC/schema/credential_template.json b/utils/sunbird-RC/schema/credential_template.json index f96a3c0528..60477810b0 100644 --- a/utils/sunbird-RC/schema/credential_template.json +++ b/utils/sunbird-RC/schema/credential_template.json @@ -1,7 +1,7 @@ { "@context": [ - "https://{{ upstream_url }}/schema/v1_context.json", - "https://{{ upstream_url }}/schema/sunbird_context.json" + "{{ upstream_url }}/schema/v1_context.json", + "{{ upstream_url }}/schema/sunbird_context.json" ], "type": [ "VerifiableCredential" From 9c6b12f8cdb789744ef74c762899cc6130053450 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Wed, 14 Dec 2022 15:03:51 +0530 Subject: [PATCH 139/616] Release 5.1.0 - csp changes (#3667) --- ansible/assets-upload.yml | 1 - ansible/deploy-plugins.yml | 1 - .../tasks/delete-using-azcopy.yml | 12 +++++++++++- .../tasks/upload-using-azcopy.yml | 12 +++++++++++- ansible/roles/cassandra-backup/tasks/main.yml | 2 +- .../postgres-managed-service-restore/tasks/main.yml | 2 +- 6 files changed, 24 insertions(+), 6 deletions(-) diff --git a/ansible/assets-upload.yml b/ansible/assets-upload.yml index 2d8d4b1bc2..09e7df6ceb 100644 --- 
a/ansible/assets-upload.yml +++ b/ansible/assets-upload.yml @@ -13,7 +13,6 @@ blob_container_folder_path: "" storage_account_name: "{{ cloud_public_storage_accountname }}" storage_account_key: "{{ cloud_public_storage_secret }}" - storage_account_sas_token: "{{ azure_public_storage_account_sas }}" - name: delete files and folders from azure storage using azcopy include_role: diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 6d048b18c4..a78ce1c640 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -19,7 +19,6 @@ container_public_access: "container" storage_account_name: "{{ cloud_public_storage_accountname }}" storage_account_key: "{{ cloud_public_storage_secret }}" - storage_account_sas_token: "{{ azure_public_storage_account_sas }}" tags: - always no_log: True diff --git a/ansible/roles/azure-cloud-storage/tasks/delete-using-azcopy.yml b/ansible/roles/azure-cloud-storage/tasks/delete-using-azcopy.yml index 236169e86c..196de9c9b3 100644 --- a/ansible/roles/azure-cloud-storage/tasks/delete-using-azcopy.yml +++ b/ansible/roles/azure-cloud-storage/tasks/delete-using-azcopy.yml @@ -1,6 +1,16 @@ --- +- name: generate SAS token for azcopy + shell: | + sas_expiry=`date -u -d "1 hour" '+%Y-%m-%dT%H:%MZ'` + sas_token=?`az storage container generate-sas -n {{ blob_container_name }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }} --https-only --permissions dlrw --expiry $sas_expiry -o tsv` + echo $sas_token + register: sas_token + +- set_fact: + container_sas_token: "{{ sas_token.stdout}}" + - name: delete files and folders from azure storage using azcopy - shell: "azcopy rm 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ storage_account_sas_token }}' --recursive" + shell: "azcopy rm 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ container_sas_token }}' 
--recursive" environment: AZCOPY_CONCURRENT_FILES: "10" async: 10800 diff --git a/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml index affbc8c002..95da584c9b 100644 --- a/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml +++ b/ansible/roles/azure-cloud-storage/tasks/upload-using-azcopy.yml @@ -1,4 +1,14 @@ --- +- name: generate SAS token for azcopy + shell: | + sas_expiry=`date -u -d "1 hour" '+%Y-%m-%dT%H:%MZ'` + sas_token=?`az storage container generate-sas -n {{ blob_container_name }} --account-name {{ storage_account_name }} --account-key {{ storage_account_key }} --https-only --permissions dlrw --expiry $sas_expiry -o tsv` + echo $sas_token + register: sas_token + +- set_fact: + container_sas_token: "{{ sas_token.stdout}}" + - name: create container in azure storage if it doesn't exist include_role: name: azure-cloud-storage @@ -6,7 +16,7 @@ when: create_container == True - name: upload files and folders to azure storage using azcopy - shell: "azcopy copy {{ local_file_or_folder_path }} 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ storage_account_sas_token }}' --recursive" + shell: "azcopy copy {{ local_file_or_folder_path }} 'https://{{ storage_account_name }}.blob.core.windows.net/{{ blob_container_name }}{{ blob_container_folder_path }}{{ container_sas_token }}' --recursive" environment: AZCOPY_CONCURRENT_FILES: "10" async: 10800 diff --git a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml index ce0e646662..0e5ae87477 100755 --- a/ansible/roles/cassandra-backup/tasks/main.yml +++ b/ansible/roles/cassandra-backup/tasks/main.yml @@ -42,7 +42,7 @@ blob_container_folder_path: "" local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}" storage_account_name: "{{ cloud_management_storage_accountname }}" - 
storage_account_sas_token: "{{ azure_management_storage_account_sas }}" + storage_account_key: "{{ cloud_management_storage_secret }}" when: cloud_service_provider == "azure" - name: upload backup to S3 diff --git a/ansible/roles/postgres-managed-service-restore/tasks/main.yml b/ansible/roles/postgres-managed-service-restore/tasks/main.yml index c3d518db56..58d2c53482 100644 --- a/ansible/roles/postgres-managed-service-restore/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-restore/tasks/main.yml @@ -26,7 +26,7 @@ name: aws-cloud-storage tasks_from: download.yml vars: - s3_bucket_name: "{{ cloud_storage_management_bucketname }}" + s3_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" aws_access_key_id: "{{ cloud_management_storage_accountname }}" aws_secret_access_key: "{{ cloud_management_storage_secret }}" aws_default_region: "{{ cloud_public_storage_region }}" From 061a993f8c85635eaad2388090aa5725b0cea239 Mon Sep 17 00:00:00 2001 From: VISHNUDAS <95604247+VISHNUDAS-tunerlabs@users.noreply.github.com> Date: Wed, 14 Dec 2022 15:26:06 +0530 Subject: [PATCH 140/616] Add logging level configuration for registry service (#3669) Add logging level configuration for registry service --- kubernetes/helm_charts/sunbird-RC/registry/values.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/kubernetes/helm_charts/sunbird-RC/registry/values.j2 b/kubernetes/helm_charts/sunbird-RC/registry/values.j2 index c582ae8941..433edbfc92 100644 --- a/kubernetes/helm_charts/sunbird-RC/registry/values.j2 +++ b/kubernetes/helm_charts/sunbird-RC/registry/values.j2 @@ -56,6 +56,7 @@ rccoreenv: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: {{ registry_listener_security_protocol_map|default('INTERNAL:PLAINTEXT,OUTSIDE:PLAINTEXT')}} KAFKA_INTER_BROKER_LISTENER_NAME: {{ registry_inter_broker_listener_name|default('INTERNAL')}} KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: {{ registry_offsets_topic_replication_factor|default('1')}} + logging.level.root : {{ 
registry_logging_level|default('INFO')}} {# The below should get enabled once the service has probes implemented #} {# {{ registry_liveness_readiness | to_nice_yaml }} #} From fadcdc00c7c0d2199d89b84df21076d7994d4c7b Mon Sep 17 00:00:00 2001 From: PrasadMoka Date: Wed, 14 Dec 2022 16:51:27 +0530 Subject: [PATCH 141/616] LR-110 added default context --- utils/sunbird-RC/schema/credential_template.json | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/sunbird-RC/schema/credential_template.json b/utils/sunbird-RC/schema/credential_template.json index 60477810b0..a999eca8da 100644 --- a/utils/sunbird-RC/schema/credential_template.json +++ b/utils/sunbird-RC/schema/credential_template.json @@ -1,5 +1,6 @@ { "@context": [ + "https://www.w3.org/2018/credentials/v1", "{{ upstream_url }}/schema/v1_context.json", "{{ upstream_url }}/schema/sunbird_context.json" ], From 95a83a1a3c3000dbeb4d823370b057e7c01bf2e1 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Thu, 15 Dec 2022 15:53:08 +0530 Subject: [PATCH 142/616] Update config.j2 (#3673) --- .../ml-analytics-service/defaults/main.yml | 12 ++++++------ .../ml-analytics-service/templates/config.j2 | 18 +++++++++--------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 33d37d02a6..65274b1182 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -105,12 +105,12 @@ ml_analytics_cloud_package_path: "{{ config_path }}/cloud_storage" ml_analytics_cname_url: "{{ cloud_storage_url }}/{{ cloud_storage_samiksha_bucketname }}" ml_Cloud_secret_json_file: "cloud_secrets.json" ml_Cloud_Secrets: - account_name: "{{ cloud_private_storage_accountname }}" - account_key: "{{ cloud_private_storage_secret }}" -cloud_private_storage_accountname: "{{ cloud_private_storage_accountname }}" + account_name: "{{ cloud_public_storage_accountname }}" 
+ account_key: "{{ cloud_public_storage_secret }}" +cloud_public_storage_accountname: "{{ cloud_public_storage_accountname }}" cloud_storage_telemetry_bucketname: "{{ cloud_storage_telemetry_bucketname }}" -cloud_private_storage_secret: "{{ cloud_private_storage_secret }}" -cloud_private_storage_region: "{{ cloud_private_storage_region }}" -cloud_private_storage_endpoint: "{{ cloud_private_storage_endpoint }}" +cloud_public_storage_secret: "{{ cloud_public_storage_secret }}" +cloud_public_storage_region: "{{ cloud_public_storage_region }}" +cloud_public_storage_endpoint: "{{ cloud_public_storage_endpoint }}" ml_analytics_project_program : "{{ WORKDIR }}/ml-analytics-service/projects/program_ids.txt" ml_analytics_projects_program_filename: "{{ config_path }}/projects/program_ids.txt" diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index c0ec68fd3e..52927ec957 100644 --- a/ansible/roles/ml-analytics-service/templates/config.j2 +++ b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -154,13 +154,13 @@ survey_streaming_error = {{ ml_analytics_survey_log_folder_path }}/error.log [ORACLE] -endpoint_url = {{ cloud_private_storage_endpoint }} +endpoint_url = {{ cloud_public_storage_endpoint }} -access_key = {{ cloud_private_storage_accountname }} +access_key = {{ cloud_public_storage_accountname }} -secret_access_key = {{ cloud_private_storage_secret }} +secret_access_key = {{ cloud_public_storage_secret }} -region_name = {{ cloud_private_storage_region }} +region_name = {{ cloud_public_storage_region }} bucket_name = {{ cloud_storage_telemetry_bucketname }} @@ -178,11 +178,11 @@ bucket_name = {{ cloud_storage_telemetry_bucketname }} service_name = S3 -access_key = {{ cloud_private_storage_accountname }} +access_key = {{ cloud_public_storage_accountname }} -secret_access_key = {{ cloud_private_storage_secret }} +secret_access_key = {{ cloud_public_storage_secret }} -region_name = {{ 
cloud_private_storage_region }} +region_name = {{ cloud_public_storage_region }} bucket_name = {{ cloud_storage_telemetry_bucketname }} @@ -190,11 +190,11 @@ bucket_name = {{ cloud_storage_telemetry_bucketname }} [AZURE] -account_name = {{ cloud_private_storage_accountname }} +account_name = {{ cloud_public_storage_accountname }} container_name = {{ cloud_storage_telemetry_bucketname }} -account_key = {{ cloud_private_storage_secret }} +account_key = {{ cloud_public_storage_secret }} {% endif %} From a807916a3952fcbcced5141da58e42b5e2444b14 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Thu, 15 Dec 2022 20:54:11 +0530 Subject: [PATCH 143/616] fix: ED-573 jenkins job to kill spark jobs Signed-off-by: Keshav Prasad --- ansible/kill_spark_jobs.yaml | 12 ++++++ pipelines/ops/kill-spark-jobs/Jenkinsfile | 51 +++++++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100644 ansible/kill_spark_jobs.yaml create mode 100644 pipelines/ops/kill-spark-jobs/Jenkinsfile diff --git a/ansible/kill_spark_jobs.yaml b/ansible/kill_spark_jobs.yaml new file mode 100644 index 0000000000..01c01c5bcf --- /dev/null +++ b/ansible/kill_spark_jobs.yaml @@ -0,0 +1,12 @@ +--- +- hosts: spark + become: yes + tasks: + - name: get pids of job manager which may be orphaned + shell: ps -ef | grep [j]ob. 
| awk '{print $2}' + register: pids_of_jobmanager + + - name: kill the orphan job manager pids + shell: "kill -9 {{ item | int }}" + with_items: + - "{{ pids_of_jobmanager.stdout_lines }}" diff --git a/pipelines/ops/kill-spark-jobs/Jenkinsfile b/pipelines/ops/kill-spark-jobs/Jenkinsfile new file mode 100644 index 0000000000..37bad74c46 --- /dev/null +++ b/pipelines/ops/kill-spark-jobs/Jenkinsfile @@ -0,0 +1,51 @@ +@Library('deploy-conf') _ +node() { + try { + String ANSI_GREEN = "\u001B[32m" + String ANSI_NORMAL = "\u001B[0m" + String ANSI_BOLD = "\u001B[1m" + String ANSI_RED = "\u001B[31m" + String ANSI_YELLOW = "\u001B[33m" + + stage('checkout public repo') { + folder = new File("$WORKSPACE/.git") + if (folder.exists()) + { + println "Found .git folder. Clearing it.." + sh'git clean -fxd' + } + checkout scm + } + + ansiColor('xterm') { + stage('deploy'){ + values = [:] + currentWs = sh(returnStdout: true, script: 'pwd').trim() + envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() + module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() + jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() + ansiblePlaybook = "${currentWs}/ansible/kill_spark_jobs.yaml" + ansibleExtraArgs = "-v" + values.put('currentWs', currentWs) + values.put('env', envDir) + values.put('module', module) + values.put('jobName', jobName) + values.put('ansiblePlaybook', ansiblePlaybook) + values.put('ansibleExtraArgs', ansibleExtraArgs) + println values + ansible_playbook_run(values) + currentBuild.result = 'SUCCESS' + currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" + } + } + summary() + } + catch (err) { + currentBuild.result = 'FAILURE' + throw err + } + finally { + slack_notify(currentBuild.result) + email_notify() + } +} From ece7ed3685bab5395e4333927c9fdff1282fbc27 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Fri, 16 Dec 2022 11:30:33 +0530 Subject: [PATCH 
144/616] Update ingestion specs (#3677) --- ansible/roles/ml-analytics-service/defaults/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index 65274b1182..d73099451d 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -53,12 +53,12 @@ ml_analytics_project_output_dir: "{{ WORKDIR }}/source/projects/output" ml_analytics_observation_status_output_dir: "{{ WORKDIR }}/source/observations/status/output" ml_analytics_api_authorization_key: "{{ml_api_auth_token | default('sunbird_api_auth_token')}}" ml_analytics_api_access_token: "{{ml_api_access_token | default('ml_core_internal_access_token')}}" -ml_analytics_druid_observation_status_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": "azure","uris": ["azure://telemetry-data-store/observation/status/sl_observation_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-observation-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"auto"},"dimensionsSpec":{"dimensions":["status","entity_externalId","entity_id","entity_type","solution_id","solution_externalId","submission_id","entity_name","solution_name","role_title","school_name","school_code","school_externalId","state_name","state_code","state_externalId","district_name","district_code","district_externalId","block_name","block_code","block_externalId","cluster_name","cluster_code","cluster_externalId","completedDate","channel","parent_channel","program_id","program_externalId","program_name","app_name","user_id","private_program","solution_type","organisation_name","ecm_marked_na","board_name","updatedAt","organisation_id",
"user_type","observed_school_name","observed_school_id","observed_school_code","observed_state_name","observed_state_id","observed_state_code","observed_district_name","observed_district_id","observed_district_code","observed_block_name","observed_block_id","observed_block_code","observed_cluster_name","observed_cluster_id","observed_cluster_code"]},"metricsSpec":[]}}}' +ml_analytics_druid_observation_status_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": "azure","uris": ["azure://telemetry-data-store/observation/status/sl_observation_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-observation-status","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"auto"},"dimensionsSpec":{"dimensions":["status","entity_externalId","entity_id","entity_type","solution_id","solution_externalId","submission_id","entity_name","solution_name","role_title","school_name","school_code","school_externalId","state_name","state_code","state_externalId","district_name","district_code","district_externalId","block_name","block_code","block_externalId","cluster_name","cluster_code","cluster_externalId","completedDate","channel","parent_channel","program_id","program_externalId","program_name","app_name","user_id","private_program","solution_type","organisation_name","ecm_marked_na","board_name","updatedAt","organisation_id","user_type","observed_school_name","observed_school_id","observed_school_code","observed_state_name","observed_state_id","observed_state_code","observed_district_name","observed_district_id","observed_district_code","observed_block_name","observed_block_id","observed_block_code","observed_cluster_name","observed_cluster_id","observed_cluster_code","isRubricDriven","criteriaLevelReport"]},"metricsSpec":[]}}}' 
ml_analytics_druid_project_injestion_spec : '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type": "azure","uris": ["azure://telemetry-data-store/projects/sl_projects.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"sl-project","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"project_updated_date","format":"auto"},"dimensionsSpec":{"dimensions":[]},"metricsSpec":[]}}}' ml_analytics_druid_distinctCnt_obs_injestion_spec: '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount/ml_observation_distinctCount_status.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"status"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"m
etricsSpec":[]}}}' ml_analytics_druid_distinctCnt_obs_domain_injestion_spec: '{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain/ml_observation_distinctCount_domain.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' ml_analytics_druid_distinctCnt_obs_domain_criteria_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/distinctCount_domain_criteria/ml_observation_distinctCount_domain_criteria.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-obs-domain-criteria","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"solution_name"},{"type":"string","name":"solution_id"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"parent_channel"},{"type":"string","name":"solution_type"},{"type":"string","name":"private_program"},{"type":"string","name":"domain_name"},{"type":"string","name":"domain_externalId"},{"type":"string","name":"domain_level"},{"type":"string","name":"criteria_name"},{"type":"string","name":"criteria_score"},{"type":"string","name":"criteria_id"},{"type":"long","name":"unique_submissions"},{"type":"long","name":"unique_entities"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' -ml_analytics_druid_distinctCnt_projects_status_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/distinctCount/ml_projects_distinctCount.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"project_title"},{"type":"string","name":"solution_id"},{"type":"string","name":"status_of_project"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"private_program"},{"type":"string","name":"project_created_type"},{"type":"string","name":"parent_channel"},{"type":"long","name":"unique_projects"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"long","name":"no_of_imp_with_evidence"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' +ml_analytics_druid_distinctCnt_projects_status_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/distinctCount/ml_projects_distinctCount.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"project_title"},{"type":"string","name":"solution_id"},{"type":"string","name":"status_of_project"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"district_name"},{"type":"string","name":"district_externalId"},{"type":"string","name":"block_name"},{"type":"string","name":"block_externalId"},{"type":"string","name":"organisation_name"},{"type":"string","name":"organisation_id"},{"type":"string","name":"private_program"},{"type":"string","name":"project_created_type"},{"type":"string","name":"parent_channel"},{"type":"long","name":"unique_projects"},{"type":"long","name":"unique_users"},{"type":"long","name":"unique_solution"},{"type":"long","name":"no_of_imp_with_evidence"},{"type":"string","name":"time_stamp"},{"type":"long","name":"no_of_certificate_issued"}]},"metricsSpec":[]}}}' ml_analytics_obs_distinctCnt_cloud_blob_path: "observation/distinctCount/" ml_analytics_obs_distinctCnt_domain_cloud_blob_path: "observation/distinctCount_domain/" ml_analytics_obs_distinctCnt_domain_criteria_cloud_blob_path: "observation/distinctCount_domain_criteria/" @@ -77,7 +77,7 @@ ml_analytics_observation_status_rollup_output_dir: "/opt/sparkjobs/source/observ ml_analytics_druid_project_rollup_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/rollup/projects_rollup.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-status-rollup","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"project_updated_date","format":"iso"},"dimensionsSpec":{"dimensions":["project_title","project_goal","area_of_improvement","status_of_project","tasks_name","tasks_status","designation","task_evidence_status","project_id","task_id","project_created_type","parent_channel","program_id","program_name","project_updated_date","createdBy","program_externalId","private_program","task_deleted_flag","project_terms_and_condition","state_externalId","block_externalId","district_externalId","cluster_externalId","school_externalId","state_name","block_name","district_name","cluster_name","school_name","board_name","organisation_name","solution_id","organisation_id",{"name":"status_code","type":"long"}]},"metricsSpec":[{"name":"count","type":"count"},{"name":"sum___v","type":"longSum","fieldName":"__v"},{"name":"sum_status_code","type":"longMax","fieldName":"status_code"},{"type":"HLLSketchBuild","name":"count_of_createBy","fieldName":"createdBy"},{"type":"HLLSketchBuild","name":"count_of_project_id","fieldName":"project_id"},{"type":"HLLSketchBuild","name":"count_of_solution_id","fieldName":"solution_id"},{"type":"HLLSketchBuild","name":"count_of_program_id","fieldName":"program_id"}]}}}' ml_analytics_druid_observation_status_rollup_injestion_spec: 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/observation/rollup/observation_status_rollup.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-observation-status-rollup","granularitySpec":{"type":"uniform","queryGranularity":"DAY","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"updatedAt","format":"iso"},"dimensionsSpec":{"dimensions":["status","user_id","solution_id","submission_id","entity_name","completedDate","program_id","private_program","solution_type","updatedAt","role_title","solution_name","program_name","channel","parent_channel","block_name","district_name","school_name","cluster_name","state_name","organisation_name","board_name","district_externalId","state_externalId","block_externalId","cluster_externalId","school_externalId","organisation_id",{"type":"long","name":"status_code"}]},"metricsSpec":[{"type":"count","name":"count"},{"type":"longSum","name":"sum___v","fieldName":"__v","expression":null},{"type":"HLLSketchBuild","name":"count_distinct_solution","fieldName":"solution_id","lgK":12,"tgtHllType":"HLL_4","round":false},{"type":"HLLSketchBuild","name":"count_distinct_submission_id","fieldName":"submission_id","lgK":12,"tgtHllType":"HLL_4","round":false},{"type":"HLLSketchBuild","name":"count_distinct_user_id","fieldName":"user_id","lgK":12,"tgtHllType":"HLL_4","round":false}]}}}' ml_analytics_druid_rollup_url: "{{groups['druid'][0]}}:8081" -ml_analytics_druid_distinctCnt_prglevel_projects_status_injestion_spec : 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/distinctCountPrglevel/ml_projects_distinctCount_prgmlevel.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-programLevel-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"status_of_project"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"private_program"},{"type":"string","name":"project_created_type"},{"type":"string","name":"parent_channel"},{"type":"long","name":"unique_projects"},{"type":"long","name":"unique_users"},{"type":"long","name":"no_of_imp_with_evidence"},{"type":"string","name":"time_stamp"}]},"metricsSpec":[]}}}' +ml_analytics_druid_distinctCnt_prglevel_projects_status_injestion_spec : 
'{"type":"index","spec":{"ioConfig":{"type":"index","inputSource":{"type":"azure","uris":["azure://telemetry-data-store/projects/distinctCountPrglevel/ml_projects_distinctCount_prgmlevel.json"]},"inputFormat":{"type":"json"}},"tuningConfig":{"type":"index","partitionsSpec":{"type":"dynamic"}},"dataSchema":{"dataSource":"ml-project-programLevel-status","granularitySpec":{"type":"uniform","queryGranularity":"none","rollup":false,"segmentGranularity":"DAY"},"timestampSpec":{"column":"time_stamp","format":"auto"},"dimensionsSpec":{"dimensions":[{"type":"string","name":"program_name"},{"type":"string","name":"program_id"},{"type":"string","name":"status_of_project"},{"type":"string","name":"state_name"},{"type":"string","name":"state_externalId"},{"type":"string","name":"private_program"},{"type":"string","name":"project_created_type"},{"type":"string","name":"parent_channel"},{"type":"long","name":"unique_projects"},{"type":"long","name":"unique_users"},{"type":"long","name":"no_of_imp_with_evidence"},{"type":"string","name":"time_stamp"},{"type":"long","name":"no_of_certificate_issued"}]},"metricsSpec":[]}}}' ml_analytics_projects_distinctCnt_prglevel_output_dir: "{{ WORKDIR }}/source/projects/distinctCountPrglevel/output" ml_analytics_projects_distinctCnt_prglevel_cloud_blob_path: "projects/distinctCountPrglevel/" ml_analytics_survey_status_output_dir : "{{ WORKDIR }}/source/survey/status/output" From 6b868b4c347913291b98b4dd48b3af574e7d57a6 Mon Sep 17 00:00:00 2001 From: Akhil <30873558+saiakhil46@users.noreply.github.com> Date: Tue, 20 Dec 2022 10:58:13 +0530 Subject: [PATCH 145/616] updated jenkins version and plugins (#3679) --- deploy/jenkins/jenkins-plugins-setup.sh | 8 ++++---- deploy/jenkins/jenkins-server-setup.sh | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/deploy/jenkins/jenkins-plugins-setup.sh b/deploy/jenkins/jenkins-plugins-setup.sh index ed9a8c1756..c4256b3620 100755 --- a/deploy/jenkins/jenkins-plugins-setup.sh +++ 
b/deploy/jenkins/jenkins-plugins-setup.sh @@ -17,13 +17,13 @@ fi echo -e "\n\e[0;32m${bold}Downloading and copying jenkins plugin directory to Jenkins ${normal}" if [[ ! -d /var/lib/jenkins/plugins ]]; then -wget https://sunbirdpublic.blob.core.windows.net/installation/plugins.tar -tar -xf plugins.tar +wget https://sunbirdpublic.blob.core.windows.net/installation/plugins-2-319-3.tar +tar -xf plugins-2-319-3.tar mv plugins /var/lib/jenkins/ chown -R jenkins:jenkins /var/lib/jenkins/plugins else -wget https://sunbirdpublic.blob.core.windows.net/installation/plugins.tar -tar -xf plugins.tar +wget https://sunbirdpublic.blob.core.windows.net/installation/plugins-2-319-3.tar +tar -xf plugins-2-319-3.tar cp -rf plugins/* /var/lib/jenkins/plugins/ chown -R jenkins:jenkins /var/lib/jenkins/plugins fi diff --git a/deploy/jenkins/jenkins-server-setup.sh b/deploy/jenkins/jenkins-server-setup.sh index ad2b361671..f2ef322969 100755 --- a/deploy/jenkins/jenkins-server-setup.sh +++ b/deploy/jenkins/jenkins-server-setup.sh @@ -15,7 +15,7 @@ echo -e "\n\e[0;32m${bold}Installating Jenkins${normal}" wget -q -O - https://pkg.jenkins.io/debian-stable/jenkins.io.key | apt-key add - apt-add-repository "deb https://pkg.jenkins.io/debian-stable binary/" apt-get update -apt-get install -y jenkins=2.277.4 +apt-get install -y jenkins=2.319.3 echo -e "\n\e[0;32m${bold}Installating PIP${normal}" apt-get install -y python-pip From b61a35fad0362ea7eb0bb688ff0bc12ffc811571 Mon Sep 17 00:00:00 2001 From: Rajesh Kumaravel Date: Tue, 20 Dec 2022 10:59:31 +0530 Subject: [PATCH 146/616] Issue #ED-592 fix: Pdata version updated (#3678) --- .../artifacts/sunbird/login/resources/js/telemetry_service.js | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ansible/artifacts/sunbird/login/resources/js/telemetry_service.js b/ansible/artifacts/sunbird/login/resources/js/telemetry_service.js index e1f88cc741..f9250fd425 100644 --- a/ansible/artifacts/sunbird/login/resources/js/telemetry_service.js 
+++ b/ansible/artifacts/sunbird/login/resources/js/telemetry_service.js @@ -2506,7 +2506,7 @@ if(client_id.toLowerCase() === 'android'){ "telemetry": { "pdata": { "id": pdataId, - "ver": "5.0.0", + "ver": "5.1.0", "pid": "sunbird-portal" } } @@ -2687,7 +2687,6 @@ if(client_id.toLowerCase() === 'android'){ function stringToHTML(str) { let parser = new DOMParser(); let doc = parser.parseFromString(str, 'text/html'); - console.log('Doc parse => ', doc); // TODO: log! return doc?.body?.innerText || document.createElement('body'); } From 4193a39c5eaeab7fe58b4fdc93e36cb01cda20c5 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Wed, 21 Dec 2022 11:15:30 +1100 Subject: [PATCH 147/616] added task and role for artifacts upload - oci oss Signed-off-by: Deepak Devadathan --- ansible/artifacts-upload.yml | 10 ++++++++++ .../roles/oci-cloud-storage/defaults/main.yml | 3 +++ .../oci-cloud-storage/tasks/delete-folder.yml | 5 +++++ .../roles/oci-cloud-storage/tasks/delete.yml | 7 +++++++ .../roles/oci-cloud-storage/tasks/download.yml | 7 +++++++ ansible/roles/oci-cloud-storage/tasks/main.yml | 18 ++++++++++++++++++ .../oci-cloud-storage/tasks/upload-folder.yml | 8 ++++++++ .../roles/oci-cloud-storage/tasks/upload.yml | 8 ++++++++ 8 files changed, 66 insertions(+) create mode 100644 ansible/roles/oci-cloud-storage/defaults/main.yml create mode 100644 ansible/roles/oci-cloud-storage/tasks/delete-folder.yml create mode 100644 ansible/roles/oci-cloud-storage/tasks/delete.yml create mode 100644 ansible/roles/oci-cloud-storage/tasks/download.yml create mode 100644 ansible/roles/oci-cloud-storage/tasks/main.yml create mode 100644 ansible/roles/oci-cloud-storage/tasks/upload-folder.yml create mode 100644 ansible/roles/oci-cloud-storage/tasks/upload.yml diff --git a/ansible/artifacts-upload.yml b/ansible/artifacts-upload.yml index 31f2589a68..9591b8fd9f 100644 --- a/ansible/artifacts-upload.yml +++ b/ansible/artifacts-upload.yml @@ -39,3 +39,13 @@ aws_access_key_id: "{{ 
aws_artifact_bucket_access_key }}" aws_secret_access_key: "{{ aws_artifact_bucket_secret_access_key }}" when: cloud_service_provider == "aws" + + - name: upload artifact to oci oss + include_role: + name: oci-cloud-storage + tasks_from: upload.yml + vars: + local_file_or_folder_path: "{{ artifact_path }}" + oss_bucket_name: "{{ oci_artifact_oss_bucket_name }}" + oss_path: "{{ artifact }}" + when: cloud_service_provider == "oci" \ No newline at end of file diff --git a/ansible/roles/oci-cloud-storage/defaults/main.yml b/ansible/roles/oci-cloud-storage/defaults/main.yml new file mode 100644 index 0000000000..72727de167 --- /dev/null +++ b/ansible/roles/oci-cloud-storage/defaults/main.yml @@ -0,0 +1,3 @@ +oss_bucket_name: "" +oss_path: "" +local_file_or_folder_path: "" diff --git a/ansible/roles/oci-cloud-storage/tasks/delete-folder.yml b/ansible/roles/oci-cloud-storage/tasks/delete-folder.yml new file mode 100644 index 0000000000..6ed4e6b8b4 --- /dev/null +++ b/ansible/roles/oci-cloud-storage/tasks/delete-folder.yml @@ -0,0 +1,5 @@ +--- +- name: delete files and folders recursively + shell: "oci os object bulk-delete -ns {{oss_namespace}} -bn {{oss_bucket_name}} --prefix {{oss_path}} --force" + async: 3600 + poll: 10 diff --git a/ansible/roles/oci-cloud-storage/tasks/delete.yml b/ansible/roles/oci-cloud-storage/tasks/delete.yml new file mode 100644 index 0000000000..65d18843ca --- /dev/null +++ b/ansible/roles/oci-cloud-storage/tasks/delete.yml @@ -0,0 +1,7 @@ +- name: Ensure oci oss bucket exists + command: oci os bucket get --name {{ oss_bucket_name }} + +- name: Upload to oci oss bucket + command: oci os object delete -bn {{ oss_bucket_name }} --name {{ oss_path }} --force + async: 3600 + poll: 10 \ No newline at end of file diff --git a/ansible/roles/oci-cloud-storage/tasks/download.yml b/ansible/roles/oci-cloud-storage/tasks/download.yml new file mode 100644 index 0000000000..63e776c348 --- /dev/null +++ b/ansible/roles/oci-cloud-storage/tasks/download.yml @@ 
-0,0 +1,7 @@ +- name: Ensure oci oss bucket exists + command: oci os bucket get --name {{ oss_bucket_name }} + +- name: download files from oci oss bucket + command: oci os object bulk-download -bn {{ oss_bucket_name }} --prefix {{ oss_path }} --dest-dir {{ local_file_or_folder_path }} + async: 3600 + poll: 10 \ No newline at end of file diff --git a/ansible/roles/oci-cloud-storage/tasks/main.yml b/ansible/roles/oci-cloud-storage/tasks/main.yml new file mode 100644 index 0000000000..6f9dca6b63 --- /dev/null +++ b/ansible/roles/oci-cloud-storage/tasks/main.yml @@ -0,0 +1,18 @@ +--- +- name: delete files from oci oss bucket + include: delete.yml + +- name: delete folders from oci oss bucket recursively + include: delete-folder.yml + + +- name: download file from oss + include: download.yml + +- name: upload files from a local to oci oss + include: upload.yml + +- name: upload files and folder from local directory to oci oss + include: upload-folder.yml + + diff --git a/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml b/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml new file mode 100644 index 0000000000..6e4d06562c --- /dev/null +++ b/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml @@ -0,0 +1,8 @@ +--- +- name: Ensure oci oss bucket exists + command: oci os bucket get --name {{ oss_bucket_name }} + +- name: Upload folder to oci oss bucket + command: oci os object bulk-upload -bn {{ oss_bucket_name }} --prefix {{ oss_path }} --src-dir {{ local_file_or_folder_path }} --content-type auto + async: 3600 + poll: 10 diff --git a/ansible/roles/oci-cloud-storage/tasks/upload.yml b/ansible/roles/oci-cloud-storage/tasks/upload.yml new file mode 100644 index 0000000000..0edcbc793f --- /dev/null +++ b/ansible/roles/oci-cloud-storage/tasks/upload.yml @@ -0,0 +1,8 @@ +--- +- name: Ensure oci oss bucket exists + command: oci os bucket get --name {{ oss_bucket_name }} + +- name: Upload to oci oss bucket + command: oci os object put -bn {{ oss_bucket_name }} 
--name {{ oss_path }} --file {{ local_file_or_folder_path }} --content-type auto + async: 3600 + poll: 10 From d7ee16170589cb7f2f1261429443a2c3fdc283a1 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 22 Dec 2022 13:29:33 +1100 Subject: [PATCH 148/616] overwrite oss file if exists Signed-off-by: Deepak Devadathan --- ansible/roles/oci-cloud-storage/tasks/upload.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/oci-cloud-storage/tasks/upload.yml b/ansible/roles/oci-cloud-storage/tasks/upload.yml index 0edcbc793f..9e1ceb4289 100644 --- a/ansible/roles/oci-cloud-storage/tasks/upload.yml +++ b/ansible/roles/oci-cloud-storage/tasks/upload.yml @@ -3,6 +3,6 @@ command: oci os bucket get --name {{ oss_bucket_name }} - name: Upload to oci oss bucket - command: oci os object put -bn {{ oss_bucket_name }} --name {{ oss_path }} --file {{ local_file_or_folder_path }} --content-type auto + command: oci os object put -bn {{ oss_bucket_name }} --name {{ oss_path }} --file {{ local_file_or_folder_path }} --content-type auto --force async: 3600 poll: 10 From 3e1b3207fed9a61257b17f8cf13e41157149b162 Mon Sep 17 00:00:00 2001 From: SadanandGowda Date: Thu, 22 Dec 2022 14:49:57 +0530 Subject: [PATCH 149/616] csp migration variables update --- .../helm_charts/core/analytics/templates/deployment.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kubernetes/helm_charts/core/analytics/templates/deployment.yaml b/kubernetes/helm_charts/core/analytics/templates/deployment.yaml index 57198cb77b..b0d7aad44b 100644 --- a/kubernetes/helm_charts/core/analytics/templates/deployment.yaml +++ b/kubernetes/helm_charts/core/analytics/templates/deployment.yaml @@ -35,13 +35,13 @@ spec: value: {{ .Values.env.min_heap | quote }} - name: MAX_HEAP value: {{ .Values.env.max_heap | quote }} - - name: cloud_storage_secret + - name: azure_storage_secret value: {{ .Values.env.cloud_private_account_secret | quote }} - - name: 
cloud_storage_key + - name: azure_storage_key value: {{ .Values.env.cloud_private_account_name | quote }} - - name: public_cloud_storage_secret + - name: public_azure_storage_secret value: {{ .Values.env.cloud_public_account_secret | quote }} - - name: public_cloud_storage_key + - name: public_azure_storage_key value: {{ .Values.env.cloud_public_account_name | quote }} - name: _JAVA_OPTIONS value: -Dlog4j2.formatMsgNoLookups=true From 56ea8f9b418ebe6ee2c566fe7959eb02e1bc3ad2 Mon Sep 17 00:00:00 2001 From: SadanandGowda Date: Thu, 22 Dec 2022 14:51:00 +0530 Subject: [PATCH 150/616] csp migration variables update --- kubernetes/helm_charts/core/analytics/values.j2 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kubernetes/helm_charts/core/analytics/values.j2 b/kubernetes/helm_charts/core/analytics/values.j2 index f86925ad5c..354dcab3d3 100644 --- a/kubernetes/helm_charts/core/analytics/values.j2 +++ b/kubernetes/helm_charts/core/analytics/values.j2 @@ -8,10 +8,10 @@ env: javaoptions: {{analytics_java_mem_limit|default('-Xmx600m')}} min_heap: {{analytics_min_heap_limit|default('-Xms1g')}} max_heap: {{analytics_max_heap_limit|default('-Xmx2g')}} - cloud_private_account_secret: {{ sunbird_private_storage_account_key }} - cloud_private_account_name: {{ sunbird_private_storage_account_name }} - cloud_public_account_secret: {{ sunbird_public_storage_account_key }} - cloud_public_account_name: {{ sunbird_public_storage_account_name }} + azure_private_account_secret: {{ sunbird_private_storage_account_key }} + azure_private_account_name: {{ sunbird_private_storage_account_name }} + azure_public_account_secret: {{ sunbird_public_storage_account_key }} + azure_public_account_name: {{ sunbird_public_storage_account_name }} replicaCount: {{analytics_replicacount|default(1)}} repository: {{analytics_repository|default('sunbird-analytics-service')}} From 94ec16350315841289c39bca6f703e3f8f6c4d1a Mon Sep 17 00:00:00 2001 From: Santhosh Gandham Date: Thu, 22 
Dec 2022 14:54:09 +0530 Subject: [PATCH 151/616] Updated template value for dial ansible group (#3681) --- private_repo/ansible/inventory/dev/KnowledgePlatform/hosts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts b/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts index e735ac4c01..c144bc6fa2 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts @@ -17,7 +17,7 @@ learning1 redis1 [dial1] -18.3.1.5 +10.0.1.5 [dial:children] dial1 From 6c6eebff0ea49dbf0227d5d4e42f052a2105beba Mon Sep 17 00:00:00 2001 From: SadanandGowda Date: Thu, 22 Dec 2022 15:29:57 +0530 Subject: [PATCH 152/616] csp migration variables update --- kubernetes/helm_charts/core/analytics/values.j2 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kubernetes/helm_charts/core/analytics/values.j2 b/kubernetes/helm_charts/core/analytics/values.j2 index 354dcab3d3..0a849d99b9 100644 --- a/kubernetes/helm_charts/core/analytics/values.j2 +++ b/kubernetes/helm_charts/core/analytics/values.j2 @@ -8,10 +8,10 @@ env: javaoptions: {{analytics_java_mem_limit|default('-Xmx600m')}} min_heap: {{analytics_min_heap_limit|default('-Xms1g')}} max_heap: {{analytics_max_heap_limit|default('-Xmx2g')}} - azure_private_account_secret: {{ sunbird_private_storage_account_key }} - azure_private_account_name: {{ sunbird_private_storage_account_name }} - azure_public_account_secret: {{ sunbird_public_storage_account_key }} - azure_public_account_name: {{ sunbird_public_storage_account_name }} + azure_private_account_secret: {{ cloud_private_account_secret }} + azure_private_account_name: {{ cloud_private_account_name }} + azure_public_account_secret: {{ cloud_public_account_secret }} + azure_public_account_name: {{ cloud_public_account_name }} replicaCount: {{analytics_replicacount|default(1)}} repository: 
{{analytics_repository|default('sunbird-analytics-service')}} From afe31e6358d0839145b7d320c0259a6204be7f01 Mon Sep 17 00:00:00 2001 From: SadanandGowda Date: Thu, 22 Dec 2022 15:31:16 +0530 Subject: [PATCH 153/616] csp migration variables update --- .../helm_charts/core/analytics/templates/deployment.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kubernetes/helm_charts/core/analytics/templates/deployment.yaml b/kubernetes/helm_charts/core/analytics/templates/deployment.yaml index b0d7aad44b..0926360f76 100644 --- a/kubernetes/helm_charts/core/analytics/templates/deployment.yaml +++ b/kubernetes/helm_charts/core/analytics/templates/deployment.yaml @@ -36,13 +36,13 @@ spec: - name: MAX_HEAP value: {{ .Values.env.max_heap | quote }} - name: azure_storage_secret - value: {{ .Values.env.cloud_private_account_secret | quote }} + value: {{ .Values.env.azure_private_account_secret | quote }} - name: azure_storage_key - value: {{ .Values.env.cloud_private_account_name | quote }} + value: {{ .Values.env.azure_private_account_name | quote }} - name: public_azure_storage_secret - value: {{ .Values.env.cloud_public_account_secret | quote }} + value: {{ .Values.env.azure_public_account_secret | quote }} - name: public_azure_storage_key - value: {{ .Values.env.cloud_public_account_name | quote }} + value: {{ .Values.env.azure_public_account_name | quote }} - name: _JAVA_OPTIONS value: -Dlog4j2.formatMsgNoLookups=true envFrom: From c7db90c106a7f82540d1cf211d42e419d6d10a75 Mon Sep 17 00:00:00 2001 From: Sadanand <100120230+SadanandGowda@users.noreply.github.com> Date: Thu, 22 Dec 2022 16:41:15 +0530 Subject: [PATCH 154/616] csp migration variables update (#3671) --- kubernetes/helm_charts/core/analytics/values.j2 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kubernetes/helm_charts/core/analytics/values.j2 b/kubernetes/helm_charts/core/analytics/values.j2 index 354dcab3d3..0a849d99b9 100644 --- 
a/kubernetes/helm_charts/core/analytics/values.j2 +++ b/kubernetes/helm_charts/core/analytics/values.j2 @@ -8,10 +8,10 @@ env: javaoptions: {{analytics_java_mem_limit|default('-Xmx600m')}} min_heap: {{analytics_min_heap_limit|default('-Xms1g')}} max_heap: {{analytics_max_heap_limit|default('-Xmx2g')}} - azure_private_account_secret: {{ sunbird_private_storage_account_key }} - azure_private_account_name: {{ sunbird_private_storage_account_name }} - azure_public_account_secret: {{ sunbird_public_storage_account_key }} - azure_public_account_name: {{ sunbird_public_storage_account_name }} + azure_private_account_secret: {{ cloud_private_account_secret }} + azure_private_account_name: {{ cloud_private_account_name }} + azure_public_account_secret: {{ cloud_public_account_secret }} + azure_public_account_name: {{ cloud_public_account_name }} replicaCount: {{analytics_replicacount|default(1)}} repository: {{analytics_repository|default('sunbird-analytics-service')}} From 1195db50267581f35163107d7b6c5bea1433eed2 Mon Sep 17 00:00:00 2001 From: SadanandGowda Date: Thu, 22 Dec 2022 16:52:39 +0530 Subject: [PATCH 155/616] csp migration variables update --- kubernetes/helm_charts/core/analytics/values.j2 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kubernetes/helm_charts/core/analytics/values.j2 b/kubernetes/helm_charts/core/analytics/values.j2 index 0a849d99b9..c3cd3b46f7 100644 --- a/kubernetes/helm_charts/core/analytics/values.j2 +++ b/kubernetes/helm_charts/core/analytics/values.j2 @@ -8,10 +8,10 @@ env: javaoptions: {{analytics_java_mem_limit|default('-Xmx600m')}} min_heap: {{analytics_min_heap_limit|default('-Xms1g')}} max_heap: {{analytics_max_heap_limit|default('-Xmx2g')}} - azure_private_account_secret: {{ cloud_private_account_secret }} - azure_private_account_name: {{ cloud_private_account_name }} - azure_public_account_secret: {{ cloud_public_account_secret }} - azure_public_account_name: {{ cloud_public_account_name }} + 
azure_private_account_secret: {{ cloud_private_storage_secret }} + azure_private_account_name: {{ cloud_private_storage_accountname }} + azure_public_account_secret: {{ cloud_public_storage_secret }} + azure_public_account_name: {{ cloud_public_storage_accountname }} replicaCount: {{analytics_replicacount|default(1)}} repository: {{analytics_repository|default('sunbird-analytics-service')}} From 35e9deae2ad28fa4334652b079c19588098f2cbc Mon Sep 17 00:00:00 2001 From: NIKHIL VARMA M <63706239+nikhilvarma940@users.noreply.github.com> Date: Fri, 23 Dec 2022 16:55:48 +0530 Subject: [PATCH 156/616] Ansible-postgres-patroni cluster (#3684) * Ansible-postgres-patroni cluster * README.md updated --- ansible/postgresql-patroni-cluster.yaml | 14 + ansible/roles/ansible-etcd/README.md | 100 +++++ ansible/roles/ansible-etcd/defaults/main.yml | 14 + ansible/roles/ansible-etcd/handlers/main.yml | 12 + ansible/roles/ansible-etcd/meta/main.yml | 52 +++ ansible/roles/ansible-etcd/tasks/main.yml | 19 + ansible/roles/ansible-etcd/templates/etcd.j2 | 403 ++++++++++++++++++ ansible/roles/ansible-etcd/vars/main.yml | 2 + ansible/roles/ansible-haproxy/README.md | 100 +++++ .../roles/ansible-haproxy/defaults/main.yml | 2 + .../roles/ansible-haproxy/handlers/main.yml | 12 + ansible/roles/ansible-haproxy/meta/main.yml | 52 +++ ansible/roles/ansible-haproxy/tasks/main.yml | 19 + .../ansible-haproxy/templates/haproxy.cfg.j2 | 26 ++ ansible/roles/ansible-haproxy/vars/main.yml | 2 + .../roles/ansible-postgres_patroni/README.md | 100 +++++ .../defaults/main.yml | 16 + .../handlers/main.yml | 18 + .../ansible-postgres_patroni/meta/main.yml | 52 +++ .../ansible-postgres_patroni/tasks/main.yml | 105 +++++ .../templates/patroni.service.j2 | 17 + .../templates/patroni.yaml.j2 | 58 +++ .../ansible-postgres_patroni/vars/main.yml | 2 + 23 files changed, 1197 insertions(+) create mode 100644 ansible/postgresql-patroni-cluster.yaml create mode 100644 ansible/roles/ansible-etcd/README.md create mode 100644 
ansible/roles/ansible-etcd/defaults/main.yml create mode 100644 ansible/roles/ansible-etcd/handlers/main.yml create mode 100644 ansible/roles/ansible-etcd/meta/main.yml create mode 100644 ansible/roles/ansible-etcd/tasks/main.yml create mode 100644 ansible/roles/ansible-etcd/templates/etcd.j2 create mode 100644 ansible/roles/ansible-etcd/vars/main.yml create mode 100644 ansible/roles/ansible-haproxy/README.md create mode 100644 ansible/roles/ansible-haproxy/defaults/main.yml create mode 100644 ansible/roles/ansible-haproxy/handlers/main.yml create mode 100644 ansible/roles/ansible-haproxy/meta/main.yml create mode 100644 ansible/roles/ansible-haproxy/tasks/main.yml create mode 100644 ansible/roles/ansible-haproxy/templates/haproxy.cfg.j2 create mode 100644 ansible/roles/ansible-haproxy/vars/main.yml create mode 100644 ansible/roles/ansible-postgres_patroni/README.md create mode 100644 ansible/roles/ansible-postgres_patroni/defaults/main.yml create mode 100644 ansible/roles/ansible-postgres_patroni/handlers/main.yml create mode 100644 ansible/roles/ansible-postgres_patroni/meta/main.yml create mode 100644 ansible/roles/ansible-postgres_patroni/tasks/main.yml create mode 100644 ansible/roles/ansible-postgres_patroni/templates/patroni.service.j2 create mode 100644 ansible/roles/ansible-postgres_patroni/templates/patroni.yaml.j2 create mode 100644 ansible/roles/ansible-postgres_patroni/vars/main.yml diff --git a/ansible/postgresql-patroni-cluster.yaml b/ansible/postgresql-patroni-cluster.yaml new file mode 100644 index 0000000000..7fcae245a4 --- /dev/null +++ b/ansible/postgresql-patroni-cluster.yaml @@ -0,0 +1,14 @@ +- hosts: etcd + become: yes + roles: + - ansible-etcd + +- hosts: postgresql + become: yes + roles: + - ansible-postgres_patroni + +- hosts: haproxy + become: yes + roles: + - ansible-haproxy diff --git a/ansible/roles/ansible-etcd/README.md b/ansible/roles/ansible-etcd/README.md new file mode 100644 index 0000000000..15c49e336a --- /dev/null +++ 
b/ansible/roles/ansible-etcd/README.md @@ -0,0 +1,100 @@ +Role Name +========= +``` +postgresql-cluster-ansible +``` +Requirements +------------ +``` +1. comment or uncomment the properties in templates of the roles available as per the requirement. +2. provide the variables where ever required. +``` +Role Variables +-------------- +``` +In hosts files: +1. etcd_ip : +2. postgresql_origin: +3. postgresql_1: +4. postgresql_2: +5. postgresql_3: + + +etcd Role variables: +postgres_patroni_etcd_name: "postgres-etcd" # cluster name +postgres_patroni_etcd_initial_cluster: "{{ etcd_name }}=http://{{ etcd_ip }}:2380" # initial cluster +postgres_patroni_etcd_initial_cluster_state: "postgres" # initial cluster state +postgres_patroni_etcd_initial_cluster_token: "etcd-cluster-postgres" # initial cluster token +postgres_patroni_etcd_initial_advertise_peer_urls: "http://{{ etcd_ip }}:2380" # initial advertise peer urls +postgres_patroni_etcd_listen_peer_urls: "http://{{ etcd_ip }}:2380" # listen peer urls +postgres_patroni_etcd_listen_client_urls: "http://{{ etcd_ip }}:2379,http://127.0.0.1:2379" # listen client urls +postgres_patroni_etcd_advertise_client_urls: "http://{{ etcd_ip }}:2379" # advertise client urls + +Ansible-postgres_patroni role Variables: +#patroni .yaml config +postgres_cluster_name: postgresql-prod # Cluster name + +# users admin password +postgres_patroni_admin_password: admin # Admin Password + +#Authentication +# Replication +postgres_patroni_replication_username: replicator # Replication Username +postgres_patroni_replication_password: password # Replication password + +#SuperUser +postgres_patroni_superuser_username: postgres # Superuser username +postgres_patroni_superuser_password: password # Superuser Password +``` +Architecture +------------ +![Untitled Diagram (1)](https://user-images.githubusercontent.com/63706239/203470986-f8ec3d56-a6d2-4678-b594-dc20a29ec972.jpg) + +``` +Description: +Ansible postgres cluter role is used to setup a postgres 
cluster with 1 Primary and 2 replicas where we are using the patroni as HA solution for postgres cluster.Patroni can be configured to handle tasks like replication, backups and restorations.We are also using HAProxy load Balancer to route the traffic and Etcd is a fault-tolerant, distributed key-value store that is used to store the state of the Postgres cluster. Via Patroni, all of the Postgres nodes make use of etcd to keep the Postgres cluster up and running. + +Users and applications can access the postgres server using Haproxy IP and Port defined in the haproxy configuration rules. +``` + +Inventory hosts file as shown Below +----------------------------------- +``` +[etcd] +192.168.245.129 etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + +[postgresql] +192.168.245.129 postgresql_origin=192.168.245.129 postgresql_1=192.168.245.129 postgresql_2=192.168.245.130 postgresql_3=192.168.245.131 etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + postgresql_origin= postgresql_1= postgresql_2= postgresql_3= etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + postgresql_origin= postgresql_1= postgresql_2= postgresql_3= etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + +[haproxy] +192.168.245.129 postgresql_1=192.168.245.129 postgresql_2=192.168.245.130 postgresql_3=192.168.245.131 ansible_ssh_user=ubuntu +``` + +License +------- +``` +BSD +``` +Author Information +------------------ +``` +Nikhil Varma + +Senior DevOps Engineer +``` + +postgres cluster setup using ansible +----------------------------------- + +``` +# Command to run Ansibe-postgresql role + +$ ansible-playbook -i inventory/hosts main.yaml -K --ask-pass + +# Commands to run postgresql roles by using the tags and skipping the tags + +$ ansible-playbook -i inventory/hosts main.yaml -K --ask-pass --tags="" +$ ansible-playbook -i inventory/hosts main.yaml -K --ask-pass --skip-tags="" +``` diff --git a/ansible/roles/ansible-etcd/defaults/main.yml b/ansible/roles/ansible-etcd/defaults/main.yml new file mode 100644 
index 0000000000..0478f26652 --- /dev/null +++ b/ansible/roles/ansible-etcd/defaults/main.yml @@ -0,0 +1,14 @@ +--- +# defaults file for ansible-etcd + + +# etcd cluster variables +postgres_patroni_etcd_name: "postgres-etcd" +postgres_patroni_etcd_initial_cluster: "{{ etcd_name }}=http://{{ etcd_ip }}:2380" +postgres_patroni_etcd_initial_cluster_state: "postgres" +postgres_patroni_etcd_initial_cluster_token: "etcd-cluster-postgres" +postgres_patroni_etcd_initial_advertise_peer_urls: "http://{{ etcd_ip }}:2380" +postgres_patroni_etcd_listen_peer_urls: "http://{{ etcd_ip }}:2380" +postgres_patroni_etcd_listen_client_urls: "http://{{ etcd_ip }}:2379,http://127.0.0.1:2379" +postgres_patroni_etcd_advertise_client_urls: "http://{{ etcd_ip }}:2379" +#etcd_data_dir: \ No newline at end of file diff --git a/ansible/roles/ansible-etcd/handlers/main.yml b/ansible/roles/ansible-etcd/handlers/main.yml new file mode 100644 index 0000000000..33d54607a2 --- /dev/null +++ b/ansible/roles/ansible-etcd/handlers/main.yml @@ -0,0 +1,12 @@ +--- +# handlers file for ansible-etcd +- name: Restart etcd systemd + systemd: + name: etcd.service + state: restarted + daemon_reload: yes + +- name: Restart etcd service + systemd: + name: etcd.service + state: restarted \ No newline at end of file diff --git a/ansible/roles/ansible-etcd/meta/main.yml b/ansible/roles/ansible-etcd/meta/main.yml new file mode 100644 index 0000000000..6b5d1fd295 --- /dev/null +++ b/ansible/roles/ansible-etcd/meta/main.yml @@ -0,0 +1,52 @@ +galaxy_info: + author: Nikhil Varma + description: Ansible-etcd for distributed key store for postgresql cluster +# company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - 
Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. 
\ No newline at end of file diff --git a/ansible/roles/ansible-etcd/tasks/main.yml b/ansible/roles/ansible-etcd/tasks/main.yml new file mode 100644 index 0000000000..5e6d329bc1 --- /dev/null +++ b/ansible/roles/ansible-etcd/tasks/main.yml @@ -0,0 +1,19 @@ +--- +# tasks file for ansible-etcd +- name: Install etcd after updating apt + apt: + name: + - etcd + state: present + update_cache: yes + tags: + - etcd Install + +- name: Template configuration file to etcd + template: + src: etcd.j2 + dest: '/etc/default/etcd' + notify: + - Restart etcd service + tags: + - Restart etcd \ No newline at end of file diff --git a/ansible/roles/ansible-etcd/templates/etcd.j2 b/ansible/roles/ansible-etcd/templates/etcd.j2 new file mode 100644 index 0000000000..09e6ad6450 --- /dev/null +++ b/ansible/roles/ansible-etcd/templates/etcd.j2 @@ -0,0 +1,403 @@ +## etcd(1) daemon options +## See "/usr/share/doc/etcd-server/op-guide/configuration.md.gz" + +### Member flags + +##### --name +## Human-readable name for this member. +## This value is referenced as this node's own entries listed in the +## `--initial-cluster` flag (e.g., `default=http://localhost:2380`). This +## needs to match the key used in the flag if using static bootstrapping. When +## using discovery, each member must have a unique name. `Hostname` or +## `machine-id` can be a good choice. +## default: "default" +#ETCD_NAME="postgres-etcd" + +##### --data-dir +## Path to the data directory. +## default: "${name}.etcd" +# ETCD_DATA_DIR="/var/lib/etcd/default" + +##### --wal-dir +## Path to the dedicated wal directory. If this flag is set, etcd will write +## the WAL files to the walDir rather than the dataDir. This allows a +## dedicated disk to be used, and helps avoid io competition between logging +## and other IO operations. +## default: "" +# ETCD_WAL_DIR + +##### --snapshot-count +## Number of committed transactions to trigger a snapshot to disk. 
+## default: "100000" +# ETCD_SNAPSHOT_COUNT="100000" + +##### --heartbeat-interval +## Time (in milliseconds) of a heartbeat interval. +## default: "100" +# ETCD_HEARTBEAT_INTERVAL="100" + +##### --election-timeout +## Time (in milliseconds) for an election to timeout. See +## /usr/share/doc/etcd-server/tuning.md.gz for details. +## default: "1000" +# ETCD_ELECTION_TIMEOUT="1000" + +##### --listen-peer-urls +## List of URLs to listen on for peer traffic. This flag tells the etcd to +## accept incoming requests from its peers on the specified scheme://IP:port +## combinations. Scheme can be either http or https.If 0.0.0.0 is specified as +## the IP, etcd listens to the given port on all interfaces. If an IP address is +## given as well as a port, etcd will listen on the given port and interface. +## Multiple URLs may be used to specify a number of addresses and ports to listen +## on. The etcd will respond to requests from any of the listed addresses and +## ports. +## default: "http://localhost:2380" +## example: "http://10.0.0.1:2380" +## invalid example: "http://example.com:2380" (domain name is invalid for binding) +#ETCD_LISTEN_PEER_URLS="http://172.51.1.29:2380" + +##### --listen-client-urls +## List of URLs to listen on for client traffic. This flag tells the etcd to +## accept incoming requests from the clients on the specified scheme://IP:port +## combinations. Scheme can be either http or https. If 0.0.0.0 is specified as +## the IP, etcd listens to the given port on all interfaces. If an IP address is +## given as well as a port, etcd will listen on the given port and interface. +## Multiple URLs may be used to specify a number of addresses and ports to listen +## on. The etcd will respond to requests from any of the listed addresses and +## ports. 
+## default: "http://localhost:2379" +## example: "http://10.0.0.1:2379" +## invalid example: "http://example.com:2379" (domain name is invalid for binding) +#ETCD_LISTEN_CLIENT_URLS="http://172.51.1.29:2379,http://127.0.0.1:2379" + +##### --max-snapshots +## Maximum number of snapshot files to retain (0 is unlimited) +## The default for users on Windows is unlimited, and manual purging down to 5 +## (or some preference for safety) is recommended. +## default: 5 +# ETCD_MAX_SNAPSHOTS="5" + +##### --max-wals +## Maximum number of wal files to retain (0 is unlimited) +## The default for users on Windows is unlimited, and manual purging down to 5 +## (or some preference for safety) is recommended. +## default: 5 +# ETCD_MAX_WALS="5" + +##### --cors +## Comma-separated white list of origins for CORS (cross-origin resource +## sharing). +## default: none +# ETCD_CORS + +#### --quota-backend-bytes +## Raise alarms when backend size exceeds the given quota (0 defaults to low +## space quota). +## default: 0 +# ETCD_QUOTA_BACKEND_BYTES="0" + +#### --backend-batch-limit +## BackendBatchLimit is the maximum operations before commit the backend +## transaction. +## default: 0 +# ETCD_BACKEND_BATCH_LIMIT="0" + +#### --backend-batch-interval +## BackendBatchInterval is the maximum time before commit the backend +## transaction. +## default: 0 +# ETCD_BACKEND_BATCH_INTERVAL="0" + +#### --max-txn-ops +## Maximum number of operations permitted in a transaction. +## default: 128 +# ETCD_MAX_TXN_OPS="128" + +#### --max-request-bytes +## Maximum client request size in bytes the server will accept. +## default: 1572864 +# ETCD_MAX_REQUEST_BYTES="1572864" + +#### --grpc-keepalive-min-time +## Minimum duration interval that a client should wait before pinging server. +## default: 5s +# ETCD_GRPC_KEEPALIVE_MIN_TIME="5" + +#### --grpc-keepalive-interval +## Frequency duration of server-to-client ping to check if a connection is +## alive (0 to disable). 
+## default: 2h +# ETCD_GRPC_KEEPALIVE_INTERVAL="2h" + +#### --grpc-keepalive-timeout +## Additional duration of wait before closing a non-responsive connection +## (0 to disable). +## default: 20s +# ETCD_GRPC_KEEPALIVE_TIMEOUT="20s" + + +### Clustering flags + +# `--initial` prefix flags are used in bootstrapping (static bootstrap, +# discovery-service bootstrap or runtime reconfiguration) a new member, and +# ignored when restarting an existing member. + +# `--discovery` prefix flags need to be set when using discovery service. + +##### --initial-advertise-peer-urls + +## List of this member's peer URLs to advertise to the rest of the cluster. +## These addresses are used for communicating etcd data around the cluster. At +## least one must be routable to all cluster members. These URLs can contain +## domain names. +## default: "http://localhost:2380" +## example: "http://example.com:2380, http://10.0.0.1:2380" +#ETCD_INITIAL_ADVERTISE_PEER_URLS="http://172.51.1.29:2380" + +##### --initial-cluster +## Initial cluster configuration for bootstrapping. +## The key is the value of the `--name` flag for each node provided. The +## default uses `default` for the key because this is the default for the +## `--name` flag. +## default: "default=http://localhost:2380" +#ETCD_INITIAL_CLUSTER="postgres-etcd=http://172.51.1.29:2380" + +##### --initial-cluster-state +## Initial cluster state ("new" or "existing"). Set to `new` for all members +## present during initial static or DNS bootstrapping. If this option is set to +## `existing`, etcd will attempt to join the existing cluster. If the wrong value +## is set, etcd will attempt to start but fail safely. +## default: "new" +# ETCD_INITIAL_CLUSTER_STATE="new" + +##### --initial-cluster-token +## Initial cluster token for the etcd cluster during bootstrap. 
+## default: "etcd-cluster" +#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" + +##### --advertise-client-urls +## List of this member's client URLs to advertise to the rest of the cluster. +## These URLs can contain domain names. +## Be careful if advertising URLs such as http://localhost:2379 from a cluster +## member and are using the proxy feature of etcd. This will cause loops, because +## the proxy will be forwarding requests to itself until its resources (memory, +## file descriptors) are eventually depleted. +## default: "http://localhost:2379" +## example: "http://example.com:2379, http://10.0.0.1:2379" +#ETCD_ADVERTISE_CLIENT_URLS="http://172.51.1.29:2379" + +##### --discovery +## Discovery URL used to bootstrap the cluster. +## default: none +# ETCD_DISCOVERY + +##### --discovery-srv +## DNS srv domain used to bootstrap the cluster. +## default: none +# ETCD_DISCOVERY_SRV + +##### --discovery-fallback +## Expected behavior ("exit" or "proxy") when discovery services fails. "proxy" +## supports v2 API only. +## default: "proxy" +# ETCD_DISCOVERY_FALLBACK="proxy" + +##### --discovery-proxy +## HTTP proxy to use for traffic to discovery service. +## default: none +# ETCD_DISCOVERY_PROXY + +##### --strict-reconfig-check +## Reject reconfiguration requests that would cause quorum loss. +## default: false +# ETCD_STRICT_RECONFIG_CHECK + +##### --auto-compaction-retention +## Auto compaction retention for mvcc key value store in hour. 0 means disable +## auto compaction. +## default: 0 +# ETCD_AUTO_COMPACTION_RETENTION="0" + +##### --enable-v2 +## Accept etcd V2 client requests +## default: true +# ETCD_ENABLE_V2="true" + + +### Proxy flags + +# `--proxy` prefix flags configures etcd to run in proxy mode. "proxy" supports +# v2 API only. + +##### --proxy +## Proxy mode setting ("off", "readonly" or "on"). 
+## default: "off" +# ETCD_PROXY="off" + +##### --proxy-failure-wait +## Time (in milliseconds) an endpoint will be held in a failed state before +## being reconsidered for proxied requests. +## default: 5000 +# ETCD_PROXY_FAILURE_WAIT="5000" + +##### --proxy-refresh-interval +## Time (in milliseconds) of the endpoints refresh interval. +## default: 30000 +# ETCD_PROXY_REFRESH_INTERVAL="30000" + +##### --proxy-dial-timeout +## Time (in milliseconds) for a dial to timeout or 0 to disable the timeout +## default: 1000 +# ETCD_PROXY_DIAL_TIMEOUT="1000" + +##### --proxy-write-timeout +## Time (in milliseconds) for a write to timeout or 0 to disable the timeout. +## default: 5000 +# ETCD_PROXY_WRITE_TIMEOUT="5000" + +##### --proxy-read-timeout +## Time (in milliseconds) for a read to timeout or 0 to disable the timeout. +## Don't change this value if using watches because use long polling requests. +## default: 0 +# ETCD_PROXY_READ_TIMEOUT="0" + + +### Security flags + +# The security flags help to build a secure etcd cluster. + +##### --ca-file (**DEPRECATED**) +## Path to the client server TLS CA file. `--ca-file ca.crt` could be replaced +## by `--trusted-ca-file ca.crt --client-cert-auth` and etcd will perform the +## same. +## default: none +# ETCD_CA_FILE + +##### --cert-file +## Path to the client server TLS cert file. +## default: none +# ETCD_CERT_FILE + +##### --key-file +## Path to the client server TLS key file. +## default: none +# ETCD_KEY_FILE + +##### --client-cert-auth +## Enable client cert authentication. +## CN authentication is not supported by gRPC-gateway. +## default: false +# ETCD_CLIENT_CERT_AUTH + +#### --client-crl-file +## Path to the client certificate revocation list file. +## default: "" +# ETCD_CLIENT_CRL_FILE + +##### --trusted-ca-file +## Path to the client server TLS trusted CA key file. 
+## default: none +# ETCD_TRUSTED_CA_FILE + +##### --auto-tls +## Client TLS using generated certificates +## default: false +# ETCD_AUTO_TLS + +##### --peer-ca-file (**DEPRECATED**) +## Path to the peer server TLS CA file. `--peer-ca-file ca.crt` could be +## replaced by `--peer-trusted-ca-file ca.crt --peer-client-cert-auth` and etcd +## will perform the same. +## default: none +# ETCD_PEER_CA_FILE + +##### --peer-cert-file +## Path to the peer server TLS cert file. +## default: none +# ETCD_PEER_CERT_FILE + +##### --peer-key-file +## Path to the peer server TLS key file. +## default: none +# ETCD_PEER_KEY_FILE + +##### --peer-client-cert-auth +## Enable peer client cert authentication. +## default: false +# ETCD_PEER_CLIENT_CERT_AUTH + +#### --peer-crl-file +## Path to the peer certificate revocation list file. +## default: "" +# ETCD_PEER_CRL_FILE + +##### --peer-trusted-ca-file +## Path to the peer server TLS trusted CA file. +## default: none +# ETCD_PEER_TRUSTED_CA_FILE + +##### --peer-auto-tls +## Peer TLS using generated certificates +## default: false +# ETCD_PEER_AUTO_TLS + +#### --peer-cert-allowed-cn +## Allowed CommonName for inter peer authentication. +## default: none +# ETCD_PEER_CERT_ALLOWED_CN + +#### --cipher-suites +## Comma-separated list of supported TLS cipher suites between server/client and +## peers. +## default: "" +# ETCD_CIPHER_SUITES + +#### --experimental-peer-skip-client-san-verification +## Skip verification of SAN field in client certificate for peer connections. +## default: false +#+ ETCD_EXPERIMENTAL_PEER_SKIP_CLIENT_SAN_VERIFICATION + + +### Logging flags + +#### --log-outputs +## Specify 'stdout' or 'stderr' to skip journald logging even when running +## under systemd, or list of comma separated output targets. +## default: default +# ETCD_LOG_OUTPUTS + +##### --debug +## Drop the default log level to DEBUG for all subpackages. 
+## default: false (INFO for all packages) +# ETCD_DEBUG + +##### --log-package-levels +## Set individual etcd subpackages to specific log levels. An example being +## `etcdserver=WARNING,security=DEBUG` +## default: none (INFO for all packages) +# ETCD_LOG_PACKAGE_LEVELS + + +### Unsafe flags + +# Please be CAUTIOUS when using unsafe flags because it will break the guarantees given by the consensus protocol. +# For example, it may panic if other members in the cluster are still alive. +# Follow the instructions when using these flags. + +##### --force-new-cluster +## Force to create a new one-member cluster. It commits configuration changes +## forcing to remove all existing members in the cluster and add itself. It needs +## to be set to restore a backup. +## default: false +# ETCD_FORCE_NEW_CLUSTER +# +# +ETCD_INITIAL_CLUSTER="{{ postgres_patroni_etcd_initial_cluster }}" +ETCD_INITIAL_CLUSTER_STATE="{{ postgres_patroni_etcd_initial_cluster_state }}" +ETCD_INITIAL_CLUSTER_TOKEN="{{ postgres_patroni_etcd_initial_cluster_token }}" +ETCD_INITIAL_ADVERTISE_PEER_URLS="{{ postgres_patroni_etcd_initial_advertise_peer_urls }}" +#ETCD_DATA_DIR="/var/etcd" +ETCD_LISTEN_PEER_URLS="{{ postgres_patroni_etcd_listen_peer_urls }}" +ETCD_LISTEN_CLIENT_URLS="{{ postgres_patroni_etcd_listen_client_urls }}" +ETCD_ADVERTISE_CLIENT_URLS="{{ postgres_patroni_etcd_advertise_client_urls }}" +ETCD_NAME="{{ postgres_patroni_etcd_name }}" \ No newline at end of file diff --git a/ansible/roles/ansible-etcd/vars/main.yml b/ansible/roles/ansible-etcd/vars/main.yml new file mode 100644 index 0000000000..411544ecd6 --- /dev/null +++ b/ansible/roles/ansible-etcd/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for ansible-etcd \ No newline at end of file diff --git a/ansible/roles/ansible-haproxy/README.md b/ansible/roles/ansible-haproxy/README.md new file mode 100644 index 0000000000..15c49e336a --- /dev/null +++ b/ansible/roles/ansible-haproxy/README.md @@ -0,0 +1,100 @@ +Role Name +========= 
+``` +postgresql-cluster-ansible +``` +Requirements +------------ +``` +1. comment or uncomment the properties in templates of the roles available as per the requirement. +2. provide the variables where ever required. +``` +Role Variables +-------------- +``` +In hosts files: +1. etcd_ip : +2. postgresql_origin: +3. postgresql_1: +4. postgresql_2: +5. postgresql_3: + + +etcd Role variables: +postgres_patroni_etcd_name: "postgres-etcd" # cluster name +postgres_patroni_etcd_initial_cluster: "{{ etcd_name }}=http://{{ etcd_ip }}:2380" # initial cluster +postgres_patroni_etcd_initial_cluster_state: "postgres" # initial cluster state +postgres_patroni_etcd_initial_cluster_token: "etcd-cluster-postgres" # initial cluster token +postgres_patroni_etcd_initial_advertise_peer_urls: "http://{{ etcd_ip }}:2380" # initial advertise peer urls +postgres_patroni_etcd_listen_peer_urls: "http://{{ etcd_ip }}:2380" # listen peer urls +postgres_patroni_etcd_listen_client_urls: "http://{{ etcd_ip }}:2379,http://127.0.0.1:2379" # listen client urls +postgres_patroni_etcd_advertise_client_urls: "http://{{ etcd_ip }}:2379" # advertise client urls + +Ansible-postgres_patroni role Variables: +#patroni .yaml config +postgres_cluster_name: postgresql-prod # Cluster name + +# users admin password +postgres_patroni_admin_password: admin # Admin Password + +#Authentication +# Replication +postgres_patroni_replication_username: replicator # Replication Username +postgres_patroni_replication_password: password # Replication password + +#SuperUser +postgres_patroni_superuser_username: postgres # Superuser username +postgres_patroni_superuser_password: password # Superuser Password +``` +Architecture +------------ +![Untitled Diagram (1)](https://user-images.githubusercontent.com/63706239/203470986-f8ec3d56-a6d2-4678-b594-dc20a29ec972.jpg) + +``` +Description: +Ansible postgres cluter role is used to setup a postgres cluster with 1 Primary and 2 replicas where we are using the patroni as HA solution 
for postgres cluster.Patroni can be configured to handle tasks like replication, backups and restorations.We are also using HAProxy load Balancer to route the traffic and Etcd is a fault-tolerant, distributed key-value store that is used to store the state of the Postgres cluster. Via Patroni, all of the Postgres nodes make use of etcd to keep the Postgres cluster up and running. + +Users and applications can access the postgres server using Haproxy IP and Port defined in the haproxy configuration rules. +``` + +Inventory hosts file as shown Below +----------------------------------- +``` +[etcd] +192.168.245.129 etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + +[postgresql] +192.168.245.129 postgresql_origin=192.168.245.129 postgresql_1=192.168.245.129 postgresql_2=192.168.245.130 postgresql_3=192.168.245.131 etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + postgresql_origin= postgresql_1= postgresql_2= postgresql_3= etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + postgresql_origin= postgresql_1= postgresql_2= postgresql_3= etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + +[haproxy] +192.168.245.129 postgresql_1=192.168.245.129 postgresql_2=192.168.245.130 postgresql_3=192.168.245.131 ansible_ssh_user=ubuntu +``` + +License +------- +``` +BSD +``` +Author Information +------------------ +``` +Nikhil Varma + +Senior DevOps Engineer +``` + +postgres cluster setup using ansible +----------------------------------- + +``` +# Command to run Ansibe-postgresql role + +$ ansible-playbook -i inventory/hosts main.yaml -K --ask-pass + +# Commands to run postgresql roles by using the tags and skipping the tags + +$ ansible-playbook -i inventory/hosts main.yaml -K --ask-pass --tags="" +$ ansible-playbook -i inventory/hosts main.yaml -K --ask-pass --skip-tags="" +``` diff --git a/ansible/roles/ansible-haproxy/defaults/main.yml b/ansible/roles/ansible-haproxy/defaults/main.yml new file mode 100644 index 0000000000..2b616ee2c8 --- /dev/null +++ 
b/ansible/roles/ansible-haproxy/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for ansible-haproxy diff --git a/ansible/roles/ansible-haproxy/handlers/main.yml b/ansible/roles/ansible-haproxy/handlers/main.yml new file mode 100644 index 0000000000..0f55472f28 --- /dev/null +++ b/ansible/roles/ansible-haproxy/handlers/main.yml @@ -0,0 +1,12 @@ +--- +# handlers file for ansible-haproxy +- name: Restart haproxy systemd + systemd: + name: haproxy.service + state: restarted + daemon_reload: yes + +- name: Restart haproxy service + systemd: + name: haproxy.service + state: restarted \ No newline at end of file diff --git a/ansible/roles/ansible-haproxy/meta/main.yml b/ansible/roles/ansible-haproxy/meta/main.yml new file mode 100644 index 0000000000..bb6de485a4 --- /dev/null +++ b/ansible/roles/ansible-haproxy/meta/main.yml @@ -0,0 +1,52 @@ +galaxy_info: + author: Nikhil Varma + description: Ansible HAProxy for postgresql cluster + #company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
+ # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. diff --git a/ansible/roles/ansible-haproxy/tasks/main.yml b/ansible/roles/ansible-haproxy/tasks/main.yml new file mode 100644 index 0000000000..2c01238c57 --- /dev/null +++ b/ansible/roles/ansible-haproxy/tasks/main.yml @@ -0,0 +1,19 @@ +--- +# tasks file for ansible-haproxy +- name: Install HaProxy after updating apt + apt: + name: + - haproxy + state: present + update_cache: yes + tags: + - HaProxy Install + +- name: Template configuration file to haproxy.cfg + template: + src: haproxy.cfg.j2 + dest: '/etc/haproxy/haproxy.cfg' + notify: + - Restart haproxy service + tags: + - Restart haproxy \ No newline at end of file diff --git a/ansible/roles/ansible-haproxy/templates/haproxy.cfg.j2 b/ansible/roles/ansible-haproxy/templates/haproxy.cfg.j2 new file mode 100644 index 0000000000..0a85d2b27b --- /dev/null +++ b/ansible/roles/ansible-haproxy/templates/haproxy.cfg.j2 @@ -0,0 +1,26 @@ +global + maxconn 100 + +defaults + log global + mode tcp + retries 2 + timeout client 30m + timeout connect 4s + timeout server 30m + timeout check 5s + +listen stats + mode http + bind *:7000 + stats enable + stats uri / + +listen postgres + bind *:5000 + option httpchk + http-check expect status 200 + default-server inter 3s 
fall 3 rise 2 on-marked-down shutdown-sessions + server postgresql_{{ postgresql_1 }}_5432 {{ postgresql_1 }}:5432 maxconn 100 check port 8008 + server postgresql_{{ postgresql_2 }}_5432 {{ postgresql_2 }}:5432 maxconn 100 check port 8008 + server postgresql_{{ postgresql_3 }}_5432 {{ postgresql_3 }}:5432 maxconn 100 check port 8008 \ No newline at end of file diff --git a/ansible/roles/ansible-haproxy/vars/main.yml b/ansible/roles/ansible-haproxy/vars/main.yml new file mode 100644 index 0000000000..2070e21bba --- /dev/null +++ b/ansible/roles/ansible-haproxy/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for ansible-haproxy diff --git a/ansible/roles/ansible-postgres_patroni/README.md b/ansible/roles/ansible-postgres_patroni/README.md new file mode 100644 index 0000000000..37f2988184 --- /dev/null +++ b/ansible/roles/ansible-postgres_patroni/README.md @@ -0,0 +1,100 @@ +Role Name +========= +``` +postgresql-cluster-ansible +``` +Requirements +------------ +``` +1. comment or uncomment the properties in templates of the roles available as per the requirement. +2. provide the variables where ever required. +``` +Role Variables +-------------- +``` +In hosts files: +1. etcd_ip : +2. postgresql_origin: +3. postgresql_1: +4. postgresql_2: +5. 
postgresql_3: + + +etcd Role variables: +postgres_patroni_etcd_name: "postgres-etcd" # cluster name +postgres_patroni_etcd_initial_cluster: "{{ etcd_name }}=http://{{ etcd_ip }}:2380" # initial cluster +postgres_patroni_etcd_initial_cluster_state: "postgres" # initial cluster state +postgres_patroni_etcd_initial_cluster_token: "etcd-cluster-postgres" # initial cluster token +postgres_patroni_etcd_initial_advertise_peer_urls: "http://{{ etcd_ip }}:2380" # initial advertise peer urls +postgres_patroni_etcd_listen_peer_urls: "http://{{ etcd_ip }}:2380" # listen peer urls +postgres_patroni_etcd_listen_client_urls: "http://{{ etcd_ip }}:2379,http://127.0.0.1:2379" # listen client urls +postgres_patroni_etcd_advertise_client_urls: "http://{{ etcd_ip }}:2379" # advertise client urls + +Ansible-postgres_patroni role Variables: +#patroni .yaml config +Postgres_cluster_name: postgresql-prod # Cluster name + +# users admin password +postgres_patroni_admin_password: admin # Admin Password + +#Authentication +# Replication +postgres_patroni_replication_username: replicator # Replication Username +postgres_patroni_replication_password: password # Replication password + +#SuperUser +postgres_patroni_superuser_username: postgres # Superuser username +postgres_patroni_superuser_password: password # Superuser Password +``` +Architecture +------------ +![Untitled Diagram (1)](https://user-images.githubusercontent.com/63706239/203470986-f8ec3d56-a6d2-4678-b594-dc20a29ec972.jpg) + +``` +Description: +Ansible postgres cluter role is used to setup a postgres cluster with 1 Primary and 2 replicas where we are using the patroni as HA solution for postgres cluster.Patroni can be configured to handle tasks like replication, backups and restorations.We are also using HAProxy load Balancer to route the traffic and Etcd is a fault-tolerant, distributed key-value store that is used to store the state of the Postgres cluster. 
Via Patroni, all of the Postgres nodes make use of etcd to keep the Postgres cluster up and running. + +Users and applications can access the postgres server using Haproxy IP and Port defined in the haproxy configuration rules. +``` + +Inventory hosts file as shown Below +----------------------------------- +``` +[etcd] +192.168.245.129 etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + +[postgresql] +192.168.245.129 postgresql_origin=192.168.245.129 postgresql_1=192.168.245.129 postgresql_2=192.168.245.130 postgresql_3=192.168.245.131 etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + postgresql_origin= postgresql_1= postgresql_2= postgresql_3= etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + postgresql_origin= postgresql_1= postgresql_2= postgresql_3= etcd_ip=192.168.245.129 ansible_ssh_user=ubuntu + +[haproxy] +192.168.245.129 postgresql_1=192.168.245.129 postgresql_2=192.168.245.130 postgresql_3=192.168.245.131 ansible_ssh_user=ubuntu +``` + +License +------- +``` +BSD +``` +Author Information +------------------ +``` +Nikhil Varma + +Senior DevOps Engineer +``` + +postgres cluster setup using ansible +----------------------------------- + +``` +# Command to run Ansibe-postgresql role + +$ ansible-playbook -i inventory/hosts main.yaml -K --ask-pass + +# Commands to run postgresql roles by using the tags and skipping the tags + +$ ansible-playbook -i inventory/hosts main.yaml -K --ask-pass --tags="" +$ ansible-playbook -i inventory/hosts main.yaml -K --ask-pass --skip-tags="" +``` diff --git a/ansible/roles/ansible-postgres_patroni/defaults/main.yml b/ansible/roles/ansible-postgres_patroni/defaults/main.yml new file mode 100644 index 0000000000..5257a8524d --- /dev/null +++ b/ansible/roles/ansible-postgres_patroni/defaults/main.yml @@ -0,0 +1,16 @@ +--- +# defaults file for ansible-postgres_patroni +#patroni .yaml config +postgres_cluster_name: postgresql-prod + +# users admin password +postgres_patroni_admin_password: admin + +#Authentication +# Replication 
+postgres_patroni_replication_username: replicator +postgres_patroni_replication_password: password + +#SuperUser +postgres_patroni_superuser_username: postgres +postgres_patroni_superuser_password: password \ No newline at end of file diff --git a/ansible/roles/ansible-postgres_patroni/handlers/main.yml b/ansible/roles/ansible-postgres_patroni/handlers/main.yml new file mode 100644 index 0000000000..91f2ff6304 --- /dev/null +++ b/ansible/roles/ansible-postgres_patroni/handlers/main.yml @@ -0,0 +1,18 @@ +--- +# handlers file for ansible-postgres_patroni +- name: Restart patroni systemd + systemd: + name: patroni.service + state: restarted + daemon_reload: yes + +- name: Restart patroni service + systemd: + name: patroni.service + state: restarted + +- name: Start the postgresql service + systemd: + name: postgresql.service + state: started + enabled: yes \ No newline at end of file diff --git a/ansible/roles/ansible-postgres_patroni/meta/main.yml b/ansible/roles/ansible-postgres_patroni/meta/main.yml new file mode 100644 index 0000000000..0538e5f1cd --- /dev/null +++ b/ansible/roles/ansible-postgres_patroni/meta/main.yml @@ -0,0 +1,52 @@ +galaxy_info: + author: Nikhil Varma + description: Ansible role for setting up postgresql cluster + #company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.1 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. 
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. diff --git a/ansible/roles/ansible-postgres_patroni/tasks/main.yml b/ansible/roles/ansible-postgres_patroni/tasks/main.yml new file mode 100644 index 0000000000..75f16a30d5 --- /dev/null +++ b/ansible/roles/ansible-postgres_patroni/tasks/main.yml @@ -0,0 +1,105 @@ +--- +# tasks file for ansible-postgres_patroni + +- name: Install postgresql after updating apt + apt: + name: + - postgresql + - postgresql-contrib + state: present + update_cache: yes + tags: + - postgresql Install + +- name: Stop the postgresql service + systemd: + name: postgresql.service + state: stopped + enabled: yes + tags: + - postgresql_service + +- name: creating softlink for postgres + ansible.builtin.shell: + cmd: ln -s /usr/lib/postgresql/15/bin/* /usr/sbin/ + tags: + - softlink + +- name: Install and update python and pip + apt: + name: + - python3-pip + - python3-dev + - libpq-dev + state: present + tags: + - pip_python + +- name: Upgrade pip to latest vesion + pip: + name: pip + extra_args: --upgrade + state: latest + tags: + - upgrade_pip + +- name: Install patroni and dependencies + pip: + name: + - patroni + - python-etcd + - psycopg2 + state: 
present + tags: + - install patroni + +- name: Creates data directory for patroni + file: + path: /data + state: directory + mode: 0700 + owner: postgres + group: postgres + tags: + - create_data_dir + +- name: Creates data directory for patroni + file: + path: /data/patroni + state: directory + mode: 0700 + owner: postgres + group: postgres + tags: + - create_patroni_dir + + +- name: Template patroni systemd service file to /etc/systemd/system/patroni.service + template: + src: patroni.service.j2 + dest: /etc/systemd/system/patroni.service + tags: + - patroni_service + +- name: Restart patroni systemd + systemd: + name: patroni.service + state: restarted + daemon_reload: yes + +- name: Template configuration file to patroni.yaml + template: + src: patroni.yaml.j2 + dest: '/etc/patroni.yaml' + tags: + - patroni_config + +- name: Restart patroni service + systemd: + name: patroni.service + state: restarted + +- name: Restart postgres service + systemd: + name: postgresql.service + state: restarted diff --git a/ansible/roles/ansible-postgres_patroni/templates/patroni.service.j2 b/ansible/roles/ansible-postgres_patroni/templates/patroni.service.j2 new file mode 100644 index 0000000000..d2bbe844e0 --- /dev/null +++ b/ansible/roles/ansible-postgres_patroni/templates/patroni.service.j2 @@ -0,0 +1,17 @@ +[Unit] +Description=Runners to orchestrate a high-availability PostgreSQL +After=syslog.target network.target + +[Service] +Type=simple + +User=postgres +Group=postgres + +ExecStart=/usr/local/bin/patroni /etc/patroni.yaml +KillMode=process +TimeoutSec=30 +Restart=no + +[Install] +WantedBy=multi-user.targ \ No newline at end of file diff --git a/ansible/roles/ansible-postgres_patroni/templates/patroni.yaml.j2 b/ansible/roles/ansible-postgres_patroni/templates/patroni.yaml.j2 new file mode 100644 index 0000000000..92d5635e46 --- /dev/null +++ b/ansible/roles/ansible-postgres_patroni/templates/patroni.yaml.j2 @@ -0,0 +1,58 @@ +scope: postgres +namespace: /db/ +name: {{ 
postgres_cluster_name }} + +restapi: + listen: {{ postgresql_origin }}:8008 + connect_address: {{ postgresql_origin }}:8008 + +etcd: + host: {{ etcd_ip }}:2379 + +bootstrap: + dcs: + ttl: 30 + loop_wait: 10 + retry_timeout: 10 + maximum_lag_on_failover: 1048576 + postgresql: + use_pg_rewind: true + + initdb: + - encoding: UTF8 + - data-checksums + + pg_hba: + - host replication replicator 127.0.0.1/32 md5 + - host replication replicator {{ postgresql_1 }}/0 md5 + - host replication replicator {{ postgresql_2 }}/0 md5 + - host replication replicator {{ postgresql_3 }}/0 md5 + - host all all 0.0.0.0/0 md5 + + users: + admin: + password: {{ postgres_patroni_admin_password }} + options: + - createrole + - createdb + +postgresql: + listen: {{ postgresql_origin }}:5432 + connect_address: {{ postgresql_origin }}:5432 + data_dir: /data/patroni + pgpass: /tmp/pgpass + authentication: + replication: + username: {{ postgres_patroni_replication_username }} + password: {{ postgres_patroni_replication_password }} + superuser: + username: {{ postgres_patroni_superuser_username }} + password: {{ postgres_patroni_superuser_password }} + parameters: + unix_socket_directories: '.' 
+ +tags: + nofailover: false + noloadbalance: false + clonefrom: false + nosync: false \ No newline at end of file diff --git a/ansible/roles/ansible-postgres_patroni/vars/main.yml b/ansible/roles/ansible-postgres_patroni/vars/main.yml new file mode 100644 index 0000000000..bf8074823c --- /dev/null +++ b/ansible/roles/ansible-postgres_patroni/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for ansible-postgres_patroni From ba01495ad7aafcf3556ad053ba02563bd2e29975 Mon Sep 17 00:00:00 2001 From: Rekha Date: Mon, 26 Dec 2022 16:34:25 +0530 Subject: [PATCH 157/616] Prashnavali reminder query added --- .../roles/postgres-migration/files/sunbird_programs/V5.1.0.sql | 1 + 1 file changed, 1 insertion(+) create mode 100644 ansible/roles/postgres-migration/files/sunbird_programs/V5.1.0.sql diff --git a/ansible/roles/postgres-migration/files/sunbird_programs/V5.1.0.sql b/ansible/roles/postgres-migration/files/sunbird_programs/V5.1.0.sql new file mode 100644 index 0000000000..1780b3118b --- /dev/null +++ b/ansible/roles/postgres-migration/files/sunbird_programs/V5.1.0.sql @@ -0,0 +1 @@ +INSERT INTO "public"."configuration" ("key", "value", "status") VALUES ('PrashnavaliReminder', ' VidyaDaan: Reminder to kindly create or review pending questions for the Project:$projectName by $projectDate. Log in via https://vdn.diksha.gov.in/contribute. 
Please ignore if work has already been completed.', 'active'); From 8a73f684521c48c35c0a3253131c3b055ab8330e Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Tue, 27 Dec 2022 12:04:46 +0530 Subject: [PATCH 158/616] added new jenkins adhoc job for ml-analytics #ED-569 (#3675) --- ansible/ml-analytics-adhoc.yaml | 4 + .../ml-analytics-adhoc-jobs/tasks/main.yaml | 106 ++++++++++++++ .../roles/ml-analytics-service/tasks/main.yml | 2 +- .../jobs/ml-analytics-adhoc/config.xml | 130 ++++++++++++++++++ .../deploy/ml-analytics-adhoc/Jenkinsfile | 47 +++++++ 5 files changed, 288 insertions(+), 1 deletion(-) create mode 100644 ansible/ml-analytics-adhoc.yaml create mode 100644 ansible/roles/ml-analytics-adhoc-jobs/tasks/main.yaml create mode 100644 deploy/jenkins/jobs/Deploy/jobs/dev/jobs/managed-learn/jobs/ml-analytics-adhoc/config.xml create mode 100644 pipelines/deploy/ml-analytics-adhoc/Jenkinsfile diff --git a/ansible/ml-analytics-adhoc.yaml b/ansible/ml-analytics-adhoc.yaml new file mode 100644 index 0000000000..f870c5434e --- /dev/null +++ b/ansible/ml-analytics-adhoc.yaml @@ -0,0 +1,4 @@ +- hosts: ml-analytics-service + become: yes + roles: + - ml-analytics-adhoc-jobs diff --git a/ansible/roles/ml-analytics-adhoc-jobs/tasks/main.yaml b/ansible/roles/ml-analytics-adhoc-jobs/tasks/main.yaml new file mode 100644 index 0000000000..abfb50e3bb --- /dev/null +++ b/ansible/roles/ml-analytics-adhoc-jobs/tasks/main.yaml @@ -0,0 +1,106 @@ +- name: Fetch Config file + synchronize: src="{{ item }}" dest="../output/" mode=pull recursive=yes rsync_path=rsync + with_items: + - "/opt/sparkjobs/ml-analytics-service/config.ini" + tags: + - fetch-config + +- name: Execute run.sh + become: yes + become_user: data-pipeline + shell: "/opt/sparkjobs/ml-analytics-service/run.sh > /opt/sparkjobs/ml-analytics-service/run_job.log" + tags: + - run-job + +- name: Fetch run_job.log + synchronize: src="{{ item }}" dest="../output/" mode=pull recursive=yes rsync_path=rsync + with_items: + - 
"/opt/sparkjobs/ml-analytics-service/run_job.log" + tags: + - run-job + +- name: Execute run_weekly.sh + become: yes + become_user: data-pipeline + shell: "/opt/sparkjobs/ml-analytics-service/run_weekly.sh > /opt/sparkjobs/ml-analytics-service/run_weekly_job.log" + tags: + - run-weekly + +- name: Fetch run_weekly.log + synchronize: src="{{ item }}" dest="../output/" mode=pull recursive=yes rsync_path=rsync + with_items: + - "/opt/sparkjobs/ml-analytics-service/run_weekly_job.log" + tags: + - run-weekly + +- name: Execute run_program.sh + become: yes + become_user: data-pipeline + shell: "/opt/sparkjobs/ml-analytics-service/run_program.sh > /opt/sparkjobs/ml-analytics-service/run_program_job.log" + tags: + - run-program + +- name: Fetch run_program_job.log + synchronize: src="{{ item }}" dest="../output/" mode=pull recursive=yes rsync_path=rsync + with_items: + - "/opt/sparkjobs/ml-analytics-service/run_program_job.log" + tags: + - run-program + +- name: Execute Observation ingest/refresh + become: yes + become_user: data-pipeline + shell: "source /opt/sparkjobs/spark_venv/bin/activate && /opt/sparkjobs/spark_venv/lib/python3.8/site-packages/pyspark/bin/spark-submit --driver-memory 50g --executor-memory 50g /opt/sparkjobs/ml-analytics-service/observations/pyspark_observation_status_batch.py" + register: out + tags: + - observation-refresh-ingest + +- debug: + var: out.stdout_lines + tags: + - observation-refresh-ingest + +- name: Execute Survey ingest/refresh + become: yes + become_user: data-pipeline + shell: "source /opt/sparkjobs/spark_venv/bin/activate && /opt/sparkjobs/spark_venv/lib/python3.8/site-packages/pyspark/bin/spark-submit --driver-memory 50g --executor-memory 50g /opt/sparkjobs/ml-analytics-service/survey/pyspark_survey_status.py" + register: out + args: + executable: /bin/bash + tags: + - survey-refresh-ingest + +- debug: + var: out.stdout_lines + tags: + - survey-refresh-ingest + +- name: Execute Project Refresh + become: yes + become_user: 
data-pipeline + shell: "source /opt/sparkjobs/spark_venv/bin/activate && /opt/sparkjobs/spark_venv/lib/python3.8/site-packages/pyspark/bin/spark-submit --driver-memory 50g --executor-memory 50g /opt/sparkjobs/ml-analytics-service/projects/pyspark_project_deletion_batch.py" + register: out + args: + executable: /bin/bash + tags: + - project-refresh + +- debug: + var: out.stdout_lines + tags: + - project-refresh + +- name: Execute Project ingest + become: yes + become_user: data-pipeline + shell: "source /opt/sparkjobs/spark_venv/bin/activate && /opt/sparkjobs/spark_venv/lib/python3.8/site-packages/pyspark/bin/spark-submit --driver-memory 50g --executor-memory 50g /opt/sparkjobs/ml-analytics-service/projects/pyspark_project_batch.py" + register: out + args: + executable: /bin/bash + tags: + - project-ingest + +- debug: + var: out.stdout_lines + tags: + - project-ingest diff --git a/ansible/roles/ml-analytics-service/tasks/main.yml b/ansible/roles/ml-analytics-service/tasks/main.yml index ee609b8806..30b61a06cd 100755 --- a/ansible/roles/ml-analytics-service/tasks/main.yml +++ b/ansible/roles/ml-analytics-service/tasks/main.yml @@ -145,4 +145,4 @@ minute: "30" hour: "7" weekday: "4" - job: "{{ BASEPATH }}/run_weekly.sh > {{ BASEPATH }}/ml-analytics-service/nvsk_data_weekly.logs" + job: "{{ BASEPATH }}/ml-analytics-service/run_weekly.sh > {{ BASEPATH }}/ml-analytics-service/nvsk_data_weekly.logs" diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/managed-learn/jobs/ml-analytics-adhoc/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/managed-learn/jobs/ml-analytics-adhoc/config.xml new file mode 100644 index 0000000000..eeb6fdfe2b --- /dev/null +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/managed-learn/jobs/ml-analytics-adhoc/config.xml @@ -0,0 +1,130 @@ + + + + false + + + false + false + + + + + private_branch + choice-parameter-3803648169564146 + 1 + + true + + + + true + + + ml-analytics-adhoc + Deploy/dev/managed-learn/ml-analytics-adhoc + + + 
ET_FORMATTED_HTML + true + + + branch_or_tag + choice-parameter-3803648170694062 + 1 + + true + + + + true + + + ml-analytics-adhoc + Deploy/dev/managed-learn/ml-analytics-adhoc + + + ET_FORMATTED_HTML + true + + + action + <font color=green size=2><b>Choose the job names to run. Multi-selection is available.</b></font> + choice-parameter-3812862131559945 + 1 + + true + + + + ml-analytics-adhoc + Deploy/dev/managed-learn/ml-analytics-adhoc + + + PT_MULTI_SELECT + false + 1 + + + + + 0 + 0 + + false + project + false + + + + + + + 2 + + + https://github.com/project-sunbird/sunbird-devops.git + github-cred + + + + + ${branch_or_tag} + + + false + + + + pipelines/deploy/ml-analytics-adhoc/Jenkinsfile + false + + + false + diff --git a/pipelines/deploy/ml-analytics-adhoc/Jenkinsfile b/pipelines/deploy/ml-analytics-adhoc/Jenkinsfile new file mode 100644 index 0000000000..8a2f010d6b --- /dev/null +++ b/pipelines/deploy/ml-analytics-adhoc/Jenkinsfile @@ -0,0 +1,47 @@ +@Library('deploy-conf') _ +node() { + try { + String ANSI_GREEN = "\u001B[32m" + String ANSI_NORMAL = "\u001B[0m" + String ANSI_BOLD = "\u001B[1m" + String ANSI_RED = "\u001B[31m" + String ANSI_YELLOW = "\u001B[33m" + + stage('checkout public repo') { + cleanWs() + checkout scm + } + ansiColor('xterm') { + + stage('Deploy') { + values = [:] + sh 'echo "${currentWs} is this"' + currentWs = sh(returnStdout: true, script: 'pwd').trim() + envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() + module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() + jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() + values.put('currentWs', currentWs) + values.put('env', envDir) + values.put('module', module) + values.put('jobName', jobName) + ansiblePlaybook = "${currentWs}/ansible/ml-analytics-adhoc.yaml" + ansibleExtraArgs = "--tags ${params.action} --vault-password-file /var/lib/jenkins/secrets/vault-pass" + values.put('ansiblePlaybook', 
ansiblePlaybook) + values.put('ansibleExtraArgs', ansibleExtraArgs) + println values + ansible_playbook_run(values) + currentBuild.result = "SUCCESS" + currentBuild.description = "Artifact: ${values.artifact_version}, Private: ${params.private_branch}, Public: ${params.branch_or_tag}" + } + summary() + } + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + finally { + slack_notify(currentBuild.result) + email_notify() + } +} From b2c76008bb427b2260f8f9ee64d4d46f8ac69332 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Dec 2022 13:59:06 +0530 Subject: [PATCH 159/616] fix: https:// already part of another url Signed-off-by: Keshav Prasad --- .../helm_charts/core/nginx-public-ingress/values.j2 | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 index c1331e76a5..1b9a670a37 100644 --- a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 +++ b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 @@ -557,7 +557,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass https://$s3_bucket/v3/preview/$url_full; + proxy_pass $s3_bucket/v3/preview/$url_full; } location ~ /content-editor/telemetry|collection-editor/telemetry { rewrite ^/(.*) /$1 break; @@ -604,7 +604,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass https://$s3_bucket/content-editor/$url_full; + proxy_pass $s3_bucket/content-editor/$url_full; } location ~* ^/discussion-ui/(.*) { # Enabling compression @@ -628,7 +628,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass 
https://$s3_bucket/discussion-ui/$url_full; + proxy_pass $s3_bucket/discussion-ui/$url_full; } location ~* ^/collection-editor/(.*) { # Enabling compression @@ -661,7 +661,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass https://$s3_bucket/collection-editor/$url_full; + proxy_pass $s3_bucket/collection-editor/$url_full; } location ~* ^/generic-editor/(.*) { # Enabling compression @@ -694,7 +694,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass https://$s3_bucket/generic-editor/$url_full; + proxy_pass $s3_bucket/generic-editor/$url_full; } location ~* ^/content-plugins/(.*) { # Enabling cache for Response code 200 @@ -731,7 +731,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass https://$s3_bucket/content-plugins/$url_full; + proxy_pass $s3_bucket/content-plugins/$url_full; } location /thirdparty { # Enabling cache for Response code 200 From 3da4be417edb55763bc5a33d470fdb0e6a0ac54d Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Dec 2022 14:43:55 +0530 Subject: [PATCH 160/616] fix: updated split logic based on url value Signed-off-by: Keshav Prasad --- .../stack-proxy/templates/proxy-default.conf | 16 ++++++------- .../core/nginx-public-ingress/values.j2 | 24 +++++++++---------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/ansible/roles/stack-proxy/templates/proxy-default.conf b/ansible/roles/stack-proxy/templates/proxy-default.conf index f98ba2aae9..9ff34d8dcb 100644 --- a/ansible/roles/stack-proxy/templates/proxy-default.conf +++ b/ansible/roles/stack-proxy/templates/proxy-default.conf @@ -306,7 +306,7 @@ server { set $bucket "{{upstream_url}}"; set $url_full '$1'; 
proxy_http_version 1.1; - proxy_set_header Host "{{upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -339,10 +339,10 @@ server { return 200; } - set $s3_bucket "{{plugin_upstream_url}}"; + set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{plugin_upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -354,7 +354,7 @@ server { proxy_intercept_errors on; add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; - proxy_pass https://$s3_bucket/v3/preview/$url_full; + proxy_pass https://$bucket/v3/preview/$url_full; } location ~* ^/content-plugins/(.*) { @@ -378,11 +378,11 @@ location ~* ^/content-plugins/(.*) { add_header Content-Type text/plain; return 200; } - set $s3_bucket "{{plugin_upstream_url}}"; + set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{plugin_upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -394,7 +394,7 @@ location ~* ^/content-plugins/(.*) { proxy_intercept_errors on; add_header Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Methods GET; - proxy_pass https://$s3_bucket/content-plugins/$url_full; + proxy_pass https://$bucket/content-plugins/$url_full; } location /thirdparty { @@ -448,7 +448,7 @@ location ~* ^/desktop/(.*) { set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host 
"{{sunbird_offline_azure_storage_account_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{sunbird_offline_azure_storage_account_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; diff --git a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 index 1b9a670a37..678a12bb4d 100644 --- a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 +++ b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 @@ -541,7 +541,7 @@ proxyconfig: |- add_header Content-Type text/plain; return 200; } - set $s3_bucket "{{plugin_upstream_url}}"; + set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; @@ -557,7 +557,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass $s3_bucket/v3/preview/$url_full; + proxy_pass $bucket/v3/preview/$url_full; } location ~ /content-editor/telemetry|collection-editor/telemetry { rewrite ^/(.*) /$1 break; @@ -588,7 +588,7 @@ proxyconfig: |- add_header Content-Type text/plain; return 200; } - set $s3_bucket "{{plugin_upstream_url}}"; + set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; @@ -604,7 +604,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass $s3_bucket/content-editor/$url_full; + proxy_pass $bucket/content-editor/$url_full; } location ~* ^/discussion-ui/(.*) { # Enabling compression @@ -612,7 +612,7 @@ proxyconfig: |- gzip_min_length 100000; gzip_proxied expired no-cache no-store private auth; gzip_types application/javascript 
application/x-javascript text/css text/javascript; - set $s3_bucket "{{discussion_upstream_url}}"; + set $bucket "{{discussion_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; proxy_set_header Host "{{discussion_upstream_url.split('/')[0]|lower}}"; @@ -628,7 +628,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass $s3_bucket/discussion-ui/$url_full; + proxy_pass $bucket/discussion-ui/$url_full; } location ~* ^/collection-editor/(.*) { # Enabling compression @@ -645,7 +645,7 @@ proxyconfig: |- add_header Content-Type text/plain; return 200; } - set $s3_bucket "{{plugin_upstream_url}}"; + set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; @@ -661,7 +661,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass $s3_bucket/collection-editor/$url_full; + proxy_pass $bucket/collection-editor/$url_full; } location ~* ^/generic-editor/(.*) { # Enabling compression @@ -678,7 +678,7 @@ proxyconfig: |- add_header Content-Type text/plain; return 200; } - set $s3_bucket "{{plugin_upstream_url}}"; + set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; @@ -694,7 +694,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass $s3_bucket/generic-editor/$url_full; + proxy_pass $bucket/generic-editor/$url_full; } location ~* ^/content-plugins/(.*) { # Enabling cache for Response code 200 @@ -715,7 +715,7 @@ proxyconfig: |- add_header Content-Type text/plain; return 200; } - set $s3_bucket "{{plugin_upstream_url}}"; + set $bucket 
"{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; @@ -731,7 +731,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass $s3_bucket/content-plugins/$url_full; + proxy_pass $bucket/content-plugins/$url_full; } location /thirdparty { # Enabling cache for Response code 200 From d3e9c94d9404b1b1f22000d608ce618255d435bd Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Dec 2022 14:46:37 +0530 Subject: [PATCH 161/616] fix: removed https:// url string Signed-off-by: Keshav Prasad --- ansible/roles/stack-proxy/templates/proxy-default.conf | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/roles/stack-proxy/templates/proxy-default.conf b/ansible/roles/stack-proxy/templates/proxy-default.conf index 9ff34d8dcb..9f78549da2 100644 --- a/ansible/roles/stack-proxy/templates/proxy-default.conf +++ b/ansible/roles/stack-proxy/templates/proxy-default.conf @@ -318,7 +318,7 @@ server { proxy_intercept_errors on; add_header Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Methods GET; - proxy_pass https://$bucket/$url_full; + proxy_pass $bucket/$url_full; } @@ -354,7 +354,7 @@ server { proxy_intercept_errors on; add_header Access-Control-Allow-Origin "*" ; add_header Access-Control-Allow-Methods GET; - proxy_pass https://$bucket/v3/preview/$url_full; + proxy_pass $bucket/v3/preview/$url_full; } location ~* ^/content-plugins/(.*) { @@ -394,7 +394,7 @@ location ~* ^/content-plugins/(.*) { proxy_intercept_errors on; add_header Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Methods GET; - proxy_pass https://$bucket/content-plugins/$url_full; + proxy_pass $bucket/content-plugins/$url_full; } location /thirdparty { @@ -460,7 +460,7 @@ location ~* ^/desktop/(.*) { proxy_intercept_errors on; add_header 
Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Methods GET; - proxy_pass https://$offline_bucket/$url_full; + proxy_pass $offline_bucket/$url_full; } location / { From 854398a2f2af283fbf1e5f4319e72ec856b5ec69 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Dec 2022 14:52:42 +0530 Subject: [PATCH 162/616] fix: update url array index Signed-off-by: Keshav Prasad --- .../core/nginx-public-ingress/values.j2 | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 index 678a12bb4d..cbc2e40f4e 100644 --- a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 +++ b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 @@ -511,7 +511,7 @@ proxyconfig: |- set $bucket "{{upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -544,7 +544,7 @@ proxyconfig: |- set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{plugin_upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -591,7 +591,7 @@ proxyconfig: |- set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{plugin_upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -615,7 +615,7 @@ proxyconfig: |- set 
$bucket "{{discussion_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{discussion_upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{discussion_upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -648,7 +648,7 @@ proxyconfig: |- set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{plugin_upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -681,7 +681,7 @@ proxyconfig: |- set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{plugin_upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -718,7 +718,7 @@ proxyconfig: |- set $bucket "{{plugin_upstream_url}}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{plugin_upstream_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{plugin_upstream_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header Access-Control-Allow-Methods; @@ -778,7 +778,7 @@ proxyconfig: |- set $offline_bucket "{{ sunbird_offline_azure_storage_account_url }}"; set $url_full '$1'; proxy_http_version 1.1; - proxy_set_header Host "{{sunbird_offline_azure_storage_account_url.split('/')[0]|lower}}"; + proxy_set_header Host "{{sunbird_offline_azure_storage_account_url.split('/')[2]|lower}}"; proxy_set_header Authorization ''; proxy_hide_header Access-Control-Allow-Origin; proxy_hide_header 
Access-Control-Allow-Methods; From 045d158e1199dfed0908325ee0a7fb6be4a332a8 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 27 Dec 2022 17:37:04 +0530 Subject: [PATCH 163/616] fix: remove hardcoded https from proxy_pass value (#3688) --- kubernetes/helm_charts/core/nginx-public-ingress/values.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 index cbc2e40f4e..e0699cdb50 100644 --- a/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 +++ b/kubernetes/helm_charts/core/nginx-public-ingress/values.j2 @@ -524,7 +524,7 @@ proxyconfig: |- add_header Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Methods GET; proxy_set_header X-Request-ID $sb_request_id; - proxy_pass https://$bucket/$url_full; + proxy_pass $bucket/$url_full; } location ~* ^/content/preview/(.*) { # Enabling compression From c37645deddd0a6a401b3e8f680feeacc09bc065f Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Wed, 28 Dec 2022 16:19:49 +1100 Subject: [PATCH 164/616] added the switch for getting oci deployed image tag Signed-off-by: Deepak Devadathan --- kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml b/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml index 09e96cf25e..7624b3f06e 100644 --- a/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml @@ -91,6 +91,12 @@ - name: Get deployed image name shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[1]' register: deployed_image + when: cloud_service_provider != "oci" + +- name: Get deployed image name + shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r 
'.spec.template.spec.containers[0].image | split("/")[3]' + register: deployed_image + when: cloud_service_provider == "oci" - set_fact: metadata_image: "{{ image_name }}:{{ image_tag }}" From fff6051deea037ce6e3069fdc4fb9158ae97bd6b Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Wed, 28 Dec 2022 16:22:43 +1100 Subject: [PATCH 165/616] added oci in the task name for getting image version Signed-off-by: Deepak Devadathan --- kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml b/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml index 7624b3f06e..4e01454d38 100644 --- a/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml @@ -93,7 +93,7 @@ register: deployed_image when: cloud_service_provider != "oci" -- name: Get deployed image name +- name: Get deployed image name for oci shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[3]' register: deployed_image when: cloud_service_provider == "oci" From 7a3833a9acf304b614c5c03e08c16ade587a409b Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Wed, 28 Dec 2022 18:08:03 +1100 Subject: [PATCH 166/616] added helm values for oci for internal lb provisioning Signed-off-by: Deepak Devadathan --- .../core/nginx-private-ingress/templates/configmap.yaml | 9 +++++++-- .../helm_charts/core/nginx-private-ingress/values.j2 | 1 + 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/kubernetes/helm_charts/core/nginx-private-ingress/templates/configmap.yaml b/kubernetes/helm_charts/core/nginx-private-ingress/templates/configmap.yaml index 0dd8167be2..b59cebe9a0 100644 --- a/kubernetes/helm_charts/core/nginx-private-ingress/templates/configmap.yaml +++ b/kubernetes/helm_charts/core/nginx-private-ingress/templates/configmap.yaml @@ -39,8 +39,9 
@@ data: server { listen 80; listen [::]:80; - server_name {{ .Values.nginx_private_ingress_ip }}; - +{{- if and .Values.nginx_private_ingress_ip (ne .Values.csp "oci") }} + server_name {{ .Values.nginx_private_ingress_ip }}; +{{- end }} resolver {{ .Values.kube_dns_ip }}; location /learner/ { @@ -125,7 +126,9 @@ data: set $target http://report-service.{{ .Values.namespace }}.svc.cluster.local:3030; rewrite ^/report/(.*) /$1 break; proxy_http_version 1.1; +{{- if and .Values.nginx_private_ingress_ip (ne .Values.csp "oci") }} proxy_set_header Host $server_name; +{{- end }} proxy_pass $target; } location /search/ { @@ -244,7 +247,9 @@ data: set $target http://registry-service.{{ .Values.namespace }}.svc.cluster.local:8081; rewrite ^/registry-service/(.*) /$1 break; proxy_http_version 1.1; +{{- if and .Values.nginx_private_ingress_ip (ne .Values.csp "oci") }} proxy_set_header Host $server_name; +{{- end }} proxy_pass $target; } location /ml-projects/ { diff --git a/kubernetes/helm_charts/core/nginx-private-ingress/values.j2 b/kubernetes/helm_charts/core/nginx-private-ingress/values.j2 index c1c2491446..393476d491 100644 --- a/kubernetes/helm_charts/core/nginx-private-ingress/values.j2 +++ b/kubernetes/helm_charts/core/nginx-private-ingress/values.j2 @@ -38,3 +38,4 @@ autoscaling: targetCPUUtilizationPercentage: {{ nginx_private_ingress_autoscaling_targetCPUUtilizationPercentage|default(60) }} targetMemoryUtilizationPercentage: {{ nginx_private_ingress_autoscaling_targetMemoryUtilizationPercentage|default('') }} +csp: {{cloud_service_provider}} \ No newline at end of file From cb96fe23add17f87a120ee554de06a2cb332350f Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Wed, 28 Dec 2022 18:17:41 +1100 Subject: [PATCH 167/616] getting image tag for oci Signed-off-by: Deepak Devadathan --- kubernetes/ansible/roles/helm-deploy/tasks/main.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml
b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml index 8f4881089a..53b452274d 100644 --- a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml @@ -120,6 +120,12 @@ - name: Get deployed image name - deployments shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[1]' register: image + when: cloud_service_provider != "oci" + +- name: Get deployed image name - deployments for OCI + shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[3]' + register: image + when: cloud_service_provider == "oci" - set_fact: deployed_image: "{{ image }}" From b87d0d3c3843891baf465ba6afa48c11c50e5a72 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 28 Dec 2022 13:41:06 +0530 Subject: [PATCH 168/616] fix: remove https as its included in the var Signed-off-by: Keshav Prasad --- ansible/roles/stack-sunbird/templates/sunbird_player.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_player.env b/ansible/roles/stack-sunbird/templates/sunbird_player.env index 9cb6473418..30d4e1830f 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_player.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_player.env @@ -195,7 +195,7 @@ sunbird_telemetry_service_local_url={{sunbird_telemetry_service_local_url | defa #Release-4.4.0 sunbird_portal_video_max_size={{sunbird_portal_video_max_size | default(150)}} sunbird_default_file_size={{sunbird_default_file_size | default(150)}} -sunbird_portal_uci_blob_url={{ sunbird_portal_uci_blob_url | default('https://' + cloud_storage_url + '/uci') }} +sunbird_portal_uci_blob_url={{ sunbird_portal_uci_blob_url | default(cloud_storage_url + '/uci') }} portal_redirect_error_callback_domain={{portal_redirect_error_callback_domain | 
default("https://"+domain_name)}} sunbird_portal_uci_bot_phone_number={{sunbird_portal_uci_bot_phone_number | default(+912249757677)}} From 15394c46e23de68a5aa74b818e82b1e4ff9eccc6 Mon Sep 17 00:00:00 2001 From: VISHNUDAS <95604247+VISHNUDAS-tunerlabs@users.noreply.github.com> Date: Wed, 28 Dec 2022 21:59:39 +0530 Subject: [PATCH 169/616] Project certificate download uris edited (#3691) typo correction --- ansible/roles/kong-api/defaults/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/kong-api/defaults/main.yml b/ansible/roles/kong-api/defaults/main.yml index d7589ae22e..7af41e2149 100644 --- a/ansible/roles/kong-api/defaults/main.yml +++ b/ansible/roles/kong-api/defaults/main.yml @@ -9795,7 +9795,7 @@ kong_apis: config.enabled: true - name: getProjectRCCertificate - uris: "{{ registry_service_prefix }}/projetCertificate/v1/download" + uris: "{{ registry_service_prefix }}/projectCertificate/v1/download" upstream_url: "{{ registry_service_url }}/api/v1/ProjectCertificate" strip_uri: true plugins: @@ -10000,4 +10000,4 @@ kong_apis: config.allowed_payload_size: "{{ small_request_size_limit }}" - name: opa-checks config.required: true - config.enabled: true \ No newline at end of file + config.enabled: true From 30ed77ad03342d5fb80f3b79e4e4c9f184311728 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 29 Dec 2022 10:07:17 +1100 Subject: [PATCH 170/616] added oci oss upload for certificate template Signed-off-by: Deepak Devadathan --- ansible/roles/cert-templates/tasks/main.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index 0700f1e61a..d292210e9d 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -66,3 +66,12 @@ gcp_path: "" local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" when: cloud_service_provider == "gcloud" + +- 
name: upload batch of files to oci oss + include_role: + name: oci-cloud-storage + tasks_from: upload-folder.yml + vars: + oci_bucket_name: "{{ cert_service_container_name }}" + local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" + when: cloud_service_provider == "oci" \ No newline at end of file From 79b1ec556821f202cddddedcd6b5ed97f79b857c Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 29 Dec 2022 10:13:50 +1100 Subject: [PATCH 171/616] corrected typo in the role input variable Signed-off-by: Deepak Devadathan --- ansible/roles/cert-templates/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index d292210e9d..617bfe44cd 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -72,6 +72,6 @@ name: oci-cloud-storage tasks_from: upload-folder.yml vars: - oci_bucket_name: "{{ cert_service_container_name }}" + oss_bucket_name: "{{ cert_service_container_name }}" local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" when: cloud_service_provider == "oci" \ No newline at end of file From 79fb6d90a580d1f0543b55042da6f474e2a699d2 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 29 Dec 2022 10:21:45 +1100 Subject: [PATCH 172/616] removed prefix flag from bulk upload Signed-off-by: Deepak Devadathan --- ansible/roles/oci-cloud-storage/tasks/upload-folder.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml b/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml index 6e4d06562c..7802cefd48 100644 --- a/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml +++ b/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml @@ -3,6 +3,6 @@ command: oci os bucket get --name {{ oss_bucket_name }} - name: Upload folder to oci oss bucket - command: oci 
os object bulk-upload -bn {{ oss_bucket_name }} --prefix {{ oss_path }} --src-dir {{ local_file_or_folder_path }} --content-type auto + command: oci os object bulk-upload -bn {{ oss_bucket_name }} --src-dir {{ local_file_or_folder_path }} --content-type auto async: 3600 - poll: 10 + poll: 10 From 5e9f0eb7b400ba968689be7b0d68962096dbc9c5 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 29 Dec 2022 11:00:34 +1100 Subject: [PATCH 173/616] added oci oss task for chatbot upload Signed-off-by: Deepak Devadathan --- ansible/desktop-faq-upload.yml | 45 +++++++++++++++++++++++++++++++++- 1 file changed, 44 insertions(+), 1 deletion(-) diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index d36b0e3721..72f3d63f50 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -143,4 +143,47 @@ - upload-chatbot-config - upload-batch when: cloud_service_provider == "aws" - \ No newline at end of file + + +##### OCI Tasks #### + + - name: this block consists of tasks related to oci oss + block: + - name: set common oci variables + set_fact: + local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" + oss_path: "{{ upload_storage }}/{{ destination_path }}" + tags: + - always + + - block: + - name: upload file to oci oss public bucket + include_role: + name: oci-cloud-storage + tasks_from: upload.yml + vars: + oss_bucket_name: "{{ oci_public_oss_bucket_name }}" + tags: + - upload-desktop-faq + + - block: + - name: upload file to oci oss private bucket + include_role: + name: oci-cloud-storage + tasks_from: upload.yml + vars: + oss_bucket_name: "{{ oci_private_oss_bucket_name }}" + tags: + - upload-label + + - block: + - name: upload folder to oci oss + include_role: + name: oci-cloud-storage + tasks_from: upload-folder.yml + vars: + oss_bucket_name: "{{ oci_public_oss_bucket_name }}" + tags: + - upload-chatbot-config + - upload-batch + when: cloud_service_provider == "oci" From 
a019854221171575904636cb1c693062865bbb58 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 29 Dec 2022 11:24:17 +1100 Subject: [PATCH 174/616] updated the prefix logic for folder upload to oss Signed-off-by: Deepak Devadathan --- ansible/desktop-faq-upload.yml | 8 ++------ ansible/roles/oci-cloud-storage/tasks/upload-folder.yml | 7 +++++++ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index 72f3d63f50..0fbff4be3a 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -152,7 +152,7 @@ - name: set common oci variables set_fact: local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" - oss_path: "{{ upload_storage }}/{{ destination_path }}" + oss_bucket_name: "{{upload_storage}}" tags: - always @@ -161,8 +161,6 @@ include_role: name: oci-cloud-storage tasks_from: upload.yml - vars: - oss_bucket_name: "{{ oci_public_oss_bucket_name }}" tags: - upload-desktop-faq @@ -171,8 +169,6 @@ include_role: name: oci-cloud-storage tasks_from: upload.yml - vars: - oss_bucket_name: "{{ oci_private_oss_bucket_name }}" tags: - upload-label @@ -182,7 +178,7 @@ name: oci-cloud-storage tasks_from: upload-folder.yml vars: - oss_bucket_name: "{{ oci_public_oss_bucket_name }}" + oss_path: "{{ destination_path }}/" tags: - upload-chatbot-config - upload-batch diff --git a/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml b/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml index 7802cefd48..acb8841ff5 100644 --- a/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml +++ b/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml @@ -6,3 +6,10 @@ command: oci os object bulk-upload -bn {{ oss_bucket_name }} --src-dir {{ local_file_or_folder_path }} --content-type auto async: 3600 poll: 10 + when: oss_path is undefined + +- name: Upload folder to oci oss bucket + command: oci os object bulk-upload -bn {{ oss_bucket_name }} --prefix {{oss_path}} 
--src-dir {{ local_file_or_folder_path }} --content-type auto + async: 3600 + poll: 10 + when: oss_path is defined From bb3812e73a203117c419ca849a14beb5f1e47c14 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 29 Dec 2022 11:33:28 +1100 Subject: [PATCH 175/616] uploadfaq to oci oss Signed-off-by: Deepak Devadathan --- ansible/uploadFAQs.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index 88d17aba81..a7523d2616 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -57,6 +57,17 @@ with_items: - "{{ source_folder.split(',') }}" when: cloud_service_provider == "gcloud" + + - name: upload batch of files to oci oss + include_role: + name: oci-cloud-storage + tasks_from: upload-folder.yml + vars: + oss_bucket_name: "{{ upload_storage }}" + local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" + with_items: + - "{{ source_folder.split(',') }}" + when: cloud_service_provider == "oci" tags: - upload-faqs - upload-RC-schema From c8383332c7a0ec68bd0412fdcf6b0e9f9f830f86 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 29 Dec 2022 11:40:18 +1100 Subject: [PATCH 176/616] added a temp debug Signed-off-by: Deepak Devadathan --- ansible/roles/oci-cloud-storage/tasks/upload-folder.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml b/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml index acb8841ff5..9970deb02a 100644 --- a/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml +++ b/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml @@ -2,6 +2,11 @@ - name: Ensure oci oss bucket exists command: oci os bucket get --name {{ oss_bucket_name }} +- name: Temporary Debug + debug: + msg: | + "{{oss_path}}" + - name: Upload folder to oci oss bucket command: oci os object bulk-upload -bn {{ oss_bucket_name }} --src-dir {{ local_file_or_folder_path }} --content-type auto async: 3600 From 
5ee705137b578feed825a206a0ec7b9ea80b8df2 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 29 Dec 2022 11:47:38 +1100 Subject: [PATCH 177/616] testing bulk upload logic Signed-off-by: Deepak Devadathan --- ansible/roles/oci-cloud-storage/tasks/upload-folder.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml b/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml index 9970deb02a..23bc91f0b4 100644 --- a/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml +++ b/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml @@ -11,10 +11,10 @@ command: oci os object bulk-upload -bn {{ oss_bucket_name }} --src-dir {{ local_file_or_folder_path }} --content-type auto async: 3600 poll: 10 - when: oss_path is undefined + when: (oss_path is undefined) and (oss_path|length == 0) - name: Upload folder to oci oss bucket command: oci os object bulk-upload -bn {{ oss_bucket_name }} --prefix {{oss_path}} --src-dir {{ local_file_or_folder_path }} --content-type auto async: 3600 poll: 10 - when: oss_path is defined + when: (oss_path is defined) and (oss_path|length > 0) From e2f3c3ae5f76d05c204433ddbfa59356e53e96c3 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 29 Dec 2022 11:50:31 +1100 Subject: [PATCH 178/616] testing oss_path variable Signed-off-by: Deepak Devadathan --- ansible/roles/oci-cloud-storage/tasks/upload-folder.yml | 4 ++-- ansible/uploadFAQs.yml | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml b/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml index 23bc91f0b4..3fba5366b0 100644 --- a/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml +++ b/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml @@ -11,10 +11,10 @@ command: oci os object bulk-upload -bn {{ oss_bucket_name }} --src-dir {{ local_file_or_folder_path }} --content-type auto async: 3600 poll: 10 - when: (oss_path is 
undefined) and (oss_path|length == 0) + when: oss_path|length == 0 - name: Upload folder to oci oss bucket command: oci os object bulk-upload -bn {{ oss_bucket_name }} --prefix {{oss_path}} --src-dir {{ local_file_or_folder_path }} --content-type auto async: 3600 poll: 10 - when: (oss_path is defined) and (oss_path|length > 0) + when: oss_path|length > 0 diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index a7523d2616..144cdc718a 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -64,6 +64,7 @@ tasks_from: upload-folder.yml vars: oss_bucket_name: "{{ upload_storage }}" + oss_path: "" local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" with_items: - "{{ source_folder.split(',') }}" From 5598a31f18ad44069d568f7137e14a28f4f46fcd Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 29 Dec 2022 11:52:55 +1100 Subject: [PATCH 179/616] testing with a defined prefix Signed-off-by: Deepak Devadathan --- ansible/uploadFAQs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index 144cdc718a..f8f1ffd6e9 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -64,7 +64,7 @@ tasks_from: upload-folder.yml vars: oss_bucket_name: "{{ upload_storage }}" - oss_path: "" + oss_path: "temp/" local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" with_items: - "{{ source_folder.split(',') }}" From 9fdf0f3f7bfb9417fa9ffff61d79fd6816fca966 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 29 Dec 2022 11:55:00 +1100 Subject: [PATCH 180/616] removed debug line and oss_path value Signed-off-by: Deepak Devadathan --- ansible/roles/oci-cloud-storage/tasks/upload-folder.yml | 5 ----- ansible/uploadFAQs.yml | 2 +- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml b/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml index 3fba5366b0..838e2ea84e 100644 --- 
a/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml +++ b/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml @@ -2,11 +2,6 @@ - name: Ensure oci oss bucket exists command: oci os bucket get --name {{ oss_bucket_name }} -- name: Temporary Debug - debug: - msg: | - "{{oss_path}}" - - name: Upload folder to oci oss bucket command: oci os object bulk-upload -bn {{ oss_bucket_name }} --src-dir {{ local_file_or_folder_path }} --content-type auto async: 3600 diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index f8f1ffd6e9..144cdc718a 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -64,7 +64,7 @@ tasks_from: upload-folder.yml vars: oss_bucket_name: "{{ upload_storage }}" - oss_path: "temp/" + oss_path: "" local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" with_items: - "{{ source_folder.split(',') }}" From 7f4477edc0b5db1eb0b58d8fdecece94371d762a Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 29 Dec 2022 11:58:04 +1100 Subject: [PATCH 181/616] added overwrite flag for bulk upload Signed-off-by: Deepak Devadathan --- ansible/roles/oci-cloud-storage/tasks/upload-folder.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml b/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml index 838e2ea84e..2e0d45bcb7 100644 --- a/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml +++ b/ansible/roles/oci-cloud-storage/tasks/upload-folder.yml @@ -3,13 +3,13 @@ command: oci os bucket get --name {{ oss_bucket_name }} - name: Upload folder to oci oss bucket - command: oci os object bulk-upload -bn {{ oss_bucket_name }} --src-dir {{ local_file_or_folder_path }} --content-type auto + command: oci os object bulk-upload -bn {{ oss_bucket_name }} --src-dir {{ local_file_or_folder_path }} --content-type auto --overwrite async: 3600 poll: 10 when: oss_path|length == 0 - name: Upload folder to oci oss bucket - command: oci os object bulk-upload 
-bn {{ oss_bucket_name }} --prefix {{oss_path}} --src-dir {{ local_file_or_folder_path }} --content-type auto + command: oci os object bulk-upload -bn {{ oss_bucket_name }} --prefix {{oss_path}} --src-dir {{ local_file_or_folder_path }} --content-type auto --overwrite async: 3600 poll: 10 when: oss_path|length > 0 From bfe702dc89a698a5a58664b434f9f84da2ff9682 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 29 Dec 2022 12:04:29 +1100 Subject: [PATCH 182/616] oci oss changes for uploadschema Signed-off-by: Deepak Devadathan --- ansible/kp_upload-schema.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index 2b09dac310..19996b5a66 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -45,3 +45,13 @@ gcp_path: "{{ schemas/local" local_file_or_folder_path: "{{ source_name }}" when: cloud_service_provider == "gcloud" + + - name: upload batch of files to oci oss + include_role: + name: oci-cloud-storage + tasks_from: upload-folder.yml + vars: + oss_bucket_name: "{{ plugin_storage }}" + local_file_or_folder_path: "{{ source_name }}" + oss_path: "schemas/local/" + when: cloud_service_provider == "aws" \ No newline at end of file From e1a49ce625406373e7f0b372fd0022215dc5d83b Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 29 Dec 2022 12:06:29 +1100 Subject: [PATCH 183/616] typo in the csp switch Signed-off-by: Deepak Devadathan --- ansible/kp_upload-schema.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index 19996b5a66..3a38f3d09c 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -54,4 +54,4 @@ oss_bucket_name: "{{ plugin_storage }}" local_file_or_folder_path: "{{ source_name }}" oss_path: "schemas/local/" - when: cloud_service_provider == "aws" \ No newline at end of file + when: cloud_service_provider == "oci" \ No newline at end of file 
From 154ced45ddc28ec6e872f3d7cfe18dc9f90f9e08 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 29 Dec 2022 15:25:43 +1100 Subject: [PATCH 184/616] oci oss changes for plugins upload Signed-off-by: Deepak Devadathan --- ansible/deploy-plugins.yml | 66 +++++++++++++++++++ .../tasks/oss-delete-batch-no-poll.yml | 5 ++ .../tasks/oss-upload-batch-no-poll.yml | 5 ++ 3 files changed, 76 insertions(+) create mode 100644 ansible/roles/oci-cloud-storage/tasks/oss-delete-batch-no-poll.yml create mode 100644 ansible/roles/oci-cloud-storage/tasks/oss-upload-batch-no-poll.yml diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index ae7f21637a..190cd89c0f 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -228,3 +228,69 @@ tags: - plugins when: cloud_service_provider == "aws" + + +################################### OCI tasks ######################### + - name: this block consists of tasks related to oci oss + block: + - name: set common oci variables + set_fact: + oss_bucket_name: "{{ oci_content_oss_bucket_name }}" + oss_namespace: "{{ oci_namespace }}" + tags: + - always + + - block: + - name: delete files and folders from oci oss + include_role: + name: oci-cloud-storage + tasks_from: delete-folder.yml + vars: + oss_path: "{{ folder_name }}/" + tags: + - content-editor + - collection-editor + - generic-editor + - preview + + - block: + - name: upload folder to oci oss + include_role: + name: oci-cloud-storage + tasks_from: upload-folder.yml + vars: + oss_path: "{{ folder_name }}/" + local_file_or_folder_path: "{{ source_name }}" + tags: + - content-editor + - collection-editor + - generic-editor + - preview + - editor + - core-plugins + + - block: + - name: upload file to oci oss + include_role: + name: oci-cloud-storage + tasks_from: upload.yml + vars: + oss_path: "artefacts/content-player/content-player-{{ player_version_number }}.zip" + local_file_or_folder_path: "{{ source_file_name }}" + tags: + - preview + + - block: 
+ - name: delete and re-upload plugins + include_role: + name: oci-cloud-storage + tasks_from: "{{ item[0] }}" + vars: + object_prefix: "content-plugins/{{ item[1] }}/" + local_file_or_folder_path: "{{ source_folder }}/{{ item[1] }}" + with_nested: + - ['blob-delete-batch-no-poll.yml', 'blob-upload-batch-no-poll.yml'] + - "{{ lookup('file', plugins_to_delete_and_upload).split('\n') }}" + tags: + - plugins + when: cloud_service_provider == "oci" diff --git a/ansible/roles/oci-cloud-storage/tasks/oss-delete-batch-no-poll.yml b/ansible/roles/oci-cloud-storage/tasks/oss-delete-batch-no-poll.yml new file mode 100644 index 0000000000..8ad3f257e3 --- /dev/null +++ b/ansible/roles/oci-cloud-storage/tasks/oss-delete-batch-no-poll.yml @@ -0,0 +1,5 @@ +--- +- name: delete files and folders - deleting {{ oss_bucket_name }}/{{ object_prefix }} + shell: oci os object bulk-delete -bn {{oss_bucket_name}} --prefix {{object_prefix}} --force + async: 1800 + poll: 0 \ No newline at end of file diff --git a/ansible/roles/oci-cloud-storage/tasks/oss-upload-batch-no-poll.yml b/ansible/roles/oci-cloud-storage/tasks/oss-upload-batch-no-poll.yml new file mode 100644 index 0000000000..6d01756dfe --- /dev/null +++ b/ansible/roles/oci-cloud-storage/tasks/oss-upload-batch-no-poll.yml @@ -0,0 +1,5 @@ +--- +- name: upload files and folders - uploading {{ oss_bucket_name }}/{{ object_prefix }} + shell: oci os object bulk-upload -bn {{oss_bucket_name}} --prefix {{object_prefix}} --src-dir {{local_file_or_folder_path}} --overwrite + async: 1800 + poll: 0 \ No newline at end of file From e9978e75870c46a5eb7f33071eb1d9d1d657f949 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Thu, 29 Dec 2022 11:28:00 +0530 Subject: [PATCH 185/616] Release 5.1.0 - CSP changes (#3692) * Update management bucketname for ES * Update ES snapshot roles --- ansible/roles/es-azure-snapshot/defaults/main.yml | 12 +++--------- ansible/roles/es-gcs-snapshot/defaults/main.yml | 9 
++++++--- ansible/roles/es-s3-snapshot/defaults/main.yml | 9 ++++++--- ansible/roles/es6/tasks/plugins/repository-gcs.yml | 4 ++-- ansible/roles/es6/tasks/plugins/repository-s3.yml | 4 ++-- .../roles/log-es6/tasks/plugins/repository-gcs.yml | 4 ++-- .../roles/log-es6/tasks/plugins/repository-s3.yml | 4 ++-- 7 files changed, 23 insertions(+), 23 deletions(-) diff --git a/ansible/roles/es-azure-snapshot/defaults/main.yml b/ansible/roles/es-azure-snapshot/defaults/main.yml index 396746aa32..df52870977 100644 --- a/ansible/roles/es-azure-snapshot/defaults/main.yml +++ b/ansible/roles/es-azure-snapshot/defaults/main.yml @@ -1,7 +1,7 @@ snapshot_create_request_body: { type: azure, settings: { - container: "{{ es_backup_storage }}", + container: "{{ cloud_storage_esbackup_foldername }}", base_path: "{{ snapshot_base_path }}_{{ base_path_date }}" } } @@ -10,11 +10,5 @@ snapshot_create_request_body: { es_snapshot_host: "localhost" snapshot_base_path: "default" -es_azure_backup_container_name: "elasticsearch-snapshots" - -# This variable is added for the below reason - -# 1. Introduce a common variable for various clouds. In case of azure, it refers to container name, in case of aws / gcp, it refers to folder name -# 2. We want to avoid too many new variable introduction / replacement in first phase. Hence we will reuse the existing variable defined in private repo -# or other default files and just assign the value to the newly introduced common variable -# 3. 
After few releases, we will remove the older variables and use only the new variables across the repos -es_backup_storage: "{{ es_azure_backup_container_name }}" \ No newline at end of file +cloud_storage_esbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_esbackup_foldername: "elasticsearch-snapshots" diff --git a/ansible/roles/es-gcs-snapshot/defaults/main.yml b/ansible/roles/es-gcs-snapshot/defaults/main.yml index 5e3cbece6f..23fa7c5ef1 100644 --- a/ansible/roles/es-gcs-snapshot/defaults/main.yml +++ b/ansible/roles/es-gcs-snapshot/defaults/main.yml @@ -1,12 +1,15 @@ snapshot_create_request_body: { type: gcs, settings: { - bucket: "{{ gcs_management_bucket_name }}", - base_path: "{{ es_backup_storage }}/{{ snapshot_base_path }}_{{ base_path_date }}" + bucket: "{{ cloud_storage_management_bucketname }}", + base_path: "{{ cloud_storage_esbackup_foldername }}/{{ snapshot_base_path }}_{{ base_path_date }}" } } # Override these values es_snapshot_host: "localhost" snapshot_base_path: "default" -es_backup_storage: "elasticsearch-snapshots" \ No newline at end of file +es_backup_storage: "elasticsearch-snapshots" + +cloud_storage_esbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_esbackup_foldername: "elasticsearch-snapshots" diff --git a/ansible/roles/es-s3-snapshot/defaults/main.yml b/ansible/roles/es-s3-snapshot/defaults/main.yml index 7ddda6ebd0..3a55471ccf 100644 --- a/ansible/roles/es-s3-snapshot/defaults/main.yml +++ b/ansible/roles/es-s3-snapshot/defaults/main.yml @@ -1,12 +1,15 @@ snapshot_create_request_body: { type: s3, settings: { - bucket: "{{ aws_management_bucket_name }}", - base_path: "{{ es_backup_storage }}/{{ snapshot_base_path }}_{{ base_path_date }}" + bucket: "{{ cloud_storage_esbackup_bucketname }}", + base_path: "{{ cloud_storage_esbackup_foldername }}/{{ snapshot_base_path }}_{{ base_path_date }}" } } # Override these values es_snapshot_host: "localhost" snapshot_base_path: "default" 
-es_backup_storage: "elasticsearch-snapshots" \ No newline at end of file +es_backup_storage: "elasticsearch-snapshots" + +cloud_storage_esbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_esbackup_foldername: "elasticsearch-snapshots" diff --git a/ansible/roles/es6/tasks/plugins/repository-gcs.yml b/ansible/roles/es6/tasks/plugins/repository-gcs.yml index 7d5c32e52e..6a32c0051a 100644 --- a/ansible/roles/es6/tasks/plugins/repository-gcs.yml +++ b/ansible/roles/es6/tasks/plugins/repository-gcs.yml @@ -3,7 +3,7 @@ become: yes copy: dest: "{{ conf_dir }}/gcs_management_bucket_service_account.json" - content: "{{ gcs_management_bucket_service_account }}" + content: "{{ cloud_management_storage_secret }}" - name: Add gcs service account file to keystore become: yes @@ -15,4 +15,4 @@ - name: Remove the service account file file: path: "{{ conf_dir }}/gcs_management_bucket_service_account.json" - state: absent \ No newline at end of file + state: absent diff --git a/ansible/roles/es6/tasks/plugins/repository-s3.yml b/ansible/roles/es6/tasks/plugins/repository-s3.yml index b5897792ab..07655d6746 100644 --- a/ansible/roles/es6/tasks/plugins/repository-s3.yml +++ b/ansible/roles/es6/tasks/plugins/repository-s3.yml @@ -1,14 +1,14 @@ --- - name: Add default aws account name for backups become: yes - shell: echo "{{ aws_management_bucket_user_access_key }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.access_key + shell: echo "{{ cloud_management_storage_accountname }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.access_key no_log: True environment: ES_PATH_CONF: "{{ conf_dir }}" - name: Add default aws account key for backups become: yes - shell: echo "{{ aws_management_bucket_user_secret_key }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.secret_key + shell: echo "{{ cloud_management_storage_secret }}" | {{ es_home }}/bin/elasticsearch-keystore add -f 
s3.client.default.secret_key no_log: True environment: ES_PATH_CONF: "{{ conf_dir }}" diff --git a/ansible/roles/log-es6/tasks/plugins/repository-gcs.yml b/ansible/roles/log-es6/tasks/plugins/repository-gcs.yml index 81078e173d..7d1c1fbd4a 100644 --- a/ansible/roles/log-es6/tasks/plugins/repository-gcs.yml +++ b/ansible/roles/log-es6/tasks/plugins/repository-gcs.yml @@ -3,7 +3,7 @@ become: yes copy: dest: "{{ es_conf_dir }}/gcs_management_bucket_service_account.json" - content: "{{ gcs_management_bucket_service_account }}" + content: "{{ cloud_management_storage_secret }}" - name: Add gcs service account file to keystore become: yes @@ -15,4 +15,4 @@ - name: Remove the service account file file: path: "{{ es_conf_dir }}/gcs_management_bucket_service_account.json" - state: absent \ No newline at end of file + state: absent diff --git a/ansible/roles/log-es6/tasks/plugins/repository-s3.yml b/ansible/roles/log-es6/tasks/plugins/repository-s3.yml index 344af29e6e..2c05927255 100644 --- a/ansible/roles/log-es6/tasks/plugins/repository-s3.yml +++ b/ansible/roles/log-es6/tasks/plugins/repository-s3.yml @@ -1,14 +1,14 @@ --- - name: Add default aws account name for backups become: yes - shell: echo "{{ aws_management_bucket_user_access_key }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.access_key + shell: echo "{{ cloud_management_storage_accountname }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.access_key no_log: True environment: ES_PATH_CONF: "{{ es_conf_dir }}" - name: Add default aws account key for backups become: yes - shell: echo "{{ aws_management_bucket_user_secret_key }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.secret_key + shell: echo "{{ cloud_management_storage_secret }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.secret_key no_log: True environment: ES_PATH_CONF: "{{ es_conf_dir }}" From 13dfc4709969435a7e735048f098f8e23b2f98bc Mon Sep 17 00:00:00 2001 
From: PrasadMoka Date: Thu, 29 Dec 2022 11:27:06 +0530 Subject: [PATCH 186/616] ED-621: added placeholder variable --- ansible/inventory/env/group_vars/all.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 8dfdd8a43d..d86c71d018 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -133,6 +133,7 @@ cassandra_backup_dir: /data/cassandra/backup cassandra_multi_dc_enabled: false # Release-5.0.1 cloud_storage_base_url: "{{cloud_storage_base_url}}" +cloud_store_base_path_placeholder: "{{ cloud_store_base_path_placeholder }}" keycloak_realm: sunbird sunbird_content_player_url: "http://kong:8000/" From bf68b0d50a4383d90f4ad0b0c1c526eabda8f2bf Mon Sep 17 00:00:00 2001 From: PrasadMoka Date: Thu, 29 Dec 2022 11:41:08 +0530 Subject: [PATCH 187/616] removed variable --- ansible/inventory/env/group_vars/all.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index d86c71d018..8dfdd8a43d 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -133,7 +133,6 @@ cassandra_backup_dir: /data/cassandra/backup cassandra_multi_dc_enabled: false # Release-5.0.1 cloud_storage_base_url: "{{cloud_storage_base_url}}" -cloud_store_base_path_placeholder: "{{ cloud_store_base_path_placeholder }}" keycloak_realm: sunbird sunbird_content_player_url: "http://kong:8000/" From b932f9d54735ce71f74add78388143e0e936d901 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 29 Dec 2022 21:30:59 +1100 Subject: [PATCH 188/616] testing by adding privileged: true to init container Signed-off-by: Deepak Devadathan --- kubernetes/helm_charts/core/analytics/templates/deployment.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/kubernetes/helm_charts/core/analytics/templates/deployment.yaml 
b/kubernetes/helm_charts/core/analytics/templates/deployment.yaml index 0926360f76..20d05a3794 100644 --- a/kubernetes/helm_charts/core/analytics/templates/deployment.yaml +++ b/kubernetes/helm_charts/core/analytics/templates/deployment.yaml @@ -131,6 +131,7 @@ spec: - NET_ADMIN runAsNonRoot: false runAsUser: 0 + privileged: true {{- end }} volumes: - name: {{ .Chart.Name }}-config From 45ad9f6b7bf3d01720d0eb55b6147180d1ed5658 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 29 Dec 2022 21:33:07 +1100 Subject: [PATCH 189/616] removed privileged=true..as it didnt help the deployment Signed-off-by: Deepak Devadathan --- kubernetes/helm_charts/core/analytics/templates/deployment.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/kubernetes/helm_charts/core/analytics/templates/deployment.yaml b/kubernetes/helm_charts/core/analytics/templates/deployment.yaml index 20d05a3794..0926360f76 100644 --- a/kubernetes/helm_charts/core/analytics/templates/deployment.yaml +++ b/kubernetes/helm_charts/core/analytics/templates/deployment.yaml @@ -131,7 +131,6 @@ spec: - NET_ADMIN runAsNonRoot: false runAsUser: 0 - privileged: true {{- end }} volumes: - name: {{ .Chart.Name }}-config From fd9a8f4be0fa8552ff2036b419b95131c4595d61 Mon Sep 17 00:00:00 2001 From: PrasadMoka Date: Thu, 29 Dec 2022 18:20:21 +0530 Subject: [PATCH 190/616] ED-621: added placeholder variable --- ansible/inventory/env/group_vars/all.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 8dfdd8a43d..153be0f813 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -133,6 +133,7 @@ cassandra_backup_dir: /data/cassandra/backup cassandra_multi_dc_enabled: false # Release-5.0.1 cloud_storage_base_url: "{{cloud_storage_base_url}}" +cloud_store_base_path_placeholder: "$CLOUD_BASE_PATH" keycloak_realm: sunbird sunbird_content_player_url: "http://kong:8000/" From 
891b73c7b3c5ee97ec8e93502d8eda804ce746f0 Mon Sep 17 00:00:00 2001 From: anilgupta Date: Thu, 29 Dec 2022 18:35:42 +0530 Subject: [PATCH 191/616] Issue #KN-439 chore: Added the transcripts in cloudstorage_metadata_list. --- ansible/roles/stack-sunbird/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 036fda51bd..65268e7477 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -1052,7 +1052,7 @@ kong_desktop_device_consumer_names_for_opa: '["desktop"]' cloudstorage_relative_path_prefix_content: "CONTENT_STORAGE_BASE_PATH" cloudstorage_relative_path_prefix_dial: "DIAL_STORAGE_BASE_PATH" -cloudstorage_metadata_list: '["appIcon", "artifactUrl", "posterImage", "previewUrl", "thumbnail", "assetsMap", "certTemplate", "itemSetPreviewUrl", "grayScaleAppIcon", "sourceURL", "variants", "downloadUrl", "streamingUrl", "toc_url", "data", "question", "solutions", "editorState", "media", "pdfUrl"]' +cloudstorage_metadata_list: '["appIcon", "artifactUrl", "posterImage", "previewUrl", "thumbnail", "assetsMap", "certTemplate", "itemSetPreviewUrl", "grayScaleAppIcon", "sourceURL", "variants", "downloadUrl", "streamingUrl", "toc_url", "data", "question", "solutions", "editorState", "media", "pdfUrl", "transcripts"]' ### inQuiry assessment service default values inquiry_schema_path: "{{ kp_schema_base_path }}" From f8bb1c0de25196580304843b0807fa62774e541e Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 30 Dec 2022 10:41:56 +1100 Subject: [PATCH 192/616] deployed image check for oci Signed-off-by: Deepak Devadathan --- kubernetes/ansible/roles/deploy-player/tasks/main.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kubernetes/ansible/roles/deploy-player/tasks/main.yml b/kubernetes/ansible/roles/deploy-player/tasks/main.yml index 52500df2e3..3fd939d485 100644 --- 
a/kubernetes/ansible/roles/deploy-player/tasks/main.yml +++ b/kubernetes/ansible/roles/deploy-player/tasks/main.yml @@ -76,6 +76,12 @@ - name: Get deployed image name shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[1]' register: deployed_image + when: cloud_service_provider != "oci" + +- name: Get deployed image name + shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[3]' + register: deployed_image + when: cloud_service_provider == "oci" - set_fact: metadata_image: "{{ image_name }}:{{ image_tag }}" From 5ced31640371f397a32e7482d5e127e53032c9f4 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 30 Dec 2022 14:00:41 +1100 Subject: [PATCH 193/616] added the switch for selecting api version Signed-off-by: Deepak Devadathan --- .../logging/fluent-bit/templates/serviceaccount.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/kubernetes/helm_charts/logging/fluent-bit/templates/serviceaccount.yaml b/kubernetes/helm_charts/logging/fluent-bit/templates/serviceaccount.yaml index f162d2bc28..83329448f6 100644 --- a/kubernetes/helm_charts/logging/fluent-bit/templates/serviceaccount.yaml +++ b/kubernetes/helm_charts/logging/fluent-bit/templates/serviceaccount.yaml @@ -4,7 +4,11 @@ metadata: name: fluent-bit namespace: {{ default .Values.namespace .Release.Namespace }} --- +{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }} +apiVersion: rbac.authorization.k8s.io/v1 +{{ else }} apiVersion: rbac.authorization.k8s.io/v1beta1 +{{- end }} kind: ClusterRole metadata: name: fluent-bit-read @@ -15,7 +19,11 @@ rules: - pods verbs: ["get", "list", "watch"] --- +{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }} +apiVersion: rbac.authorization.k8s.io/v1 +{{ else }} apiVersion: rbac.authorization.k8s.io/v1beta1 +{{- end }} kind: 
ClusterRoleBinding metadata: name: fluent-bit-read From b87026bcdab282772a3ec179c8563349cd61b106 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 30 Dec 2022 15:21:15 +1100 Subject: [PATCH 194/616] corrected the right task file for plugin del and upl for oci Signed-off-by: Deepak Devadathan --- ansible/deploy-plugins.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 190cd89c0f..212c7d546c 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -281,7 +281,7 @@ - preview - block: - - name: delete and re-upload plugins + - name: delete and re-upload plugins for oci include_role: name: oci-cloud-storage tasks_from: "{{ item[0] }}" @@ -289,7 +289,7 @@ object_prefix: "content-plugins/{{ item[1] }}/" local_file_or_folder_path: "{{ source_folder }}/{{ item[1] }}" with_nested: - - ['blob-delete-batch-no-poll.yml', 'blob-upload-batch-no-poll.yml'] + - ['oss-delete-batch-no-poll.yml', 'oss-upload-batch-no-poll.yml'] - "{{ lookup('file', plugins_to_delete_and_upload).split('\n') }}" tags: - plugins From 389013a3ff93736d670486ddc8df1f198fb5c1fa Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Fri, 30 Dec 2022 09:58:27 +0530 Subject: [PATCH 195/616] Add gcp vars for service accounts (#3696) --- ansible/artifacts-download.yml | 2 ++ ansible/artifacts-upload.yml | 2 ++ ansible/assets-upload.yml | 2 ++ ansible/deploy-plugins.yml | 2 ++ ansible/desktop-faq-upload.yml | 10 ++++++++++ ansible/dial_upload-schema.yml | 2 ++ ansible/kp_upload-schema.yml | 2 ++ ansible/roles/cassandra-backup/tasks/main.yml | 2 ++ ansible/roles/cassandra-restore/tasks/main.yml | 2 ++ ansible/roles/cert-templates/tasks/main.yml | 2 ++ ansible/roles/desktop-deploy/tasks/main.yml | 2 ++ ansible/roles/gcp-cloud-storage/defaults/main.yml | 5 +++++ ansible/roles/grafana-backup/tasks/main.yml | 2 ++ ansible/roles/jenkins-backup-upload/tasks/main.yml 
| 2 ++ ansible/roles/mongodb-backup/tasks/main.yml | 2 ++ .../postgres-managed-service-backup/tasks/main.yml | 2 ++ .../postgres-managed-service-restore/tasks/main.yml | 2 ++ ansible/roles/postgresql-backup/tasks/main.yml | 2 ++ ansible/roles/postgresql-restore/tasks/main.yml | 2 ++ ansible/roles/prometheus-backup-v2/tasks/main.yml | 2 ++ ansible/roles/prometheus-backup/tasks/main.yml | 2 ++ ansible/roles/prometheus-restore/tasks/main.yml | 2 ++ ansible/roles/redis-backup/tasks/main.yml | 2 ++ ansible/uploadFAQs.yml | 2 ++ 24 files changed, 59 insertions(+) diff --git a/ansible/artifacts-download.yml b/ansible/artifacts-download.yml index 46167180e4..2fc2748229 100644 --- a/ansible/artifacts-download.yml +++ b/ansible/artifacts-download.yml @@ -21,6 +21,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: + gcp_storage_service_account_name: "{{ cloud_artifact_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_artifact_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_artifacts_bucketname }}" gcp_path: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" diff --git a/ansible/artifacts-upload.yml b/ansible/artifacts-upload.yml index 3bdbe73017..305492afc2 100644 --- a/ansible/artifacts-upload.yml +++ b/ansible/artifacts-upload.yml @@ -22,6 +22,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_artifact_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_artifact_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_artifacts_bucketname }}" gcp_path: "{{ artifact }}" local_file_or_folder_path: "{{ artifact_path }}" diff --git a/ansible/assets-upload.yml b/ansible/assets-upload.yml index 09e7df6ceb..b8d5836cc6 100644 --- a/ansible/assets-upload.yml +++ b/ansible/assets-upload.yml @@ -56,6 +56,8 @@ block: - name: set common gcloud variables set_fact: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret 
}}" gcp_bucket_name: "{{ cloud_storage_playercdn_bucketname }}" gcp_path: "" file_delete_pattern: "" diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index a78ce1c640..5774a12454 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -141,6 +141,8 @@ name: gcp-cloud-storage tasks_from: "{{ item[0] }}" vars: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret }}" file_delete_pattern: "content-plugins/{{ item[1] }}/*" gcp_path: "content-plugins/{{ item[1] }}" local_file_or_folder_path: "{{ source_folder }}/{{ item[1] }}" diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index 3683202043..a95e8828e7 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -95,6 +95,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_public_bucketname }}" tags: - upload-desktop-faq @@ -105,6 +107,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_private_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_private_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_label_bucketname }}" tags: - upload-label @@ -115,6 +119,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_chatbot_bucketname }}" tags: - upload-chatbot-config @@ -125,6 +131,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_sourcing_bucketname }}" tags: 
- upload-csv-template @@ -135,6 +143,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_discussionui_bucketname }}" tags: - upload-discussion-ui diff --git a/ansible/dial_upload-schema.yml b/ansible/dial_upload-schema.yml index 757a80f6e5..75609bde68 100644 --- a/ansible/dial_upload-schema.yml +++ b/ansible/dial_upload-schema.yml @@ -50,6 +50,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_dial_bucketname }}" gcp_path: "schemas/local" local_file_or_folder_path: "dial_schema_template_files" diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index d12b74433d..0f029d7cbd 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -34,6 +34,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_content_bucketname }}" gcp_path: "{{ schemas/local" local_file_or_folder_path: "{{ source_name }}" diff --git a/ansible/roles/cassandra-backup/tasks/main.yml b/ansible/roles/cassandra-backup/tasks/main.yml index 0e5ae87477..54941f8343 100755 --- a/ansible/roles/cassandra-backup/tasks/main.yml +++ b/ansible/roles/cassandra-backup/tasks/main.yml @@ -63,6 +63,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_cassandrabackup_bucketname }}" gcp_path: "{{ cloud_storage_cassandrabackup_foldername }}" 
local_file_or_folder_path: "/data/cassandra/backup/{{ cassandra_backup_folder_name }}" diff --git a/ansible/roles/cassandra-restore/tasks/main.yml b/ansible/roles/cassandra-restore/tasks/main.yml index 4bd8c05991..8d74c4c695 100755 --- a/ansible/roles/cassandra-restore/tasks/main.yml +++ b/ansible/roles/cassandra-restore/tasks/main.yml @@ -37,6 +37,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_cassandrabackup_bucketname }}" gcp_path: "{{ cloud_storage_cassandrabackup_foldername }}/{{ cassandra_restore_gzip_file_name }}" local_file_or_folder_path: "{{ cassandra_restore_gzip_file_path }}" diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index 0caf2b1bfe..893e0776f4 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -62,6 +62,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: + gcp_storage_service_account_name: "{{ cloud_private_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_private_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_certservice_bucketname }}" gcp_path: "" local_file_or_folder_path: "{{ cert_location }}/cert-templates/certUtilScripts/out" diff --git a/ansible/roles/desktop-deploy/tasks/main.yml b/ansible/roles/desktop-deploy/tasks/main.yml index ba077b778f..6a01f97b0c 100644 --- a/ansible/roles/desktop-deploy/tasks/main.yml +++ b/ansible/roles/desktop-deploy/tasks/main.yml @@ -108,6 +108,8 @@ block: - name: set common gcloud variables set_fact: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_offlineinstaller_bucketname }}" - name: upload batch of files to gcloud storage diff --git 
a/ansible/roles/gcp-cloud-storage/defaults/main.yml b/ansible/roles/gcp-cloud-storage/defaults/main.yml index b0fd847b26..a9f4247d42 100644 --- a/ansible/roles/gcp-cloud-storage/defaults/main.yml +++ b/ansible/roles/gcp-cloud-storage/defaults/main.yml @@ -1,3 +1,8 @@ +# GCP service account name +# Example - +# gcp_storage_service_account_name: test@sunbird.iam.gserviceaccount.com +gcp_storage_service_account_name: "" + # GCP bucket name # Example - # bucket_name: "sunbird-dev-public" diff --git a/ansible/roles/grafana-backup/tasks/main.yml b/ansible/roles/grafana-backup/tasks/main.yml index 90dc3526ca..a41b01c2aa 100644 --- a/ansible/roles/grafana-backup/tasks/main.yml +++ b/ansible/roles/grafana-backup/tasks/main.yml @@ -50,6 +50,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_grafanabackup_bucketname }}" gcp_path: "{{ cloud_storage_grafanabackup_foldername }}/{{ grafana_backup_gzip_file_name }}" local_file_or_folder_path: "{{ grafana_backup_gzip_file_path }}" diff --git a/ansible/roles/jenkins-backup-upload/tasks/main.yml b/ansible/roles/jenkins-backup-upload/tasks/main.yml index 89d8f3e29c..612557a61b 100644 --- a/ansible/roles/jenkins-backup-upload/tasks/main.yml +++ b/ansible/roles/jenkins-backup-upload/tasks/main.yml @@ -43,6 +43,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_jenkinsbackup_bucketname }}" gcp_path: "{{ cloud_storage_jenkinsbackup_foldername }}/{{ LATEST_BACKUP_DIR.stdout }}.zip" local_file_or_folder_path: "/tmp/{{ LATEST_BACKUP_DIR.stdout }}.zip" diff --git a/ansible/roles/mongodb-backup/tasks/main.yml b/ansible/roles/mongodb-backup/tasks/main.yml index 
f51216b14f..fe0aa286bd 100644 --- a/ansible/roles/mongodb-backup/tasks/main.yml +++ b/ansible/roles/mongodb-backup/tasks/main.yml @@ -45,6 +45,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_mongodbbackup_bucketname }}" gcp_path: "{{ cloud_storage_mongodbbackup_foldername }}/{{ mongo_backup_file_name }}.tar.gz" local_file_or_folder_path: "{{ mongo_backup_file_path }}.tar.gz" diff --git a/ansible/roles/postgres-managed-service-backup/tasks/main.yml b/ansible/roles/postgres-managed-service-backup/tasks/main.yml index ba101e2509..588b8fc5b5 100644 --- a/ansible/roles/postgres-managed-service-backup/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-backup/tasks/main.yml @@ -72,6 +72,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" gcp_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ postgresql_backup_gzip_file_name }}.zip" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" diff --git a/ansible/roles/postgres-managed-service-restore/tasks/main.yml b/ansible/roles/postgres-managed-service-restore/tasks/main.yml index 58d2c53482..1b499e338f 100644 --- a/ansible/roles/postgres-managed-service-restore/tasks/main.yml +++ b/ansible/roles/postgres-managed-service-restore/tasks/main.yml @@ -39,6 +39,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" gcp_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ 
postgres_backup_filename }}" local_file_or_folder_path: "{{ postgres_restore_dir }}/{{ postgres_backup_filepath }}" diff --git a/ansible/roles/postgresql-backup/tasks/main.yml b/ansible/roles/postgresql-backup/tasks/main.yml index 5b3303bf97..fd4da5b8cc 100644 --- a/ansible/roles/postgresql-backup/tasks/main.yml +++ b/ansible/roles/postgresql-backup/tasks/main.yml @@ -44,6 +44,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" gcp_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ postgresql_backup_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_backup_gzip_file_path }}" diff --git a/ansible/roles/postgresql-restore/tasks/main.yml b/ansible/roles/postgresql-restore/tasks/main.yml index e076590f23..e57a321a29 100644 --- a/ansible/roles/postgresql-restore/tasks/main.yml +++ b/ansible/roles/postgresql-restore/tasks/main.yml @@ -34,6 +34,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_postgresqlbackup_bucketname }}" gcp_path: "{{ cloud_storage_postgresqlbackup_foldername }}/{{ postgresql_restore_gzip_file_name }}" local_file_or_folder_path: "{{ postgresql_restore_gzip_file_path }}" diff --git a/ansible/roles/prometheus-backup-v2/tasks/main.yml b/ansible/roles/prometheus-backup-v2/tasks/main.yml index 4a65bb6f8f..0323ed4d84 100644 --- a/ansible/roles/prometheus-backup-v2/tasks/main.yml +++ b/ansible/roles/prometheus-backup-v2/tasks/main.yml @@ -47,6 +47,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ 
cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_prometheusbackup_bucketname }}" gcp_path: "{{ cloud_storage_prometheusbackup_foldername }}/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" local_file_or_folder_path: "/tmp/{{ prometheus_backup_prefix }}_{{ snapshot_name }}.tar.gz" diff --git a/ansible/roles/prometheus-backup/tasks/main.yml b/ansible/roles/prometheus-backup/tasks/main.yml index 10d8e2fb3b..a665540f8a 100644 --- a/ansible/roles/prometheus-backup/tasks/main.yml +++ b/ansible/roles/prometheus-backup/tasks/main.yml @@ -56,6 +56,8 @@ name: gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_prometheusbackup_bucketname }}" gcp_path: "{{ cloud_storage_prometheusbackup_foldername }}/{{ prometheus_backup_gzip_file_name }}" local_file_or_folder_path: "{{ prometheus_backup_gzip_file_path }}" diff --git a/ansible/roles/prometheus-restore/tasks/main.yml b/ansible/roles/prometheus-restore/tasks/main.yml index 440b777fe4..60d9bd39bd 100644 --- a/ansible/roles/prometheus-restore/tasks/main.yml +++ b/ansible/roles/prometheus-restore/tasks/main.yml @@ -32,6 +32,8 @@ name: gcp-cloud-storage tasks_from: download.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_prometheusbackup_bucketname }}" gcp_path: "{{ cloud_storage_prometheusbackup_foldername }}/{{ prometheus_backup_filename }}" local_file_or_folder_path: "{{ prometheus_backup_dir }}/{{ prometheus_backup_filepath }}" diff --git a/ansible/roles/redis-backup/tasks/main.yml b/ansible/roles/redis-backup/tasks/main.yml index f1cf35622f..9863fe5f28 100644 --- a/ansible/roles/redis-backup/tasks/main.yml +++ b/ansible/roles/redis-backup/tasks/main.yml @@ -44,6 +44,8 @@ name: 
gcp-cloud-storage tasks_from: upload.yml vars: + gcp_storage_service_account_name: "{{ cloud_management_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_management_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_redisbackup_bucketname }}" gcp_path: "{{ cloud_storage_redisbackup_foldername }}/{{ redis_backup_file_name }}" local_file_or_folder_path: "{{ redis_backup_file_path }}" diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index cf90e343d1..b38f2ff99a 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -50,6 +50,8 @@ name: gcp-cloud-storage tasks_from: upload-batch.yml vars: + gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" + gcp_storage_key_file: "{{ cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_public_bucketname }}" dest_folder_path: "" local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" From e997f3ea3fd4d98e4ab91d1aa2c0146a81ce664f Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 30 Dec 2022 15:35:41 +1100 Subject: [PATCH 196/616] added a temporary debug Signed-off-by: Deepak Devadathan --- ansible/deploy-plugins.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 212c7d546c..962237a7db 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -72,6 +72,10 @@ - preview - block: + - name : Debug + debug: + msg: | + "{{cloud_service_provider}}" - name: delete and re-upload plugins include_role: name: azure-cloud-storage From 6564248969925a93b12d75468967cfe8f8abdd62 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 30 Dec 2022 15:37:06 +1100 Subject: [PATCH 197/616] testing flow Signed-off-by: Deepak Devadathan --- ansible/deploy-plugins.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 962237a7db..27a34a6326 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ 
-76,6 +76,8 @@ debug: msg: | "{{cloud_service_provider}}" + tags: + - plugins - name: delete and re-upload plugins include_role: name: azure-cloud-storage From 6839a4bd85ca7f3e519fabf2a96ed8ae94adc2c8 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 30 Dec 2022 15:38:30 +1100 Subject: [PATCH 198/616] debug line Signed-off-by: Deepak Devadathan --- ansible/deploy-plugins.yml | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 27a34a6326..0ff5e09701 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -72,13 +72,7 @@ - preview - block: - - name : Debug - debug: - msg: | - "{{cloud_service_provider}}" - tags: - - plugins - - name: delete and re-upload plugins + - name: delete and re-upload plugins - azure include_role: name: azure-cloud-storage tasks_from: "{{ item[0] }}" From 5a7c2a5b9d6f64597c1a7109b17eb85df4369cb6 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 30 Dec 2022 15:40:28 +1100 Subject: [PATCH 199/616] testing Signed-off-by: Deepak Devadathan --- ansible/deploy-plugins.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 0ff5e09701..946ba0b06b 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -29,6 +29,7 @@ storage_account_sas_token: "{{ azure_public_storage_account_sas }}" tags: - always + - plugins no_log: True - block: @@ -72,7 +73,7 @@ - preview - block: - - name: delete and re-upload plugins - azure + - name: delete and re-upload plugins include_role: name: azure-cloud-storage tasks_from: "{{ item[0] }}" From 3c9ab202fb9bc1a92fcb267665be3a6658a024c3 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 30 Dec 2022 15:49:57 +1100 Subject: [PATCH 200/616] disable delete and repload plugin temp Signed-off-by: Deepak Devadathan --- ansible/deploy-plugins.yml | 31 +++++++++++++++---------------- 1 file changed, 15 
insertions(+), 16 deletions(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 946ba0b06b..f18c33e4c6 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -29,7 +29,6 @@ storage_account_sas_token: "{{ azure_public_storage_account_sas }}" tags: - always - - plugins no_log: True - block: @@ -71,21 +70,21 @@ local_file_or_folder_path: "{{ source_file_name }}" tags: - preview - - - block: - - name: delete and re-upload plugins - include_role: - name: azure-cloud-storage - tasks_from: "{{ item[0] }}" - vars: - blob_delete_pattern: "content-plugins/{{ item[1] }}/*" - blob_container_folder_path: "/content-plugins/{{ item[1] }}" - local_file_or_folder_path: "{{ source_folder }}/{{ item[1] }}" - with_nested: - - ['blob-delete-batch-no-poll.yml', 'blob-upload-batch-no-poll.yml'] - - "{{ lookup('file', plugins_to_delete_and_upload).split('\n') }}" - tags: - - plugins +# Disabled tempoararily + # - block: + # - name: delete and re-upload plugins + # include_role: + # name: azure-cloud-storage + # tasks_from: "{{ item[0] }}" + # vars: + # blob_delete_pattern: "content-plugins/{{ item[1] }}/*" + # blob_container_folder_path: "/content-plugins/{{ item[1] }}" + # local_file_or_folder_path: "{{ source_folder }}/{{ item[1] }}" + # with_nested: + # - ['blob-delete-batch-no-poll.yml', 'blob-upload-batch-no-poll.yml'] + # - "{{ lookup('file', plugins_to_delete_and_upload).split('\n') }}" + # tags: + # - plugins when: cloud_service_provider == "azure" ### GCP tasks #### From 4b158b16110169b51ff82cde244cf4fff8b003b5 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 30 Dec 2022 15:57:38 +1100 Subject: [PATCH 201/616] testing Signed-off-by: Deepak Devadathan --- ansible/deploy-plugins.yml | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index f18c33e4c6..345f4c8fc1 100644 --- a/ansible/deploy-plugins.yml +++ 
b/ansible/deploy-plugins.yml @@ -70,21 +70,20 @@ local_file_or_folder_path: "{{ source_file_name }}" tags: - preview -# Disabled tempoararily - # - block: - # - name: delete and re-upload plugins - # include_role: - # name: azure-cloud-storage - # tasks_from: "{{ item[0] }}" - # vars: - # blob_delete_pattern: "content-plugins/{{ item[1] }}/*" - # blob_container_folder_path: "/content-plugins/{{ item[1] }}" - # local_file_or_folder_path: "{{ source_folder }}/{{ item[1] }}" - # with_nested: - # - ['blob-delete-batch-no-poll.yml', 'blob-upload-batch-no-poll.yml'] - # - "{{ lookup('file', plugins_to_delete_and_upload).split('\n') }}" - # tags: - # - plugins + - block: + - name: delete and re-upload plugins + include_role: + name: azure-cloud-storage + tasks_from: "{{ item[0] }}" + vars: + blob_delete_pattern: "content-plugins/{{ item[1] }}/*" + blob_container_folder_path: "/content-plugins/{{ item[1] }}" + local_file_or_folder_path: "{{ source_folder }}/{{ item[1] }}" + with_nested: + - ['blob-delete-batch-no-poll.yml', 'blob-upload-batch-no-poll.yml'] + - "{{ lookup('file', plugins_to_delete_and_upload).split('\n') }}" + tags: + - plugins when: cloud_service_provider == "azure" ### GCP tasks #### From 24bddeed6b552777ffd0730eabdd5755dbddec28 Mon Sep 17 00:00:00 2001 From: Kenneth Heung Date: Mon, 2 Jan 2023 14:28:38 +0800 Subject: [PATCH 202/616] adding OCI values for player environments --- ansible/roles/stack-sunbird/templates/sunbird_player.env | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_player.env b/ansible/roles/stack-sunbird/templates/sunbird_player.env index 9cb6473418..0e5e7ceee6 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_player.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_player.env @@ -228,8 +228,9 @@ sunbird_gcloud_projectId={{gcloud_private_bucket_projectId | default("")}} cloud_service_provider={{cloud_service_provider}} 
cloud_private_storage_accountname={{cloud_private_storage_accountname | default("")}} cloud_private_storage_secret={{cloud_private_storage_secret | default("")}} -cloud_private_storage_region={{cloud_private_storage_region | default("ap-south-1")}} +cloud_private_storage_region={{cloud_private_storage_region | default("ap-hyderabad-1")}} cloud_private_storage_project={{cloud_private_storage_project | default("")}} +cloud_private_storage_endpoint={{cloud_private_storage_endpoint | default("https://apaccpt03.compat.objectstorage.ap-hyderabad-1.oraclecloud.com")}} cloud_storage_privatereports_bucketname={{cloud_storage_privatereports_bucketname | default("reports")}} cloud_storage_resourceBundle_bucketname={{cloud_storage_label_bucketname | default("label")}} cloud_storage_desktopCrash_bucketname={{cloud_storage_desktopcrash_bucketname | default("desktopappcrashlogs")}} From fe6b33643928c377a2a9a9c66ac07ecf66fed547 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Mon, 2 Jan 2023 14:23:28 +0530 Subject: [PATCH 203/616] Fix the command to get deployed image (#3698) --- kubernetes/ansible/roles/helm-deploy/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml index 8f4881089a..86c3386a1e 100644 --- a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml @@ -118,7 +118,7 @@ ignore_errors: true - name: Get deployed image name - deployments - shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[1]' + shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[-1]' register: image - set_fact: From 084eb4feec8d35a7e971e0a3898410a85d0a9f5d Mon Sep 17 00:00:00 2001 From: 
santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Mon, 2 Jan 2023 17:23:45 +0530 Subject: [PATCH 204/616] Fix gcp deploy issues (#3699) * Fix typo * Fix the command to get deployed image --- ansible/kp_upload-schema.yml | 2 +- kubernetes/ansible/roles/deploy-player/tasks/main.yml | 2 +- kubernetes/ansible/roles/helm-daemonset/tasks/main.yml | 2 +- kubernetes/ansible/roles/helm-deploy/tasks/main.yml | 4 ++-- kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index 0f029d7cbd..3a28ce5782 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -37,6 +37,6 @@ gcp_storage_service_account_name: "{{ cloud_public_storage_accountname }}" gcp_storage_key_file: "{{ cloud_public_storage_secret }}" gcp_bucket_name: "{{ cloud_storage_content_bucketname }}" - gcp_path: "{{ schemas/local" + gcp_path: "schemas/local" local_file_or_folder_path: "{{ source_name }}" when: cloud_service_provider == "gcloud" diff --git a/kubernetes/ansible/roles/deploy-player/tasks/main.yml b/kubernetes/ansible/roles/deploy-player/tasks/main.yml index 52500df2e3..0aa27af993 100644 --- a/kubernetes/ansible/roles/deploy-player/tasks/main.yml +++ b/kubernetes/ansible/roles/deploy-player/tasks/main.yml @@ -74,7 +74,7 @@ delay: 30 - name: Get deployed image name - shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[1]' + shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[-1]' register: deployed_image - set_fact: diff --git a/kubernetes/ansible/roles/helm-daemonset/tasks/main.yml b/kubernetes/ansible/roles/helm-daemonset/tasks/main.yml index e04c4f137f..91fcc9f979 100644 --- a/kubernetes/ansible/roles/helm-daemonset/tasks/main.yml +++ 
b/kubernetes/ansible/roles/helm-daemonset/tasks/main.yml @@ -25,7 +25,7 @@ delay: 30 - name: Get deployed image name - shell: "kubectl get daemonsets {{ release_name }} -o json -n {{ namespace }} | jq '.spec.template.spec.containers | .[].image' -r | awk -F/ '{print $2}'" + shell: "kubectl get daemonsets {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[].image | split("/")[-1]'" register: deployed_image - set_fact: diff --git a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml index 86c3386a1e..16b62ee9d6 100644 --- a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml @@ -132,7 +132,7 @@ ignore_errors: true - name: Get deployed image name - daemonsets - shell: "kubectl get daemonsets {{ release_name }} -o json -n {{ namespace }} | jq '.spec.template.spec.containers | .[].image' -r | awk -F/ '{print $2}'" + shell: kubectl get daemonsets {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[].image | split("/")[-1]' register: image - set_fact: @@ -146,7 +146,7 @@ ignore_errors: true - name: Get deployed image name - statefulsets - shell: "kubectl get statefulsets {{ release_name }} -o json -n {{ namespace }} | jq '.spec.template.spec.containers | .[].image' -r | awk -F/ '{print $2}'" + shell: kubectl get statefulsets {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[].image | split("/")[-1]' register: image - set_fact: diff --git a/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml b/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml index 09e96cf25e..6d0b7ef387 100644 --- a/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml @@ -89,7 +89,7 @@ delay: 30 - name: Get deployed image name - shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r 
'.spec.template.spec.containers[0].image | split("/")[1]' + shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[-1]' register: deployed_image - set_fact: From 8163c85169b9f1952c142a5fb3853841d6b1e020 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Tue, 3 Jan 2023 07:51:07 +1100 Subject: [PATCH 205/616] updated daemonset deployed image retrieval Signed-off-by: Deepak Devadathan --- kubernetes/ansible/roles/helm-deploy/tasks/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml index 53b452274d..d899a7e62e 100644 --- a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml @@ -141,6 +141,10 @@ shell: "kubectl get daemonsets {{ release_name }} -o json -n {{ namespace }} | jq '.spec.template.spec.containers | .[].image' -r | awk -F/ '{print $2}'" register: image +- name: Get deployed image name - daemonsets for OCI + shell: "kubectl get daemonsets {{ release_name }} -o json -n {{ namespace }} | jq '.spec.template.spec.containers | .[].image' -r | awk -F/ '{print $4}'" + register: image + - set_fact: deployed_image: "{{ image if image.stdout_lines | length > 0 else deployed_image }}" From 16e0cd771206f9a035b349f8696ed2aac1e13bc4 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Tue, 3 Jan 2023 17:51:36 +0530 Subject: [PATCH 206/616] Added new variables (#3701) --- ansible/roles/ml-analytics-service/defaults/main.yml | 2 ++ ansible/roles/ml-analytics-service/templates/config.j2 | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/ansible/roles/ml-analytics-service/defaults/main.yml b/ansible/roles/ml-analytics-service/defaults/main.yml index d73099451d..15f9b438c4 100755 --- a/ansible/roles/ml-analytics-service/defaults/main.yml +++ b/ansible/roles/ml-analytics-service/defaults/main.yml @@ -114,3 +114,5 @@ 
cloud_public_storage_region: "{{ cloud_public_storage_region }}" cloud_public_storage_endpoint: "{{ cloud_public_storage_endpoint }}" ml_analytics_project_program : "{{ WORKDIR }}/ml-analytics-service/projects/program_ids.txt" ml_analytics_projects_program_filename: "{{ config_path }}/projects/program_ids.txt" +ml_analytics_nvsk_imp_projects_data_local_path: "{{ config_path }}/urgent_data_metrics/output/" +ml_analytics_nvsk_imp_projects_data_blob_path: "Manage_Learn_Data/micro_improvement/" diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index 52927ec957..27da8be26b 100644 --- a/ansible/roles/ml-analytics-service/templates/config.j2 +++ b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -279,3 +279,7 @@ observation_batch_ingestion_data_del = {{ ml_analytics_observation_batchupdate_c survey_batch_ingestion_data_del = {{ ml_analytics_survey_batchupdate_cloud_blob_path}} cname_url = {{ ml_analytics_cname_url }} + +nvsk_imp_projects_data_local_path = {{ ml_analytics_nvsk_imp_projects_data_local_path }} + +nvsk_imp_projects_data_blob_path = {{ ml_analytics_nvsk_imp_projects_data_blob_path }} From ea44249610b7107c30e9d5f5f8fa635240a66bd9 Mon Sep 17 00:00:00 2001 From: PrasadMoka Date: Thu, 5 Jan 2023 14:52:30 +0530 Subject: [PATCH 207/616] ED-621:updated configurations for release-5.1.0 --- ansible/inventory/env/group_vars/all.yml | 3 +-- .../stack-sunbird/templates/sunbird_cert-service.env | 12 ++++++------ .../stack-sunbird/templates/sunbird_lms-service.env | 7 +++++-- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 153be0f813..6cbd63ec03 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -131,9 +131,8 @@ cassandra_restore_dir: "/home/{{ ansible_ssh_user }}/" cassandra_backup_dir: /data/cassandra/backup ### Release 5.0.0 ### 
cassandra_multi_dc_enabled: false -# Release-5.0.1 +### Release-5.0.1 ### cloud_storage_base_url: "{{cloud_storage_base_url}}" -cloud_store_base_path_placeholder: "$CLOUD_BASE_PATH" keycloak_realm: sunbird sunbird_content_player_url: "http://kong:8000/" diff --git a/ansible/roles/stack-sunbird/templates/sunbird_cert-service.env b/ansible/roles/stack-sunbird/templates/sunbird_cert-service.env index c43c23171b..19a9a6c46c 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_cert-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_cert-service.env @@ -1,13 +1,13 @@ -CONTAINER_NAME={{cert_service_container_name}} +CONTAINER_NAME={{cloud_storage_certservice_bucketname}} CLOUD_STORAGE_TYPE={{cloud_service_provider}} -PRIVATE_CLOUD_STORAGE_SECRET={{sunbird_private_storage_account_key}} -PRIVATE_CLOUD_STORAGE_KEY={{sunbird_private_storage_account_name}} +PRIVATE_CLOUD_STORAGE_SECRET={{cloud_private_storage_secret}} +PRIVATE_CLOUD_STORAGE_KEY={{cloud_private_storage_accountname}} sunbird_cert_domain_url={{proto}}://{{proxy_server_name}} sunbird_cert_enc_service_url=http://enc-service:8013 download_link_expiry_timeout=600 es_conn_info={{groups['es']|join(':9200,')}}:9200 ITEXT_LICENSE_ENABLED={{itext_license_enabled}} ITEXT_LICENSE_PATH=/home/sunbird/itext_trail_license.xml -PUBLIC_CLOUD_STORAGE_KEY={{sunbird_public_storage_account_name}} -PUBLIC_CLOUD_STORAGE_SECRET={{sunbird_public_storage_account_key}} -PUBLIC_CONTAINER_NAME={{sunbird_cert_qr_container_name}} +PUBLIC_CLOUD_STORAGE_KEY={{cloud_public_storage_accountname}} +PUBLIC_CLOUD_STORAGE_SECRET={{cloud_public_storage_secret}} +PUBLIC_CONTAINER_NAME={{cloud_storage_certqr_bucketname}} diff --git a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env index 1b3fdba3ca..946bf3af10 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env @@ -80,7 
+80,7 @@ sunbird_course_batch_notification_enabled={{sunbird_course_batch_notification_en sunbird_course_batch_notification_signature={{sunbird_course_batch_notification_signature}} sunbird_otp_expiration={{sunbird_otp_expiration}} sunbird_otp_length={{sunbird_otp_length}} -sunbird_content_azure_storage_container={{sunbird_content_azure_storage_container}} +sunbird_content_cloud_storage_container={{cloud_storage_content_bucketname}} # Release-1.14 sunbird_time_zone={{sunbird_time_zone}} # Release-1.15 @@ -143,4 +143,7 @@ enrollment_list_size={{ enrollment_list_size | default(1000) }} # Release-5.0.0 sunbird_cloud_service_provider={{cloud_service_provider}} -isMultiDCEnabled={{cassandra_multi_dc_enabled}} \ No newline at end of file +isMultiDCEnabled={{cassandra_multi_dc_enabled}} + +# Release-5.0.1 +cloud_storage_base_url={{cloud_storage_base_url}} \ No newline at end of file From 1b8fe739a0ec590730f16ae2578f05064464659b Mon Sep 17 00:00:00 2001 From: Akhil <30873558+saiakhil46@users.noreply.github.com> Date: Thu, 5 Jan 2023 17:05:03 +0530 Subject: [PATCH 208/616] Updated private_repo template with CSP changes (#3689) --- .../ansible/inventory/dev/Core/common.yml | 166 +++++++++++++----- .../ansible/inventory/dev/Core/secrets.yml | 72 ++++---- .../inventory/dev/DataPipeline/common.yml | 141 +++++++++++++-- .../inventory/dev/DataPipeline/secrets.yml | 46 +++-- .../dev/KnowledgePlatform/common.yml | 159 ++++++++++++++--- .../dev/KnowledgePlatform/secrets.yml | 45 ++++- .../ansible/inventory/dev/UCI/common.yml | 1 + private_repo/ansible/inventory/dev/UCI/hosts | 1 + .../ansible/inventory/dev/UCI/secrets.yml | 1 + .../inventory/dev/managed-learn/common.yml | 1 + .../ansible/inventory/dev/managed-learn/hosts | 1 + .../inventory/dev/managed-learn/secrets.yml | 1 + 12 files changed, 499 insertions(+), 136 deletions(-) create mode 120000 private_repo/ansible/inventory/dev/UCI/common.yml create mode 120000 private_repo/ansible/inventory/dev/UCI/hosts create mode 120000 
private_repo/ansible/inventory/dev/UCI/secrets.yml create mode 120000 private_repo/ansible/inventory/dev/managed-learn/common.yml create mode 120000 private_repo/ansible/inventory/dev/managed-learn/hosts create mode 120000 private_repo/ansible/inventory/dev/managed-learn/secrets.yml diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index 1984bcd2b3..bee6dc7028 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -20,41 +20,97 @@ sunbird_mail_server_from_email: "support@myorg.com" # Email ID that should # List of mail ids to whome the monitoring alerts should be sent. alerts_mailing_list : "devops@myorg.com" # Comma separated mail list for Alerts; eg: user1@mail.com, user2@mail.com - -# Define the below if you are using Azure Cloud -# Note - You can use the same azure account for the below variables or have separate azure accounts -sunbird_public_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing public data (like contents) -sunbird_private_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing private data (like reports, telemetry data) -sunbird_management_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing backup data (like cassandra backups) -sunbird_artifact_storage_account_name: "{{ sunbird_management_storage_account_name }}" # Azure account name for storing artifacts data (like jenkins build zip files) - -azure_public_storage_account_name: "{{ sunbird_public_storage_account_name }}" -azure_private_storage_account_name: "{{ sunbird_private_storage_account_name }}" -azure_management_storage_account_name: "{{ sunbird_management_storage_account_name }}" -azure_artifact_storage_account_name: "{{ sunbird_artifact_storage_account_name }}" - -# Define the below if you are using AWS Cloud -aws_region: "" 
-aws_management_s3_bucket_name: "" -aws_artifact_s3_bucket_name: "" -aws_public_s3_bucket_name: "" -aws_private_s3_bucket_name: "" - -# Define the below if you are using Google Cloud -gcloud_private_bucket_name: "" -gcloud_public_bucket_name: "" -gcloud_artifact_bucket_name: "" -gcloud_management_bucket_name: "" - -gcloud_private_bucket_projectId: "" +# Cloud Service Provider Variables +# If cloud_service_provider is AWS then update with access key as value +# Example: cloud_public_storage_accountname: "AKSHKSJHFJDHJDSHFKSD" +# If cloud_service_provider is gloud(GCP) then update with service account name +# Example: cloud_public_storage_accountname: "cloudstorage-gcp-test.iam.gserviceaccount.com" +# If cloud_service_provider is AZURE then update with stoage account name +# Example: cloud_public_storage_accountname: "azurestotageaccount" +cloud_public_storage_accountname: "" +# If cloud_service_provider is AWS then update with region +# Example: cloud_public_storage_region: us-east-1 +cloud_public_storage_region: "" +# If cloud_service_provider is gcp then update this variable with project id +# Example: cloud_public_storage_project: "sunbird-gcp-test" +cloud_public_storage_project: "" + + +# Create object storage for each below mentioned variables and update accordingly +# If cloud_service_provider is AWS update with bucket name +# If cloud_service_provider is gcloud(GCP) update with bucket name +# If cloud_service_provider is AZURE update with container name +# Example: cloud_storage_certqr_bucketname: "certqr-storage" +cloud_storage_certqr_bucketname: "" +# This storage contains chatbot related data +# Example: cloud_storage_chatbot_bucketname: "chatbot-storage" +cloud_storage_chatbot_bucketname: "" +# This storage contains dial related data +# Example: cloud_storage_dial_bucketname: "dial-storage" +cloud_storage_dial_bucketname: "" +# This storage contains flink checkpoint data +# Example: cloud_storage_flink_bucketname: "flink-storage" 
+cloud_storage_flink_bucketname: "" +# This storage contains portal cdn file +# Example: cloud_storage_playercdn_bucketname: "playercdn-storage" +cloud_storage_playercdn_bucketname: "" +# This storage contains public data +# Example: cloud_storage_public_bucketname: "public-storage" +cloud_storage_public_bucketname: "" +# This storage contains public reports data +# Example: cloud_storage_publicreports_bucketname: "publicreports-storage" +cloud_storage_publicreports_bucketname: "" +# This storage contains private reports data +# Example: cloud_storage_privatereports_bucketname: "privatereports-storage" +cloud_storage_privatereports_bucketname: "" +# This storage contains samiksha data +# Example: cloud_storage_samiksha_bucketname: "samiksha-storage" +cloud_storage_samiksha_bucketname: "" +# This storage contains schema data +# Example: cloud_storage_schema_bucketname: "schema-storage" +cloud_storage_schema_bucketname: "" +# This storage contains sourcing related data +# Example: cloud_storage_sourcing_bucketname: "sourcing-storage" +cloud_storage_sourcing_bucketname: "" +# This storage contains desktop app data +# Example: cloud_storage_offlineinstaller_bucketname: "offlineinstaller-storage" +cloud_storage_offlineinstaller_bucketname: "" +# This storage contains public schemas, contents +# Example: cloud_storage_content_bucketname: "content-storage" +cloud_storage_content_bucketname: "" +# This storage contains telemetry data +# Example: cloud_storage_telemetry_bucketname: "telemetry-storage" +cloud_storage_telemetry_bucketname: "" +# This storage contains T & C data +# Example: cloud_storage_termsandcondtions_bucketname: "termsandconditions-storage" +cloud_storage_termsandcondtions_bucketname: "" +# Example: cloud_storage_user_bucketname: "user-storage" +cloud_storage_user_bucketname: "" +# This storage contains crashlogs +# Example: cloud_storage_desktopappcrashlogs_bucketname: "desktopappcrashlogs-storage" +cloud_storage_desktopappcrashlogs_bucketname: "" +# 
This storage contains labels data +# Example: cloud_storage_label_bucketname: "label-storage" +cloud_storage_label_bucketname: "" +# Example: cloud_storage_certservice_bucketname: "certservice-storage" +cloud_storage_certservice_bucketname: "" +# This storage contains UCI services data +# Example: cloud_storage_uci_bucketname: "uci-storage" +cloud_storage_uci_bucketname: "" +# This storage contains artifacts data +# Example: cloud_storage_artifacts_bucketname: "artifact-storage" +cloud_storage_artifacts_bucketname: "" +# This storage contains backups data +# Example: cloud_storage_management_bucketname: "management-storage" +cloud_storage_management_bucketname: "" # Uncomment the variable based on your cloud provider (as a default we have kept Azure variable uncommented) # GCP -# cloud_storage_url: https://storage.cloud.google.com/{{ gcloud_public_bucket_name }} +# cloud_storage_url: https://storage.googleapis.com # AWS -# cloud_storage_url: "https://{{aws_public_s3_bucket_name}}.s3.{{aws_region}}.amazonaws.com" +# cloud_storage_url: "https://s3.{{ cloud_public_storage_region }}.amazonaws.com" # Azure -cloud_storage_url: "https://{{ sunbird_public_storage_account_name }}.blob.core.windows.net" +cloud_storage_url: "https://{{ cloud_public_storage_accountname }}.blob.core.windows.net" # ------------------------------------------------------------------------------------------------------------ # # Cloud / Infra Specific values - Check these and update accordingly @@ -81,10 +137,6 @@ proto: https # http or https, preferably https sunbird_default_channel: sunbird # default sunbird channel name environment_id: "10000003" # A 8 digit number for example like 1000000, should be same as defined in KP common.yml -# SB-31155 - This should be deprecated in future in favour of content_storage defined in all.yml -sunbird_content_azure_storage_container: contents # Azure container name for storing public data (like contents), should be same as azure_public_container defined in KP 
common.yml - - # This sms sender id should be verified by your provider. This is the sender id which will be used for `From Address`. For example, # # From: SBSMS @@ -131,19 +183,12 @@ postgres: db_admin_password: "{{core_vault_postgres_password}}" -# Azure account related vars -sunbird_azure_public_storage_account_name: "{{ sunbird_public_storage_account_name }}" -azure_plugin_storage_account_name: "{{sunbird_azure_public_storage_account_name}}" -azure_plugin_storage_account_key: "{{sunbird_public_storage_account_key}}" -plugin_container_name: "{{sunbird_content_azure_storage_container}}" - keycloak_api_management_user_email: "admin@sunbird.org" sunbird_installation_email: "admin@sunbird.org" # Other vars cert_service_container_name: e-credentials # Conatiner name for cert service to store cert templates cert_service_cloud_storage_type: azure # Changes this if you use other clouds like aws, gcp -artifacts_container: artifacts # Azure blob container name to save built artifacts, default it can be arti dataexhaust_super_admin_channel: sunbird dedup_redis_host: "{{ groups['dp-redis'][0] }}" # for router service namespace: "{{ env }}" # required for bot and router, these helm charts should be moved to devops repo @@ -157,6 +202,45 @@ monitor_alerts_mail_from_email: "{{ sunbird_mail_server_from_email }}" ekstep_s3_env: "" # This variable is not used and leave the value as empty freshDesk_token: "" +# Below endpoint is not required in current release +cloud_public_storage_endpoint: "" + +# Update below vars if seperate object storage is required +cloud_private_storage_accountname: "{{ cloud_public_storage_accountname }}" +cloud_private_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_private_storage_region: "{{ cloud_public_storage_region }}" +cloud_private_storage_project: "{{ cloud_public_storage_project }}" + +cloud_management_storage_accountname: "{{ cloud_public_storage_accountname }}" +cloud_management_storage_endpoint: "{{ 
cloud_public_storage_endpoint }}" +cloud_management_storage_region: "{{ cloud_public_storage_region }}" +cloud_management_storage_project: "{{ cloud_public_storage_project }}" + +cloud_artifact_storage_accountname: "{{ cloud_public_storage_accountname }}" +cloud_artifact_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_artifact_storage_region: "{{ cloud_public_storage_region }}" +cloud_artifact_storage_project: "{{ cloud_public_storage_project }}" + +## Enable below vars to upload database backups in seperate buckets +# cloud_storage_cassandrabackup_bucketname: "" +# cloud_storage_dpcassandrabackup_bucketname: "" +# cloud_storage_dppostgresbackup_bucketname: "" +# cloud_storage_dpredisbackup_bucketname: "" +# cloud_storage_esbackup_bucketname: "" +# cloud_storage_influxdbbackup_bucketname: "" +# cloud_storage_jenkinsbackup_bucketname: "" +# cloud_storage_mongobackup_bucketname: "" +# cloud_storage_neo4jbackup_bucketname: "" +# cloud_storage_redisbackup_bucketname: "" + +# Building block vars +cloud_storage_base_url: "{{ cloud_storage_url }}" +cloudstorage_base_path: "{{ cloud_storage_url }}" +valid_cloudstorage_base_urls: '["{{ cloud_storage_url }}"]' +cloudstorage_relative_path_prefix: "CONTENT_STORAGE_BASE_PATH" + + + # Provide the admin-api consumer access to all API's - The public repo restricts this for security reasons # If you dont want to key to have access to all API's, please remove the variables kong_all_consumer_groups and kong_consumers or edit the groups to have a smaller subset kong_all_consumer_groups: diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index e8e48bf801..9b8f0f43a5 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -8,42 +8,38 @@ core_vault_docker_registry_url: "change.docker.url" # for docker hub "https core_vault_docker_registry_user: "change.docker.username" 
core_vault_docker_registry_password: "change.docker.password" -# Run the below command in shell -# date +'%Y-%m-%dT%H:%m:%SZ' -d '+1 year' -# sas_token=?`az storage account generate-sas --account-name "{{ azure_plugin_storage_account_name }}" --account-key "{{ azure_plugin_storage_account_key }}" --expiry $sas_expire_time --https-only --permissions acdlpruw --resource-types sco --services bfqt | xargs` -# generate a sas for the blob for entire storage accout with write and read access -sunbird_artifact_storage_account_sas: "change.azure.storage.account.sas" # SAS token value generated from above command -sunbird_public_storage_account_sas: "change.azure.storage.account.sas" # SAS token value generated from above command -sunbird_management_storage_account_sas: "change.azure.storage.account.sas" # SAS token value generated from above command - -sunbird_public_storage_account_key: "change.azure.storage.account.key" -sunbird_private_storage_account_key: "change.azure.storage.account.key" -sunbird_management_storage_account_key: "change.azure.storage.account.key" -sunbird_artifact_storage_account_key: "{{ sunbird_management_storage_account_key }}" - -# Define the below if you are using Azure Cloud -azure_public_storage_account_key: "{{ sunbird_public_storage_account_key }}" -azure_private_storage_account_key: "{{ sunbird_private_storage_account_key }}" -azure_management_storage_account_key: "{{ sunbird_management_storage_account_key }}" -azure_artifact_storage_account_key: "{{ sunbird_artifact_storage_account_key }}" -azure_public_storage_account_sas: "{{ sunbird_public_storage_account_sas }}" -azure_management_storage_account_sas: "{{ sunbird_management_storage_account_sas }}" - -# Define the below if you are using AWS Cloud -aws_management_bucket_access_key: "" -aws_artifact_bucket_access_key: "" -aws_public_bucket_access_key: "" -aws_private_bucket_access_key: "" - -aws_management_bucket_secret_access_key: "" -aws_artifact_bucket_secret_access_key: "" 
-aws_public_bucket_secret_access_key: "" -aws_private_bucket_secret_access_key: "" - -# Define the below if you are using Google Cloud -gcp_storage_service_account_name: "" -gcp_storage_key_file: "" # gcloud service account key - refer: https://cloud.google.com/iam/docs/creating-managing-service-account-keys - +# Cloud Service Provider Secret Variables +# If cloud_service_provider is aws then update secret access key +# Example: cloud_management_storage_secret: "xkxjfjsdsfjdfjdhgjfhgjdhfgjdgjdfgdjgjdgdjhgjhd" + +# If cloud_service_provider is gcp then update with service account json file content +# Example: cloud_management_storage_secret: | +# { +# "type": "service_account", +# "project_id": "your-project-id", +# "private_key_id": "...", +# "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n", +# "client_email": "service-account-for-your-repository@your-project-id.iam.gserviceaccount.com", +# "client_id": "...", +# "auth_uri": "https://accounts.google.com/o/oauth2/auth", +# "token_uri": "https://accounts.google.com/o/oauth2/token", +# "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", +# "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/your-bucket@your-project-id.iam.gserviceaccount.com" +# } + +# If cloud_service_provider is azure then update with storage account key +# Example: cloud_management_storage_secret: "BfoOQwA6UFI7URwkFjkjdjsdhjhjhjsdhjfM//lsTbXDmHOFR5GqEwAwzyrKXOUDywojCpOC/g==" +cloud_management_storage_secret: "" + +# If cloud_service_provider is aws then update secret access key +# Example: cloud_public_storage_secret: "xkxjfjsdsfjdfjdhgjfhgjdhfgjdgjdfgdjgjdgdjhgjhd" + +# If cloud_service_provider is gcp then update with private-key which is in service account json file +# Example: cloud_public_storage_secret: "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n" + +# If cloud_service_provider is azure then update with storage account key +# Example: 
cloud_public_storage_secret: "BfoOQwA6UFI7URwkFjkjdjsdhjhjhjsdhjfM//lsTbXDmHOFR5GqEwAwzyrKXOUDywojCpOC/g==" +cloud_public_storage_secret: "" # The proxy key and crt values should be padded to the right by a couple of spaces # Example: @@ -184,3 +180,7 @@ ml_analytics_druid_observation_status_injestion_spec: DruidObeservationStatusIng ml_analytics_api_access_token: ApiAccessToken # ML authorization key ml_analytics_api_authorization_key: ApiAuthorizationKey + +# update if seperate object storage is used +cloud_private_storage_secret: "{{ cloud_public_storage_secret }}" +cloud_artifact_storage_secret: "{{ cloud_public_storage_secret }}" \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/DataPipeline/common.yml b/private_repo/ansible/inventory/dev/DataPipeline/common.yml index ef8432539b..715e9cc13a 100644 --- a/private_repo/ansible/inventory/dev/DataPipeline/common.yml +++ b/private_repo/ansible/inventory/dev/DataPipeline/common.yml @@ -5,15 +5,98 @@ domain_name: "" # your domain name like example.com # docker hub details dockerhub: "change.docker.url" # docker hub username or url incase of private registry private_ingressgateway_ip: "" # your private kubernetes load balancer ip -# Note - You can use the same azure account for the below variables or have separate azure accounts -sunbird_private_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing private data (like reports, telemetry data) -sunbird_public_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing public data (like contents) -sunbird_druid_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing druid data (like query results) -sunbird_artifact_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing artifacts data (like jenkins build zip files) -sunbird_management_storage_account_name: "change.azure.storage.account.name" # Azure account 
name for storing backup data (like cassandra backups) +# Cloud Service Provider Variables +# If cloud_service_provider is AWS then update with access key as value +# Example: cloud_public_storage_accountname: "AKSHKSJHFJDHJDSHFKSD" +# If cloud_service_provider is gloud(GCP) then update with service account name +# Example: cloud_public_storage_accountname: "cloudstorage-gcp-test.iam.gserviceaccount.com" +# If cloud_service_provider is AZURE then update with stoage account name +# Example: cloud_public_storage_accountname: "azurestotageaccount" +cloud_public_storage_accountname: "" +# If cloud_service_provider is AWS then update with region +# Example: cloud_public_storage_region: us-east-1 +cloud_public_storage_region: "" +# If cloud_service_provider is gcp then update this variable with project id +# Example: cloud_public_storage_project: "sunbird-gcp-test" +cloud_public_storage_project: "" +# Create object storage for each below mentioned variables and update accordingly +# If cloud_service_provider is AWS update with bucket name +# If cloud_service_provider is gcloud(GCP) update with bucket name +# If cloud_service_provider is AZURE update with container name +# Example: cloud_storage_certqr_bucketname: "certqr-storage" +cloud_storage_certqr_bucketname: "" +# This storage contains chatbot related data +# Example: cloud_storage_chatbot_bucketname: "chatbot-storage" +cloud_storage_chatbot_bucketname: "" +# This storage contains dial related data +# Example: cloud_storage_dial_bucketname: "dial-storage" +cloud_storage_dial_bucketname: "" +# This storage contains flink checkpoint data +# Example: cloud_storage_flink_bucketname: "flink-storage" +cloud_storage_flink_bucketname: "" +# This storage contains portal cdn file +# Example: cloud_storage_playercdn_bucketname: "playercdn-storage" +cloud_storage_playercdn_bucketname: "" +# This storage contains public data +# Example: cloud_storage_public_bucketname: "public-storage" +cloud_storage_public_bucketname: "" +# This 
storage contains public reports data +# Example: cloud_storage_publicreports_bucketname: "publicreports-storage" +cloud_storage_publicreports_bucketname: "" +# This storage contains private reports data +# Example: cloud_storage_privatereports_bucketname: "privatereports-storage" +cloud_storage_privatereports_bucketname: "" +# This storage contains samiksha data +# Example: cloud_storage_samiksha_bucketname: "samiksha-storage" +cloud_storage_samiksha_bucketname: "" +# This storage contains schema data +# Example: cloud_storage_schema_bucketname: "schema-storage" +cloud_storage_schema_bucketname: "" +# This storage contains sourcing related data +# Example: cloud_storage_sourcing_bucketname: "sourcing-storage" +cloud_storage_sourcing_bucketname: "" +# This storage contains desktop app data +# Example: cloud_storage_offlineinstaller_bucketname: "offlineinstaller-storage" +cloud_storage_offlineinstaller_bucketname: "" +# This storage contains public schemas, contents +# Example: cloud_storage_content_bucketname: "content-storage" +cloud_storage_content_bucketname: "" +# This storage contains telemetry data +# Example: cloud_storage_telemetry_bucketname: "telemetry-storage" +cloud_storage_telemetry_bucketname: "" +# This storage contains T & C data +# Example: cloud_storage_termsandcondtions_bucketname: "termsandconditions-storage" +cloud_storage_termsandcondtions_bucketname: "" +# Example: cloud_storage_user_bucketname: "user-storage" +cloud_storage_user_bucketname: "" +# This storage contains crashlogs +# Example: cloud_storage_desktopappcrashlogs_bucketname: "desktopappcrashlogs-storage" +cloud_storage_desktopappcrashlogs_bucketname: "" +# This storage contains labels data +# Example: cloud_storage_label_bucketname: "label-storage" +cloud_storage_label_bucketname: "" +# Example: cloud_storage_certservice_bucketname: "certservice-storage" +cloud_storage_certservice_bucketname: "" +# This storage contains UCI services data +# Example: cloud_storage_uci_bucketname: 
"uci-storage" +cloud_storage_uci_bucketname: "" +# This storage contains artifacts data +# Example: cloud_storage_artifacts_bucketname: "artifact-storage" +cloud_storage_artifacts_bucketname: "" +# This storage contains backups data +# Example: cloud_storage_management_bucketname: "management-storage" +cloud_storage_management_bucketname: "" + +# Uncomment the variable based on your cloud provider (as a default we have kept Azure variable uncommented) +# GCP +# cloud_storage_url: https://storage.googleapis.com +# AWS +# cloud_storage_url: "https://s3.{{ cloud_public_storage_region }}.amazonaws.com" +# Azure +cloud_storage_url: "https://{{ cloud_public_storage_accountname }}.blob.core.windows.net" # ------------------------------------------------------------------------------------------------------------ # # Optional variables - Can be left blank if you dont plan to use the intended features @@ -48,12 +131,47 @@ postgres: db_admin_password: "{{dp_vault_pgdb_admin_password}}" druid_postgres_user: druid # Do not change this -sunbird_private_azure_report_container_name: 'reports' -sunbird_public_azure_report_container_name: 'public-reports' imagepullsecrets: "{{env}}registrysecret" # kubernetes imagePullSecrets kubeconfig_path: /var/lib/jenkins/secrets/k8s.yaml # kubeconfig file path on jenkins core_kubeconfig_path: "{{ kubeconfig_path }}" # kubeconfig file path on jenkins for core kube cluster, change this if you use separate kube cluster for core and KP + DP +# Below endpoint is not required in current release +cloud_public_storage_endpoint: "" + +# Update below vars if seperate object storage is required +cloud_private_storage_accountname: "{{ cloud_public_storage_accountname }}" +cloud_private_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_private_storage_region: "{{ cloud_public_storage_region }}" +cloud_private_storage_project: "{{ cloud_public_storage_project }}" + +cloud_management_storage_accountname: "{{ cloud_public_storage_accountname }}" 
+cloud_management_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_management_storage_region: "{{ cloud_public_storage_region }}" +cloud_management_storage_project: "{{ cloud_public_storage_project }}" + +cloud_artifact_storage_accountname: "{{ cloud_public_storage_accountname }}" +cloud_artifact_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_artifact_storage_region: "{{ cloud_public_storage_region }}" +cloud_artifact_storage_project: "{{ cloud_public_storage_project }}" + +## Enable below vars to upload database backups in seperate buckets +# cloud_storage_cassandrabackup_bucketname: "" +# cloud_storage_dpcassandrabackup_bucketname: "" +# cloud_storage_dppostgresbackup_bucketname: "" +# cloud_storage_dpredisbackup_bucketname: "" +# cloud_storage_esbackup_bucketname: "" +# cloud_storage_influxdbbackup_bucketname: "" +# cloud_storage_jenkinsbackup_bucketname: "" +# cloud_storage_mongobackup_bucketname: "" +# cloud_storage_neo4jbackup_bucketname: "" +# cloud_storage_redisbackup_bucketname: "" + +# Building block vars +cloud_storage_base_url: "{{ cloud_storage_url }}" +cloudstorage_base_path: "{{ cloud_storage_url }}" +valid_cloudstorage_base_urls: '["{{ cloud_storage_url }}"]' +cloudstorage_relative_path_prefix: "CONTENT_STORAGE_BASE_PATH" + # The below sets the kafka topics retention time to 1 day, if you use the defaults from the public repo, it will be 7 days # If you want to retain the topics for 7 days, remove the below sections completely # Ensure you have atleast 1 TB of disk to retain data for 7 days @@ -170,10 +288,3 @@ processing_kafka_overriden_topics: retention_time: 86400000 replication_factor: 1 -# Define the below if you are using Google Cloud -gcloud_private_bucket_name: "" -gcloud_public_bucket_name: "" -gcloud_artifact_bucket_name: "" -gcloud_management_bucket_name: "" - -gcloud_private_bucket_projectId: "" diff --git a/private_repo/ansible/inventory/dev/DataPipeline/secrets.yml 
b/private_repo/ansible/inventory/dev/DataPipeline/secrets.yml index c37b74d8fe..2b711a27a2 100644 --- a/private_repo/ansible/inventory/dev/DataPipeline/secrets.yml +++ b/private_repo/ansible/inventory/dev/DataPipeline/secrets.yml @@ -4,13 +4,38 @@ dp_vault_pgdb_password: "change.postgres.password" # postgres p dp_vault_pgdb_admin_password: "change.postgres.password" # postgres password for admin dp_vault_druid_postgress_pass: "change.postgres.password" # postgres password for druid db -# Azure storage account credentials - Note if you are using a single account, you can set the same key for the belows -sunbird_management_storage_account_key: "change.azure.storage.account.key" -sunbird_artifact_storage_account_key: "change.azure.storage.account.key" -sunbird_private_storage_account_key: "change.azure.storage.account.key" -sunbird_public_storage_account_key: "change.azure.storage.account.key" -sunbird_druid_storage_account_key: "change.azure.storage.account.key" -sunbird_artifact_storage_account_sas: "change.azure.storage.account.sas" # this must be the sas token from Core directory that your generated +# Cloud Service Provider Secret Variables +# If cloud_service_provider is aws then update secret access key +# Example: cloud_management_storage_secret: "xkxjfjsdsfjdfjdhgjfhgjdhfgjdgjdfgdjgjdgdjhgjhd" + +# If cloud_service_provider is gcp then update with service account json file content +# Example: cloud_management_storage_secret: | +# { +# "type": "service_account", +# "project_id": "your-project-id", +# "private_key_id": "...", +# "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n", +# "client_email": "service-account-for-your-repository@your-project-id.iam.gserviceaccount.com", +# "client_id": "...", +# "auth_uri": "https://accounts.google.com/o/oauth2/auth", +# "token_uri": "https://accounts.google.com/o/oauth2/token", +# "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", +# "client_x509_cert_url": 
"https://www.googleapis.com/robot/v1/metadata/x509/your-bucket@your-project-id.iam.gserviceaccount.com" +# } + +# If cloud_service_provider is azure then update with storage account key +# Example: cloud_management_storage_secret: "BfoOQwA6UFI7URwkFjkjdjsdhjhjhjsdhjfM//lsTbXDmHOFR5GqEwAwzyrKXOUDywojCpOC/g==" +cloud_management_storage_secret: "" + +# If cloud_service_provider is aws then update secret access key +# Example: cloud_public_storage_secret: "xkxjfjsdsfjdfjdhgjfhgjdhfgjdgjdfgdjgjdgdjhgjhd" + +# If cloud_service_provider is gcp then update with private-key which is in service account json file +# Example: cloud_public_storage_secret: "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n" + +# If cloud_service_provider is azure then update with storage account key +# Example: cloud_public_storage_secret: "BfoOQwA6UFI7URwkFjkjdjsdhjhjhjsdhjfM//lsTbXDmHOFR5GqEwAwzyrKXOUDywojCpOC/g==" +cloud_public_storage_secret: "" core_vault_docker_registry_url: "change.docker.url" # for docker hub use https://index.docker.io/v1 core_vault_docker_registry_user: "change.docker.user" @@ -25,8 +50,7 @@ dp_vault_data_exhaust_token: # slack api token # ------------------------------------------------------------------------------------------------------------ # # Sensible defaults which you need not change - But if you would like to change, you are free to do so -dp_vault_artifacts_container: artifacts -# Define the below if you are using Google Cloud -gcp_storage_service_account_name: "" -gcp_storage_key_file: "" +# update if seperate object storage is used +cloud_private_storage_secret: "{{ cloud_public_storage_secret }}" +cloud_artifact_storage_secret: "{{ cloud_public_storage_secret }}" \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml index b905d7b359..bede16cb5b 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml +++ 
b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml @@ -5,19 +5,107 @@ cloud_service_provider: "" # Your cloud service provider name. Supported v dockerhub: "change.docker.url" # docker hub username or url incase of private registry private_ingressgateway_ip: "" # your private kubernetes load balancer ip domain_name: "" # your domain name like example.com -# Note - You can use the same azure account for the below variables or have separate azure accounts -sunbird_public_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing public data (like contents) -sunbird_private_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing private data (like reports, telemetry data) -sunbird_artifact_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing artifacts data (like jenkins build zip files) -sunbird_management_storage_account_name: "change.azure.storage.account.name" # Azure account name for storing backup data (like cassandra backups) + +# Cloud Service Provider Variables +# If cloud_service_provider is AWS then update with access key as value +# Example: cloud_public_storage_accountname: "AKSHKSJHFJDHJDSHFKSD" +# If cloud_service_provider is gloud(GCP) then update with service account name +# Example: cloud_public_storage_accountname: "cloudstorage-gcp-test.iam.gserviceaccount.com" +# If cloud_service_provider is AZURE then update with stoage account name +# Example: cloud_public_storage_accountname: "azurestotageaccount" +cloud_public_storage_accountname: "" +# If cloud_service_provider is AWS then update with region +# Example: cloud_public_storage_region: us-east-1 +cloud_public_storage_region: "" +# If cloud_service_provider is gcp then update this variable with project id +# Example: cloud_public_storage_project: "sunbird-gcp-test" +cloud_public_storage_project: "" +# If cloud_service_provider is oci update this variable with namespace +# Example: 
cloud_public_storage_namespace: "apsjfhudfjs" +cloud_public_storage_namespace: "" + + +# Create object storage for each below mentioned variables and update accordingly +# If cloud_service_provider is AWS update with bucket name +# If cloud_service_provider is gcloud(GCP) update with bucket name +# If cloud_service_provider is AZURE update with container name +# Example: cloud_storage_certqr_bucketname: "certqr-storage" +cloud_storage_certqr_bucketname: "" +# This storage contains chatbot related data +# Example: cloud_storage_chatbot_bucketname: "chatbot-storage" +cloud_storage_chatbot_bucketname: "" +# This storage contains dial related data +# Example: cloud_storage_dial_bucketname: "dial-storage" +cloud_storage_dial_bucketname: "" +# This storage contains flink checkpoint data +# Example: cloud_storage_flink_bucketname: "flink-storage" +cloud_storage_flink_bucketname: "" +# This storage contains portal cdn file +# Example: cloud_storage_playercdn_bucketname: "playercdn-storage" +cloud_storage_playercdn_bucketname: "" +# This storage contains public data +# Example: cloud_storage_public_bucketname: "public-storage" +cloud_storage_public_bucketname: "" +# This storage contains public reports data +# Example: cloud_storage_publicreports_bucketname: "publicreports-storage" +cloud_storage_publicreports_bucketname: "" +# This storage contains private reports data +# Example: cloud_storage_privatereports_bucketname: "privatereports-storage" +cloud_storage_privatereports_bucketname: "" +# This storage contains samiksha data +# Example: cloud_storage_samiksha_bucketname: "samiksha-storage" +cloud_storage_samiksha_bucketname: "" +# This storage contains schema data +# Example: cloud_storage_schema_bucketname: "schema-storage" +cloud_storage_schema_bucketname: "" +# This storage contains sourcing related data +# Example: cloud_storage_sourcing_bucketname: "sourcing-storage" +cloud_storage_sourcing_bucketname: "" +# This storage contains desktop app data +# Example: 
cloud_storage_offlineinstaller_bucketname: "offlineinstaller-storage" +cloud_storage_offlineinstaller_bucketname: "" +# This storage contains public schemas, contents +# Example: cloud_storage_content_bucketname: "content-storage" +cloud_storage_content_bucketname: "" +# This storage contains telemetry data +# Example: cloud_storage_telemetry_bucketname: "telemetry-storage" +cloud_storage_telemetry_bucketname: "" +# This storage contains T & C data +# Example: cloud_storage_termsandcondtions_bucketname: "termsandconditions-storage" +cloud_storage_termsandcondtions_bucketname: "" +# Example: cloud_storage_user_bucketname: "user-storage" +cloud_storage_user_bucketname: "" +# This storage contains crashlogs +# Example: cloud_storage_desktopappcrashlogs_bucketname: "desktopappcrashlogs-storage" +cloud_storage_desktopappcrashlogs_bucketname: "" +# This storage contains labels data +# Example: cloud_storage_label_bucketname: "label-storage" +cloud_storage_label_bucketname: "" +# Example: cloud_storage_certservice_bucketname: "certservice-storage" +cloud_storage_certservice_bucketname: "" +# This storage contains UCI services data +# Example: cloud_storage_uci_bucketname: "uci-storage" +cloud_storage_uci_bucketname: "" +# This storage contains artifacts data +# Example: cloud_storage_artifacts_bucketname: "artifact-storage" +cloud_storage_artifacts_bucketname: "" +# This storage contains backups data +# Example: cloud_storage_management_bucketname: "management-storage" +cloud_storage_management_bucketname: "" + +# Uncomment the variable based on your cloud provider (as a default we have kept Azure variable uncommented) +# GCP +# cloud_storage_url: https://storage.googleapis.com +# AWS +# cloud_storage_url: "https://s3.{{ cloud_public_storage_region }}.amazonaws.com" +# Azure +cloud_storage_url: "https://{{ cloud_public_storage_accountname }}.blob.core.windows.net" # Optional env: dev # some name like dev, preprod etc proto: https # http or https, preferably https 
-azure_public_container: contents # Azure container name for storing public data (like contents) - environment_id: "10000003" # A 8 digit number for example like 1000000, # Important: same as the one in core/common.yaml neo4j_zip: neo4j-community-3.3.9-unix.tar.gz # Neo4j file name present in the azure blob artifacts folder (only neo4j 3.4 and below is supported) @@ -28,27 +116,50 @@ neo4j_enterprise: false # Set this to true if you use # ------------------------------------------------------------------------------------------------------------ # # Sensible defaults which you need not change - But if you would like to change, you are free to do so ekstep_domain_name: "{{ proto }}://{{ domain_name }}" -artifacts_container: artifacts - -# Uncomment the variable based on your cloud provider (as a default we have kept Azure variable uncommented) -# GCP -# cloud_storage_url: https://storage.cloud.google.com/{{ gcloud_public_bucket_name }} -# AWS -# cloud_storage_url: # Geetha to fill this url based on AWS role vars -# Azure -cloud_storage_url: "https://{{ sunbird_public_storage_account_name }}.blob.core.windows.net" # SB-31155 - This should be deprecated in future in favour of plugin_storage -plugin_container_name: "{{ azure_public_container }}" +plugin_container_name: "{{ cloud_storage_content_bucketname }}" -kp_schema_base_path: "{{ cloud_storage_url }}/{{ plugin_storage }}/schemas/local" +kp_schema_base_path: "{{ cloud_storage_url }}/{{ cloud_storage_content_bucketname }}/schemas/local" imagepullsecrets: "{{env}}registrysecret" # kubernetes imagePullSecrets kubeconfig_path: /var/lib/jenkins/secrets/k8s.yaml # kubeconfig file path on jenkins -# Define the below if you are using Google Cloud -gcloud_private_bucket_name: "" -gcloud_public_bucket_name: "" -gcloud_artifact_bucket_name: "" -gcloud_management_bucket_name: "" +# Below endpoint is not required in current release +cloud_public_storage_endpoint: "" + +# Update below vars if seperate object storage is required 
+cloud_private_storage_accountname: "{{ cloud_public_storage_accountname }}" +cloud_private_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_private_storage_region: "{{ cloud_public_storage_region }}" +cloud_private_storage_project: "{{ cloud_public_storage_project }}" +cloud_private_storage_namespace: "{{ cloud_public_storage_namespace }}" + +cloud_management_storage_accountname: "{{ cloud_public_storage_accountname }}" +cloud_management_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_management_storage_region: "{{ cloud_public_storage_region }}" +cloud_management_storage_project: "{{ cloud_public_storage_project }}" +cloud_management_storage_namespace: "{{ cloud_public_storage_namespace }}" + +cloud_artifact_storage_accountname: "{{ cloud_public_storage_accountname }}" +cloud_artifact_storage_endpoint: "{{ cloud_public_storage_endpoint }}" +cloud_artifact_storage_region: "{{ cloud_public_storage_region }}" +cloud_artifact_storage_project: "{{ cloud_public_storage_project }}" +cloud_artifact_storage_namespace: "{{ cloud_public_storage_namespace }}" + +## Enable below vars to upload database backups in seperate buckets +# cloud_storage_cassandrabackup_bucketname: "" +# cloud_storage_dpcassandrabackup_bucketname: "" +# cloud_storage_dppostgresbackup_bucketname: "" +# cloud_storage_dpredisbackup_bucketname: "" +# cloud_storage_esbackup_bucketname: "" +# cloud_storage_influxdbbackup_bucketname: "" +# cloud_storage_jenkinsbackup_bucketname: "" +# cloud_storage_mongobackup_bucketname: "" +# cloud_storage_neo4jbackup_bucketname: "" +# cloud_storage_redisbackup_bucketname: "" -gcloud_private_bucket_projectId: "" +# Building block vars +cloud_storage_base_url: "{{ cloud_storage_url }}" +cloudstorage_base_path: "{{ cloud_storage_url }}" +valid_cloudstorage_base_urls: '["{{ cloud_storage_url }}"]' +cloudstorage_relative_path_prefix: "CONTENT_STORAGE_BASE_PATH" \ No newline at end of file diff --git 
a/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml index ef5db134da..1b62ad0a1f 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml @@ -1,11 +1,38 @@ # ------------------------------------------------------------------------------------------------------------ # # Mandatorty variables - DO NOT LEAVE ANYTHING BLANK # -# Azure storage account credentials - Note if you are using a single account, you can set the same key for the belows -sunbird_private_storage_account_key: "change.azure.storage.account.key" -sunbird_public_storage_account_key: "change.azure.storage.account.key" -sunbird_management_storage_account_key: "change.azure.storage.account.key" -sunbird_artifact_storage_account_key: "change.azure.storage.account.key" -sunbird_artifact_storage_account_sas: "change.azure.storage.account.sas" + +# Cloud Service Provider Secret Variables +# If cloud_service_provider is aws then update secret access key +# Example: cloud_management_storage_secret: "xkxjfjsdsfjdfjdhgjfhgjdhfgjdgjdfgdjgjdgdjhgjhd" + +# If cloud_service_provider is gcp then update with service account json file content +# Example: cloud_management_storage_secret: | +# { +# "type": "service_account", +# "project_id": "your-project-id", +# "private_key_id": "...", +# "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n", +# "client_email": "service-account-for-your-repository@your-project-id.iam.gserviceaccount.com", +# "client_id": "...", +# "auth_uri": "https://accounts.google.com/o/oauth2/auth", +# "token_uri": "https://accounts.google.com/o/oauth2/token", +# "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", +# "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/your-bucket@your-project-id.iam.gserviceaccount.com" +# } + +# If cloud_service_provider 
is azure then update with storage account key +# Example: cloud_management_storage_secret: "BfoOQwA6UFI7URwkFjkjdjsdhjhjhjsdhjfM//lsTbXDmHOFR5GqEwAwzyrKXOUDywojCpOC/g==" +cloud_management_storage_secret: "" + +# If cloud_service_provider is aws then update secret access key +# Example: cloud_public_storage_secret: "xkxjfjsdsfjdfjdhgjfhgjdhfgjdgjdfgdjgjdgdjhgjhd" + +# If cloud_service_provider is gcp then update with private-key which is in service account json file +# Example: cloud_public_storage_secret: "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n" + +# If cloud_service_provider is azure then update with storage account key +# Example: cloud_public_storage_secret: "BfoOQwA6UFI7URwkFjkjdjsdhjhjhjsdhjfM//lsTbXDmHOFR5GqEwAwzyrKXOUDywojCpOC/g==" +cloud_public_storage_secret: "" core_vault_docker_registry_url: "change.docker.url" # for docker hub use https://index.docker.io/v1 core_vault_docker_registry_user: "change.docker.user" @@ -19,6 +46,6 @@ lp_vault_youtube_api_key: # youtube api token if you want # Sensible defaults which you need not change - But if you would like to change, you are free to do so lp_vault_graph_passport_key: "long-secret-to-calm-entropy-gods" -# Define the below if you are using Google Cloud -gcp_storage_service_account_name: "" -gcp_storage_key_file: "" +# update if seperate object storage is used +cloud_private_storage_secret: "{{ cloud_public_storage_secret }}" +cloud_artifact_storage_secret: "{{ cloud_public_storage_secret }}" \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/UCI/common.yml b/private_repo/ansible/inventory/dev/UCI/common.yml new file mode 120000 index 0000000000..1465b46671 --- /dev/null +++ b/private_repo/ansible/inventory/dev/UCI/common.yml @@ -0,0 +1 @@ +../Core/common.yml \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/UCI/hosts b/private_repo/ansible/inventory/dev/UCI/hosts new file mode 120000 index 0000000000..fb74d690d4 --- /dev/null +++ 
b/private_repo/ansible/inventory/dev/UCI/hosts @@ -0,0 +1 @@ +../Core/hosts \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/UCI/secrets.yml b/private_repo/ansible/inventory/dev/UCI/secrets.yml new file mode 120000 index 0000000000..6bbc077aab --- /dev/null +++ b/private_repo/ansible/inventory/dev/UCI/secrets.yml @@ -0,0 +1 @@ +../Core/secrets.yml \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/managed-learn/common.yml b/private_repo/ansible/inventory/dev/managed-learn/common.yml new file mode 120000 index 0000000000..1465b46671 --- /dev/null +++ b/private_repo/ansible/inventory/dev/managed-learn/common.yml @@ -0,0 +1 @@ +../Core/common.yml \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/managed-learn/hosts b/private_repo/ansible/inventory/dev/managed-learn/hosts new file mode 120000 index 0000000000..fb74d690d4 --- /dev/null +++ b/private_repo/ansible/inventory/dev/managed-learn/hosts @@ -0,0 +1 @@ +../Core/hosts \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/managed-learn/secrets.yml b/private_repo/ansible/inventory/dev/managed-learn/secrets.yml new file mode 120000 index 0000000000..6bbc077aab --- /dev/null +++ b/private_repo/ansible/inventory/dev/managed-learn/secrets.yml @@ -0,0 +1 @@ +../Core/secrets.yml \ No newline at end of file From 664f931df9f36d273ce854cf85deff9e0dbdbcc4 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Tue, 10 Jan 2023 21:12:22 +0530 Subject: [PATCH 209/616] fix: adding graylog related changes --- .../ansible/inventory/dev/Core/common.yml | 15 +++++++++++++++ private_repo/ansible/inventory/dev/Core/hosts | 7 +++++++ .../ansible/inventory/dev/DataPipeline/hosts | 6 ++++++ .../ansible/inventory/dev/KnowledgePlatform/hosts | 6 ++++++ 4 files changed, 34 insertions(+) diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index bee6dc7028..a85b01c898 100644 --- 
a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -128,6 +128,8 @@ nginx_private_ingress_annotations: # ------------------------------------------------------------------------------------------------------------ # +graylog_trusted_proxies: "1.2.3.4/21" # This should be the kubernetes nodes subnet CIDR range + ## Optional learningservice_ip: "10.0.1.5" # Load balancer IP or server ip @@ -394,3 +396,16 @@ grafana_login_whitelisted_emails: |- # Add below var to monitor report-cassandra server report_cassandra_server_count: "{{ groups['report-cassandra'] | length }}" + +# graylog +graylog_open_to_public: true +send_logs_to_graylog: true +graylog_root_timezone: "Asia/Kolkata" +graylog_elasticsearch_discovery_enabled: "true" +graylog_allow_leading_wildcard_searches: "true" +graylog_allow_highlighting: "true" +graylog_transport_email_enabled: "true" +graylog_transport_email_hostname: "{{ mail_server_host }}" +graylog_transport_email_auth_username: "apikey" +graylog_transport_email_from_email: "{{ sunbird_mail_server_from_email }}" +graylog_transport_email_use_ssl: "false" \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/Core/hosts b/private_repo/ansible/inventory/dev/Core/hosts index 58fb6eeb95..6bcd606290 100644 --- a/private_repo/ansible/inventory/dev/Core/hosts +++ b/private_repo/ansible/inventory/dev/Core/hosts @@ -9,6 +9,12 @@ ansible_ssh_private_key_file=/var/lib/jenkins/secrets/deployer_ssh_key [keycloak:children] keycloak-1 +[graylog-1] +10.0.1.9 mongodb_master=True graylog_is_master=True + +[graylog:children] +graylog-1 + [log-es-1] 10.0.1.9 es_instance_name=log-es-1 node_name=log-es-1 es_etc_node_master=true es_etc_node_data=true @@ -137,6 +143,7 @@ node-exporter [core:children] es +graylog log-es cassandra postgresql-master diff --git a/private_repo/ansible/inventory/dev/DataPipeline/hosts b/private_repo/ansible/inventory/dev/DataPipeline/hosts index 
2ecf51e3bc..199392ce08 100644 --- a/private_repo/ansible/inventory/dev/DataPipeline/hosts +++ b/private_repo/ansible/inventory/dev/DataPipeline/hosts @@ -19,6 +19,12 @@ core-es-1 [log-es:children] log-es-1 +[graylog-1] +10.0.1.9 + +[graylog:children] +graylog-1 + ################# KP ########################## [learning] 10.0.1.5 diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts b/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts index c144bc6fa2..e66c3c6ab0 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts @@ -62,6 +62,12 @@ core-es-1 [log-es:children] log-es-1 +[graylog-1] +10.0.1.9 + +[graylog:children] +graylog-1 + [composite-search-cluster] 10.1.4.5 From a23919d698b1c5181fd1458651c664ba52b92a75 Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Tue, 10 Jan 2023 21:26:55 +0530 Subject: [PATCH 210/616] Release 5.1.0 (#3697) * Add Lern inventory template * Install openjdk-11.0.2 in jenkins * Add Lern BB vars to template * Remove duplicate flink jobs * Update template vars * Update lern specific vars --- deploy/jenkins/jenkins-server-setup.sh | 7 +++ .../jobs/FlinkPipelineJobs/config.xml | 3 +- .../jobs/FlinkJobs/config.xml | 2 - .../dev/KnowledgePlatform/common.yml | 47 ++++++++++++++++++- .../inventory/dev/KnowledgePlatform/hosts | 30 ++++++++++++ .../dev/KnowledgePlatform/secrets.yml | 27 ++++++++++- .../ansible/inventory/dev/Lern/common.yml | 1 + private_repo/ansible/inventory/dev/Lern/hosts | 1 + .../ansible/inventory/dev/Lern/secrets.yml | 1 + 9 files changed, 113 insertions(+), 6 deletions(-) create mode 120000 private_repo/ansible/inventory/dev/Lern/common.yml create mode 120000 private_repo/ansible/inventory/dev/Lern/hosts create mode 120000 private_repo/ansible/inventory/dev/Lern/secrets.yml diff --git a/deploy/jenkins/jenkins-server-setup.sh b/deploy/jenkins/jenkins-server-setup.sh index 
f2ef322969..670395335b 100755 --- a/deploy/jenkins/jenkins-server-setup.sh +++ b/deploy/jenkins/jenkins-server-setup.sh @@ -145,6 +145,13 @@ mv jdk-11 java-11-openjdk-amd64 cp -r java-11-openjdk-amd64 /usr/lib/jvm/ rm -rf java-11-openjdk-amd64 openjdk-11+28_linux-x64_bin.tar.gz +#Install openjdk-11.0.2 # needed for DP jobs +echo -e "\n\e[0;32m${bold}Installating openjdk 11.0.2${normal}" +wget https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz +tar -xf openjdk-11.0.2_linux-x64_bin.tar.gz +mv jdk-11.0.2 /usr/lib/jvm/ +rm openjdk-11.0.2_linux-x64_bin.tar.gz + #Install maven 3.6.3 echo -e "\n\e[0;32m${bold}Installating maven 3.6.3${normal}" wget https://downloads.apache.org/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/DataPipeline/jobs/FlinkPipelineJobs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/DataPipeline/jobs/FlinkPipelineJobs/config.xml index 8daf73245a..96881ee988 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/DataPipeline/jobs/FlinkPipelineJobs/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/DataPipeline/jobs/FlinkPipelineJobs/config.xml @@ -102,7 +102,6 @@ return """<b>This parameter is not used</b>""" 'de-normalization-primary', 'de-normalization-secondary', 'druid-validator', -'assessment-aggregator', 'content-cache-updater', 'user-cache-updater-v2', 'summary-denormalization', @@ -170,4 +169,4 @@ return """<b>This parameter is not used</b>""" false - \ No newline at end of file + diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/KnowledgePlatform/jobs/FlinkJobs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/KnowledgePlatform/jobs/FlinkJobs/config.xml index cb98de88c1..9a0134703e 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/KnowledgePlatform/jobs/FlinkJobs/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/KnowledgePlatform/jobs/FlinkJobs/config.xml @@ -104,8 +104,6 @@ return """<b>This parameter is not 
used</b>""" 'asset-enrichment', 'audit-event-generator', 'audit-history-indexer', -'collection-cert-pre-processor', -'collection-certificate-generator', 'auto-creator-v2', 'metrics-data-transformer', 'content-publish', diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml index bede16cb5b..1702cc633b 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/common.yml @@ -162,4 +162,49 @@ cloud_artifact_storage_namespace: "{{ cloud_public_storage_namespace }}" cloud_storage_base_url: "{{ cloud_storage_url }}" cloudstorage_base_path: "{{ cloud_storage_url }}" valid_cloudstorage_base_urls: '["{{ cloud_storage_url }}"]' -cloudstorage_relative_path_prefix: "CONTENT_STORAGE_BASE_PATH" \ No newline at end of file +cloudstorage_relative_path_prefix: "CONTENT_STORAGE_BASE_PATH" +cloud_storage_pathstyle_access: false + +### Lern BB - Adding Lern specific vars here. In future if we want to move it to seperate folder this can be used as the starting point + +# Mandatorty variables - DO NOT LEAVE ANYTHING BLANK # +is_multidc_enabled: false # Change this to true if you plan to use cassandra multi data center setup +#Assessment Aggregator Content Read API +content_read_api_host: "" # Your domain host ex: http://test.com +content_read_api_endpoint: "" # ex: api/content/v1/read/ + +# For sendgrid, if you want to change, update the following +sunbird_mail_server_host: "smtp.sendgrid.net" # Email host, can be any email provider +sunbird_mail_server_username: "apikey" # Email provider username; for sendgrid you can use "apikey" +sunbird_mail_server_port: ## Email server SMTP port ex: 587 +# This mail id should be verified by your provider. This is the mail id which will be used for `From Address`. For example, +# From: support@sunbird.org +# Subject: Forgot password +# Hi..... 
+sunbird_mail_server_from_email: "support@myorg.com" # Email ID that should be as from address in mails + +# Optional variables - Can be left blank if you dont plan to use the intended features +# data exhaust alerts +data_exhaust_webhook_url: "slack.com" # Slack webhook url +data_exhaust_Channel: "slack.com" # Slack channel for data products alerts + +# This sms sender id should be verified by your provider. This is the sender id which will be used for `From Address`. For example, +# From: SBSMS +# Hi..... +# This is optional. +# If not set, you won't get sms OTPs. You'll get it in mail though. +sunbird_notification_msg_default_sender: # SMS from Address; exact 6 char like SBSUNB + + +# Sensible defaults which you need not change - But if you would like to change, you are free to do so +data_exhaust_name: "lern-datapipeline-monitoring" # Slack notification name +postgres: + db_url: "{{ groups['postgres'][0] }}" + db_username: analytics + db_name: analytics + db_table_name: "{{env}}_consumer_channel_mapping" + db_port: 5432 + db_admin_user: postgres + db_admin_password: "{{dp_vault_pgdb_admin_password}}" + +### Lern BB diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts b/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts index c144bc6fa2..828e96c01e 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/hosts @@ -106,3 +106,33 @@ lp [mlworkbench] 0.0.0.0 + +### Lern BB - Adding Lern specific vars here. 
In future if we want to move it to seperate folder this can be used as the starting point +[spark] +10.0.2.4 + +[learning] +10.0.2.7 + +[raw-coordinator] +10.0.2.7 + +[raw-overlord] +10.0.2.7 + +[raw-broker] +10.0.2.7 + +[postgres] +10.0.2.5 + +[report-cassandra:children] +core-cassandra + +[lp-cassandra] +10.0.2.5 + +[redis] +10.0.2.2 + +### Lern BB diff --git a/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml b/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml index 1b62ad0a1f..0a03bfdb43 100644 --- a/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml +++ b/private_repo/ansible/inventory/dev/KnowledgePlatform/secrets.yml @@ -47,5 +47,30 @@ lp_vault_youtube_api_key: # youtube api token if you want lp_vault_graph_passport_key: "long-secret-to-calm-entropy-gods" # update if seperate object storage is used +# If cloud_service_provider is aws then update secret access key +# Example: cloud_public_storage_secret: "xkxjfjsdsfjdfjdhgjfhgjdhfgjdgjdfgdjgjdgdjhgjhd" + +# If cloud_service_provider is gcp then update with private-key which is in service account json file +# Example: cloud_public_storage_secret: "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n" + +# If cloud_service_provider is azure then update with storage account key +# Example: cloud_public_storage_secret: "BfoOQwA6UFI7URwkFjkjdjsdhjhjhjsdhjfM//lsTbXDmHOFR5GqEwAwzyrKXOUDywojCpOC/g==" cloud_private_storage_secret: "{{ cloud_public_storage_secret }}" -cloud_artifact_storage_secret: "{{ cloud_public_storage_secret }}" \ No newline at end of file +cloud_artifact_storage_secret: "{{ cloud_public_storage_secret }}" + +### Lern BB - Adding Lern specific vars here. 
In future if we want to move it to seperate folder this can be used as the starting point + +# Mandatorty variables - DO NOT LEAVE ANYTHING BLANK # +core_vault_sunbird_api_auth_token: # copy value form variable core_vault_sunbird_api_auth_token from core/secrets.yml +dp_vault_pgdb_password: "change.postgres.password" # postgres password for analytics db +dp_vault_druid_postgress_pass: "change.postgres.password" # postgres password for druid db +dp_vault_pgdb_admin_password: "change.postgres.password" # postgres password for admin +core_vault_sunbird_encryption_key: "" # copy value from variable core_vault_sunbird_encryption_key from core secrets.yml + +# Optional variables - Can be left blank if you dont plan to use the intended features +core_vault_sunbird_fcm_account_key: "" # Firebase Cloud Messaging API Key +sunbird_msg_91_auth: "" # API key for sending OTP SMS +sunbird_mail_server_password: "" # Email server password +dp_vault_data_exhaust_token: "" # slack api token + +### Lern BB diff --git a/private_repo/ansible/inventory/dev/Lern/common.yml b/private_repo/ansible/inventory/dev/Lern/common.yml new file mode 120000 index 0000000000..1168242b3a --- /dev/null +++ b/private_repo/ansible/inventory/dev/Lern/common.yml @@ -0,0 +1 @@ +../KnowledgePlatform/common.yml \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/Lern/hosts b/private_repo/ansible/inventory/dev/Lern/hosts new file mode 120000 index 0000000000..d54fc1e61a --- /dev/null +++ b/private_repo/ansible/inventory/dev/Lern/hosts @@ -0,0 +1 @@ +../KnowledgePlatform/hosts \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/Lern/secrets.yml b/private_repo/ansible/inventory/dev/Lern/secrets.yml new file mode 120000 index 0000000000..1a0f3f3224 --- /dev/null +++ b/private_repo/ansible/inventory/dev/Lern/secrets.yml @@ -0,0 +1 @@ +../KnowledgePlatform/secrets.yml \ No newline at end of file From f94a83f3b240220414aab3c67c7b5e63e088ab08 Mon Sep 17 00:00:00 2001 
From: Keshav Prasad Date: Tue, 10 Jan 2023 23:52:59 +0530 Subject: [PATCH 211/616] feat: ED-699 adding data to enable opa Signed-off-by: Keshav Prasad --- .../ansible/inventory/dev/Core/common.yml | 57 +++++++++++++++---- .../ansible/inventory/dev/Kubernetes/keys | 1 + .../inventory/dev/Sunbird-RC/common.yml | 1 + .../ansible/inventory/dev/Sunbird-RC/hosts | 1 + .../ansible/inventory/dev/Sunbird-RC/keys | 1 + .../inventory/dev/Sunbird-RC/secrets.yml | 1 + private_repo/ansible/inventory/dev/UCI/keys | 1 + .../ansible/inventory/dev/key-generate.sh | 21 +++++++ .../ansible/inventory/dev/managed-learn/keys | 1 + 9 files changed, 73 insertions(+), 12 deletions(-) create mode 120000 private_repo/ansible/inventory/dev/Kubernetes/keys create mode 120000 private_repo/ansible/inventory/dev/Sunbird-RC/common.yml create mode 120000 private_repo/ansible/inventory/dev/Sunbird-RC/hosts create mode 120000 private_repo/ansible/inventory/dev/Sunbird-RC/keys create mode 120000 private_repo/ansible/inventory/dev/Sunbird-RC/secrets.yml create mode 120000 private_repo/ansible/inventory/dev/UCI/keys create mode 100755 private_repo/ansible/inventory/dev/key-generate.sh create mode 120000 private_repo/ansible/inventory/dev/managed-learn/keys diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index a85b01c898..f2d6925eaf 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -397,15 +397,48 @@ grafana_login_whitelisted_emails: |- # Add below var to monitor report-cassandra server report_cassandra_server_count: "{{ groups['report-cassandra'] | length }}" -# graylog -graylog_open_to_public: true -send_logs_to_graylog: true -graylog_root_timezone: "Asia/Kolkata" -graylog_elasticsearch_discovery_enabled: "true" -graylog_allow_leading_wildcard_searches: "true" -graylog_allow_highlighting: "true" -graylog_transport_email_enabled: "true" 
-graylog_transport_email_hostname: "{{ mail_server_host }}" -graylog_transport_email_auth_username: "apikey" -graylog_transport_email_from_email: "{{ sunbird_mail_server_from_email }}" -graylog_transport_email_use_ssl: "false" \ No newline at end of file +# graylog variables +graylog_open_to_public: true # allows you to access via domain/graylog +send_logs_to_graylog: true # agents starts sending logs to graylog instead of ES +graylog_root_timezone: "Asia/Kolkata" # timezone +graylog_elasticsearch_discovery_enabled: "true" # auto discover ES nodes and version +graylog_allow_leading_wildcard_searches: "true" # allows wild card searching +graylog_allow_highlighting: "true" # shows yellow highlights on matches +graylog_transport_email_enabled: "true" # enables emails to be sent via graylog +graylog_transport_email_hostname: "{{ mail_server_host }}" # email server host name +graylog_transport_email_auth_username: "apikey" # sendgrid / email service api key +graylog_transport_email_from_email: "{{ sunbird_mail_server_from_email }}" # from email address +graylog_transport_email_use_ssl: "false" # cannot use both tls and ssl, so disabling ssl as tls is enabled by default + +# Opa and Adminutils +# Prefixes will match the starting part of the files under keys dirctory in inventory +adminutil__device_keyprefix: "mobile_devicev2_key" # private key prefix for mobile apps +adminutil__device_keystart: 1 # starting number of the key file +adminutil__device_keycount: 10 # ending number of the key file +adminutil__access_keyprefix: "accessv1_key" # private key prefix for user access tokens +adminutil__access_keystart: 1 # starting number of the key file +adminutil__access_keycount: 10 # ending number of the key file +adminutil__desktop_keyprefix: "desktop_devicev2_key" # private key prefix for desktop apps +adminutil__desktop_keystart: 1 # starting number of the key file +adminutil__desktop_keycount: 10 # ending number of the key file +adminutil__portal_anonymous_keyprefix: 
"portal_anonymous_key" # private key prefix for portal anonymous sessions +adminutil__portal_anonymous_keystart: 1 # starting number of the key file +adminutil__portal_anonymous_keycount: 10 # ending number of the key file +adminutil__portal_loggedin_keyprefix: "portal_loggedin_key" # private key prefix for portal loggedin sessions +adminutil__portal_loggedin_keystart: 1 # starting number of the key file +adminutil__portal_loggedin_keycount: 10 # ending number of the key file +adminutil_embed_role: 'true' # embeds user roles in access tokens + +# Kong and Adminutils +# Consumer names will match the starting part of the files under keys dirctory in inventory +kong_mobile_v2_consumer: "mobile_devicev2" # kong consumer name for mobile apps +kong_desktop_v2_consumer: "desktop_devicev2" # kong consumer name for desktop apps +kong_portal_anonymous_consumer: "portal_anonymous" # kong consumer name for portal anonymous sessions +kong_portal_loggedin_consumer: "portal_loggedin" # kong consumer name for portal loggedin sessions +kong_desktop_device_consumer_names_for_opa: '["desktop_devicev2", "desktop_device"]' # ops checks will be skipped for desktop consumers + +# Portal sessions +sunbird_kong_device_register: 'true' # enables refersh token api call after login +sunbird_kong_device_register_anonymous: 'true' # enabled anonymous sessions +sunbird_session_store_type: redis # uses redis for session data instead of cassandra +portal_redis_connection_string: "redis://:@{{ sunbird_redis_host }}:6379/3" # Uses KP redis and DB number 3 \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/Kubernetes/keys b/private_repo/ansible/inventory/dev/Kubernetes/keys new file mode 120000 index 0000000000..442dd3e557 --- /dev/null +++ b/private_repo/ansible/inventory/dev/Kubernetes/keys @@ -0,0 +1 @@ +../Core/keys/ \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/Sunbird-RC/common.yml b/private_repo/ansible/inventory/dev/Sunbird-RC/common.yml 
new file mode 120000 index 0000000000..1465b46671 --- /dev/null +++ b/private_repo/ansible/inventory/dev/Sunbird-RC/common.yml @@ -0,0 +1 @@ +../Core/common.yml \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/Sunbird-RC/hosts b/private_repo/ansible/inventory/dev/Sunbird-RC/hosts new file mode 120000 index 0000000000..fb74d690d4 --- /dev/null +++ b/private_repo/ansible/inventory/dev/Sunbird-RC/hosts @@ -0,0 +1 @@ +../Core/hosts \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/Sunbird-RC/keys b/private_repo/ansible/inventory/dev/Sunbird-RC/keys new file mode 120000 index 0000000000..442dd3e557 --- /dev/null +++ b/private_repo/ansible/inventory/dev/Sunbird-RC/keys @@ -0,0 +1 @@ +../Core/keys/ \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/Sunbird-RC/secrets.yml b/private_repo/ansible/inventory/dev/Sunbird-RC/secrets.yml new file mode 120000 index 0000000000..6bbc077aab --- /dev/null +++ b/private_repo/ansible/inventory/dev/Sunbird-RC/secrets.yml @@ -0,0 +1 @@ +../Core/secrets.yml \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/UCI/keys b/private_repo/ansible/inventory/dev/UCI/keys new file mode 120000 index 0000000000..442dd3e557 --- /dev/null +++ b/private_repo/ansible/inventory/dev/UCI/keys @@ -0,0 +1 @@ +../Core/keys/ \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/key-generate.sh b/private_repo/ansible/inventory/dev/key-generate.sh new file mode 100755 index 0000000000..2bf82230b0 --- /dev/null +++ b/private_repo/ansible/inventory/dev/key-generate.sh @@ -0,0 +1,21 @@ +#!/bin/bash +set -euo pipefail +read -s -p 'Enter the ansible vault password (redacted): ' vault_pass +echo +read -s -p 'Re-enter the ansible vault password (redacted): ' confirm_vault_pass +echo +if [[ $vault_pass == $confirm_vault_pass ]] +then + echo "$vault_pass" > temp_vault_pass + cd Core/keys + for i in {1..10}; do openssl genrsa -out mobile_devicev2_c$i 
2048 && openssl pkcs8 -topk8 -inform PEM -in mobile_devicev2_c$i -out mobile_devicev2_key$i -nocrypt && rm -rf mobile_devicev2_c$i ; done + for i in {1..10}; do openssl genrsa -out accessv1_c$i 2048 && openssl pkcs8 -topk8 -inform PEM -in accessv1_c$i -out accessv1_key$i -nocrypt && rm -rf accessv1_c$i ; done + for i in {1..10}; do openssl genrsa -out desktop_devicev2_c$i 2048 && openssl pkcs8 -topk8 -inform PEM -in desktop_devicev2_c$i -out desktop_devicev2_key$i -nocrypt && rm -rf desktop_devicev2_c$i ; done + for i in {1..10}; do openssl genrsa -out portal_anonymous_c$i 2048 && openssl pkcs8 -topk8 -inform PEM -in portal_anonymous_c$i -out portal_anonymous_key$i -nocrypt && rm -rf portal_anonymous_c$i ; done + for i in {1..10}; do openssl genrsa -out portal_loggedin_c$i 2048 && openssl pkcs8 -topk8 -inform PEM -in portal_loggedin_c$i -out portal_loggedin_key$i -nocrypt && rm -rf portal_loggedin_c$i ; done + while read -r line; do ansible-vault encrypt $line --vault-password-file ../../temp_vault_pass; done <<< $(ls) + cd ../.. 
&& rm temp_vault_pass + echo "OK" +else + echo "Vault passwords dont match" +fi \ No newline at end of file diff --git a/private_repo/ansible/inventory/dev/managed-learn/keys b/private_repo/ansible/inventory/dev/managed-learn/keys new file mode 120000 index 0000000000..442dd3e557 --- /dev/null +++ b/private_repo/ansible/inventory/dev/managed-learn/keys @@ -0,0 +1 @@ +../Core/keys/ \ No newline at end of file From c9297de22071f6ae568bae8cb69e6ee8b15539b4 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 11 Jan 2023 00:14:51 +0530 Subject: [PATCH 212/616] fear: adding required consumers for sessions Signed-off-by: Keshav Prasad --- .../ansible/inventory/dev/Core/common.yml | 41 ++++++++++++++++--- 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index f2d6925eaf..3397552d63 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -241,11 +241,9 @@ cloudstorage_base_path: "{{ cloud_storage_url }}" valid_cloudstorage_base_urls: '["{{ cloud_storage_url }}"]' cloudstorage_relative_path_prefix: "CONTENT_STORAGE_BASE_PATH" - - # Provide the admin-api consumer access to all API's - The public repo restricts this for security reasons -# If you dont want to key to have access to all API's, please remove the variables kong_all_consumer_groups and kong_consumers or edit the groups to have a smaller subset -kong_all_consumer_groups: +# If you dont want the admin api key to have access to all API's, please remove the variables "all_apis_access_group" and "kong_consumers" or edit the groups to have a smaller subset +all_apis_access_group: - announcementAccess - anonymousAppAccess - anonymousCertificateAccess @@ -367,6 +365,37 @@ kong_all_consumer_groups: kong_consumers: - username: api-admin + groups: "{{ all_apis_access_group }}" + state: present + - username: mobile_admin + groups: "{{ 
mobile_admin_groups }}" + print_credentials: true + state: present + - username: mobile_app + groups: "{{ mobile_app_groups }}" + state: present + - username: mobile_device + groups: "{{ mobile_device_groups }}" + state: present + - username: mobile_devicev2 + groups: "{{ mobile_device_groups }}" + state: present + - username: portal_anonymous_register + groups: "{{ portal_anonymous_register }}" + state: present + - username: portal_loggedin_register + groups: "{{ portal_loggedin_register }}" + state: present + - username: portal_anonymous + groups: "{{ anonymous_user_groups }}" + state: present + - username: portal_loggedin + groups: "{{ kong_all_consumer_groups }}" + state: present + - username: portal_anonymous_fallback_token + groups: "{{ anonymous_user_groups }}" + state: present + - username: portal_loggedin_fallback_token groups: "{{ kong_all_consumer_groups }}" state: present @@ -439,6 +468,6 @@ kong_desktop_device_consumer_names_for_opa: '["desktop_devicev2", "desktop_devic # Portal sessions sunbird_kong_device_register: 'true' # enables refersh token api call after login -sunbird_kong_device_register_anonymous: 'true' # enabled anonymous sessions +sunbird_kong_device_register_anonymous: 'true' # enables anonymous sessions sunbird_session_store_type: redis # uses redis for session data instead of cassandra -portal_redis_connection_string: "redis://:@{{ sunbird_redis_host }}:6379/3" # Uses KP redis and DB number 3 \ No newline at end of file +portal_redis_connection_string: "redis://:@{{ sunbird_redis_host }}:6379/3" # Uses KP redis and DB number 3 to store session data \ No newline at end of file From c4ac3713c5e12a58dee937c8d6c7a86ab1dc1c30 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Wed, 11 Jan 2023 00:29:16 +0530 Subject: [PATCH 213/616] fix: adding additional set of vars for sessions and graylog Signed-off-by: Keshav Prasad --- .../ansible/inventory/dev/Core/common.yml | 2 +- .../ansible/inventory/dev/Core/secrets.yml | 18 ++++++++++++++++-- 
.../inventory/dev/DataPipeline/common.yml | 2 ++ .../inventory/dev/KnowledgePlatform/common.yml | 3 ++- 4 files changed, 21 insertions(+), 4 deletions(-) diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index 3397552d63..d174f6ea24 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -428,7 +428,7 @@ report_cassandra_server_count: "{{ groups['report-cassandra'] | length }}" # graylog variables graylog_open_to_public: true # allows you to access via domain/graylog -send_logs_to_graylog: true # agents starts sending logs to graylog instead of ES +send_logs_to_graylog: true # filebeat agents will send logs to graylog instead of ES graylog_root_timezone: "Asia/Kolkata" # timezone graylog_elasticsearch_discovery_enabled: "true" # auto discover ES nodes and version graylog_allow_leading_wildcard_searches: "true" # allows wild card searching diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index 9b8f0f43a5..68261f8ec5 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -117,7 +117,7 @@ mongodb_keyfile_content: | # 4. Again vist Admin Panel post restart -> Click on Plugins => Write API # 5. Under MASTER TOKENS section, click on create token button, It will generate a token. 
discussionsmw_nodebb_authorization_token: # Read the comment above to generate this key -core_vault_mail_server_password: "" # Email server password +core_vault_mail_server_password: "" # Email server password / api token # Oauth keys core_vault_sunbird_google_oauth_clientId_portal: # Google oauth client id @@ -126,6 +126,17 @@ core_vault_sunbird_google_captcha_site_key_portal: # Google recaptch site google_captcha_private_key: # Google recaptch private key learning_content_drive_apiKey: # Google drive api key +### Graylog ### +graylog_password_secret: "" # Random secret. Generate using the command: pwgen -s 96 1 +graylog_root_password_sha2: "" # Random secret. Generate using the command: echo -n "Enter Password: " && head -1 Date: Wed, 11 Jan 2023 10:28:05 +0530 Subject: [PATCH 214/616] LR-278 added lern specific variables --- .../roles/stack-sunbird/templates/sunbird_lms-service.env | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env index 946bf3af10..f1c421a603 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env @@ -146,4 +146,7 @@ sunbird_cloud_service_provider={{cloud_service_provider}} isMultiDCEnabled={{cassandra_multi_dc_enabled}} # Release-5.0.1 -cloud_storage_base_url={{cloud_storage_base_url}} \ No newline at end of file +cloud_storage_base_url={{cloud_storage_base_url}} +cloud_storage_cname_url={{ cloud_storage_cname_url | default('') }} +cloud_storage_dial_bucketname={{ cloud_storage_dial_bucketname | default('dial') }} +cloud_storage_path_prefix_dial={{ cloudstorage_relative_path_prefix_dial | default('DIAL_STORAGE_BASE_PATH') }} \ No newline at end of file From 08d628c20729a091ef8da5da6b33d1a2dc19bba9 Mon Sep 17 00:00:00 2001 From: PrasadMoka Date: Wed, 11 Jan 2023 15:22:24 +0530 Subject: [PATCH 215/616] LR-278 added 
placeholder as configurable value --- ansible/roles/stack-sunbird/defaults/main.yml | 5 ++++- .../roles/stack-sunbird/templates/sunbird_lms-service.env | 3 ++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 036fda51bd..bcb4581212 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -1062,4 +1062,7 @@ inquiry_assessment_publish_kafka_topic_name: "{{ env_name }}.assessment.publish. inquiry_cassandra_connection: "{{ lp_cassandra_connection }}" inquiry_cassandra_keyspace_prefix: "{{ lp_cassandra_keyspace_prefix }}" inquiry_redis_host: "{{ sunbird_lp_redis_host }}" -inquiry_search_service_base_url: "{{ sunbird_search_service_api_base_url }}/v3/search" \ No newline at end of file +inquiry_search_service_base_url: "{{ sunbird_search_service_api_base_url }}/v3/search" + +### LERN Release-5.0.1 +cloud_store_base_path_placeholder: "CLOUD_BASE_PATH" \ No newline at end of file diff --git a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env index f1c421a603..0d5131b418 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env @@ -149,4 +149,5 @@ isMultiDCEnabled={{cassandra_multi_dc_enabled}} cloud_storage_base_url={{cloud_storage_base_url}} cloud_storage_cname_url={{ cloud_storage_cname_url | default('') }} cloud_storage_dial_bucketname={{ cloud_storage_dial_bucketname | default('dial') }} -cloud_storage_path_prefix_dial={{ cloudstorage_relative_path_prefix_dial | default('DIAL_STORAGE_BASE_PATH') }} \ No newline at end of file +cloud_storage_path_prefix_dial={{ cloudstorage_relative_path_prefix_dial | default('DIAL_STORAGE_BASE_PATH') }} +cloud_store_base_path_placeholder={{ cloud_store_base_path_placeholder | default('CLOUD_BASE_PATH') }} \ No 
newline at end of file From 8399904e04aa9192f837b752d92454b93441f54c Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 12 Jan 2023 11:19:45 +1100 Subject: [PATCH 216/616] added role for oci-cli Signed-off-by: Deepak Devadathan --- ansible/bootstrap.yml | 12 +++++++++++- ansible/roles/oci-cli/defaults/main.yml | 1 + ansible/roles/oci-cli/tasks/main.yml | 24 ++++++++++++++++++++++++ 3 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 ansible/roles/oci-cli/defaults/main.yml create mode 100644 ansible/roles/oci-cli/tasks/main.yml diff --git a/ansible/bootstrap.yml b/ansible/bootstrap.yml index b23479e833..b3f1d9388c 100644 --- a/ansible/bootstrap.yml +++ b/ansible/bootstrap.yml @@ -48,7 +48,17 @@ roles: - role: aws-cli tags: - - aws_cli + - aws_cli + +- hosts: "{{ hosts }}" + become: yes + ignore_unreachable: yes + vars_files: + - "{{inventory_dir}}/secrets.yml" + roles: + - role: oci-cli + tags: + - oci_cli - hosts: "{{ hosts| default('all') }}" become: yes diff --git a/ansible/roles/oci-cli/defaults/main.yml b/ansible/roles/oci-cli/defaults/main.yml new file mode 100644 index 0000000000..147a2e03f1 --- /dev/null +++ b/ansible/roles/oci-cli/defaults/main.yml @@ -0,0 +1 @@ +oci_cli_url: https://github.com/oracle/oci-cli/releases/download/v3.22.0/oci-cli-3.22.0-Ubuntu-18.04-Offline.zip \ No newline at end of file diff --git a/ansible/roles/oci-cli/tasks/main.yml b/ansible/roles/oci-cli/tasks/main.yml new file mode 100644 index 0000000000..803eb31b56 --- /dev/null +++ b/ansible/roles/oci-cli/tasks/main.yml @@ -0,0 +1,24 @@ +--- +- name: Download the installation file + get_url: + url: "{{ oci_cli_url }}" + dest: /tmp/ocicli.zip + +- name: Installing unzip + apt: + name: "{{item}}" + state: latest + with_items: + - zip + - unzip + +- name: Unzip the installer + unarchive: + src: /tmp/ocicli.zip + dest: /tmp/ + remote_src: yes + +- name: install oci cli + shell: ./oci-cli-installation/install.sh --install-dir {{ lookup('env', 'HOME') }} --exec-dir 
{{ lookup('env', 'HOME') }} --script-dir {{ lookup('env', 'HOME') }} --accept-all-defaults + args: + chdir: /tmp/ From 3540d034aea2f09e77535d2dfdc3c34a43586b2b Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 12 Jan 2023 11:32:01 +1100 Subject: [PATCH 217/616] test home location Signed-off-by: Deepak Devadathan --- ansible/roles/oci-cli/tasks/main.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ansible/roles/oci-cli/tasks/main.yml b/ansible/roles/oci-cli/tasks/main.yml index 803eb31b56..513f793463 100644 --- a/ansible/roles/oci-cli/tasks/main.yml +++ b/ansible/roles/oci-cli/tasks/main.yml @@ -18,6 +18,9 @@ dest: /tmp/ remote_src: yes +- name: debug through ansible.env + debug: var=ansible_env.HOME + - name: install oci cli shell: ./oci-cli-installation/install.sh --install-dir {{ lookup('env', 'HOME') }} --exec-dir {{ lookup('env', 'HOME') }} --script-dir {{ lookup('env', 'HOME') }} --accept-all-defaults args: From 89dd838f2e4b54ec217866a1103442aa0810a789 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 12 Jan 2023 11:35:05 +1100 Subject: [PATCH 218/616] updated the oci cli install location Signed-off-by: Deepak Devadathan --- ansible/roles/oci-cli/tasks/main.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/ansible/roles/oci-cli/tasks/main.yml b/ansible/roles/oci-cli/tasks/main.yml index 513f793463..92f045ba38 100644 --- a/ansible/roles/oci-cli/tasks/main.yml +++ b/ansible/roles/oci-cli/tasks/main.yml @@ -18,10 +18,7 @@ dest: /tmp/ remote_src: yes -- name: debug through ansible.env - debug: var=ansible_env.HOME - - name: install oci cli - shell: ./oci-cli-installation/install.sh --install-dir {{ lookup('env', 'HOME') }} --exec-dir {{ lookup('env', 'HOME') }} --script-dir {{ lookup('env', 'HOME') }} --accept-all-defaults + shell: ./oci-cli-installation/install.sh --install-dir {{ ansible_env.HOME }} --exec-dir {{ ansible_env.HOME }} --script-dir {{ ansible_env.HOME }} --accept-all-defaults args: chdir: /tmp/ 
From f25512e65bdd2943e8b24c72b07b9617c097743b Mon Sep 17 00:00:00 2001 From: Kenneth Heung Date: Thu, 12 Jan 2023 11:58:31 +0800 Subject: [PATCH 219/616] include option oci-cli in template edit jenkins pipeline template to include oci_cli option --- .../jobs/dev/jobs/Core/jobs/Bootstrap/config.xml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml index b95bca2645..fa00efdf00 100644 --- a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml +++ b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml @@ -96,6 +96,7 @@ return """<b>This parameter is not used</b>""" 'azure_cli', 'aws_cli', 'gcloud_cli', +'oci_cli', 'all'] true @@ -155,4 +156,4 @@ return """<b>This parameter is not used</b>""" false - \ No newline at end of file + From 3937d5abe8465c6f14428463211771a1c243e591 Mon Sep 17 00:00:00 2001 From: kumarks1122 Date: Thu, 12 Jan 2023 19:44:17 +0530 Subject: [PATCH 220/616] #000 | LERN and ED Dataproducts jenkins changes added --- .../jobs/AnalyticsReplayJobs/config.xml | 14 +------------- .../Lern/jobs/LernAnalyticsReplayJobs/config.xml | 15 --------------- .../jobs/Lern/jobs/LernDataProducts/config.xml | 2 +- 3 files changed, 2 insertions(+), 29 deletions(-) diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/DataPipeline/jobs/AnalyticsReplayJobs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/DataPipeline/jobs/AnalyticsReplayJobs/config.xml index 95b1f8c7e4..b65594e17b 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/DataPipeline/jobs/AnalyticsReplayJobs/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/DataPipeline/jobs/AnalyticsReplayJobs/config.xml @@ -48,12 +48,8 @@ <font color=dimgray size=2><b>Specify the job id.</b></font> - collection-summary-report-v2 - collection-summary-report 
program-collection-summary-report audit-metrics-report - admin-user-reports - admin-geo-reports district-weekly district-monthly desktop-consumption-report @@ -61,18 +57,10 @@ content-rating-updater druid-query-processor monitor-job-summ - course-enrollment-report textbook-progress-report etb-metrics daily-metrics - progress-exhaust - userinfo-exhaust - response-exhaust - progress-exhaust-v2 - response-exhaust-v2 - course-batch-status-updater - cassandra-migration - druid-dataset + druid-dataset uci-response-exhaust uci-private-exhaust diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernAnalyticsReplayJobs/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernAnalyticsReplayJobs/config.xml index cd2ac8b2d1..6567620e9c 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernAnalyticsReplayJobs/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernAnalyticsReplayJobs/config.xml @@ -50,32 +50,17 @@ collection-summary-report-v2 collection-summary-report - program-collection-summary-report - audit-metrics-report admin-user-reports admin-geo-reports - district-weekly - district-monthly - desktop-consumption-report - wfs - content-rating-updater - druid-query-processor - monitor-job-summ cassandra-migration course-enrollment-report - textbook-progress-report - etb-metrics - daily-metrics progress-exhaust userinfo-exhaust response-exhaust progress-exhaust-v2 response-exhaust-v2 course-batch-status-updater - druid-dataset score-metric-migration-job - uci-response-exhaust - uci-private-exhaust diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernDataProducts/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernDataProducts/config.xml index 6f8fd449c2..dba542fd06 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernDataProducts/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernDataProducts/config.xml @@ -230,7 +230,7 @@ return """<b>This parameter is 
not used</b>""" - pipelines/deploy/ed-dataproducts/Jenkinsfile + pipelines/deploy/lern-dataproducts/Jenkinsfile false From 1ed421fa280edb0921ec14335c9e200bd932a805 Mon Sep 17 00:00:00 2001 From: kumarks1122 Date: Thu, 12 Jan 2023 19:46:28 +0530 Subject: [PATCH 221/616] #000 | LERN and ED Dataproducts jenkins changes added --- .../Deploy/jobs/dev/jobs/Lern/jobs/LernDataProducts/config.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernDataProducts/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernDataProducts/config.xml index dba542fd06..4fa8dea65d 100644 --- a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernDataProducts/config.xml +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/Lern/jobs/LernDataProducts/config.xml @@ -169,7 +169,7 @@ return """<b>This parameter is not used</b>""" module <font color=dimgray size=2><b>It will deploy only lpa_core_dp_artifacts(batch-models & job-manager) jar.</b></font> - ed-dataproducts + lern-dataproducts false From f6227f351191811e98b1e311ba8734108fb63898 Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Fri, 13 Jan 2023 13:38:48 +0530 Subject: [PATCH 222/616] fix: adding adminutil_learner_api_key consumer --- private_repo/ansible/inventory/dev/Core/common.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/private_repo/ansible/inventory/dev/Core/common.yml b/private_repo/ansible/inventory/dev/Core/common.yml index d174f6ea24..6010476336 100644 --- a/private_repo/ansible/inventory/dev/Core/common.yml +++ b/private_repo/ansible/inventory/dev/Core/common.yml @@ -398,6 +398,10 @@ kong_consumers: - username: portal_loggedin_fallback_token groups: "{{ kong_all_consumer_groups }}" state: present + - username: adminutil_learner_api_key + groups: "{{ userAccess }}" + state: present + ## Grafana oauth From eb77fcfac79429dc50f392aa1d978f18514ef7bd Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Fri, 13 Jan 2023 13:44:27 +0530 Subject: 
[PATCH 223/616] fix: adding adminutls to learner api token Signed-off-by: Keshav Prasad --- private_repo/ansible/inventory/dev/Core/secrets.yml | 3 +++ private_repo/ansible/inventory/dev/key-generate.sh | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index 68261f8ec5..c57a8bd37c 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -137,6 +137,9 @@ sunbird_loggedin_register_token: # Use portal_loggedin_register consumer tok sunbird_anonymous_default_token: # Use portal_anonymous_fallback_token consumer token sunbird_logged_default_token: # Use portal_loggedin_fallback_token consumer token +# adminutils to learner api token +adminutil_learner_api_auth_key: # Use adminutil_learner_api_key consumer token + # ------------------------------------------------------------------------------------------------------------ # # Optional variables - Can be left blank if you dont plan to use the intended features core_vault_monitor_alerts_slack_url: "" # Slack webhook for alerts from alertmanager diff --git a/private_repo/ansible/inventory/dev/key-generate.sh b/private_repo/ansible/inventory/dev/key-generate.sh index 2bf82230b0..e0514c6dd6 100755 --- a/private_repo/ansible/inventory/dev/key-generate.sh +++ b/private_repo/ansible/inventory/dev/key-generate.sh @@ -7,7 +7,7 @@ echo if [[ $vault_pass == $confirm_vault_pass ]] then echo "$vault_pass" > temp_vault_pass - cd Core/keys + mkdir -p Core/keys && cd Core/keys for i in {1..10}; do openssl genrsa -out mobile_devicev2_c$i 2048 && openssl pkcs8 -topk8 -inform PEM -in mobile_devicev2_c$i -out mobile_devicev2_key$i -nocrypt && rm -rf mobile_devicev2_c$i ; done for i in {1..10}; do openssl genrsa -out accessv1_c$i 2048 && openssl pkcs8 -topk8 -inform PEM -in accessv1_c$i -out accessv1_key$i -nocrypt && rm -rf accessv1_c$i ; done for i 
in {1..10}; do openssl genrsa -out desktop_devicev2_c$i 2048 && openssl pkcs8 -topk8 -inform PEM -in desktop_devicev2_c$i -out desktop_devicev2_key$i -nocrypt && rm -rf desktop_devicev2_c$i ; done From 6ab68620854580256361bb9fb7b8e703656e35b4 Mon Sep 17 00:00:00 2001 From: G33tha Date: Tue, 17 Jan 2023 16:25:48 +0530 Subject: [PATCH 224/616] Update youtube api key for content service (#3714) --- ansible/roles/stack-sunbird/defaults/main.yml | 6 +++++- .../templates/content-service_application.conf | 6 ++++++ private_repo/ansible/inventory/dev/Core/secrets.yml | 6 +++++- 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 620ec5f4ad..7fbeb17d70 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -1065,4 +1065,8 @@ inquiry_redis_host: "{{ sunbird_lp_redis_host }}" inquiry_search_service_base_url: "{{ sunbird_search_service_api_base_url }}/v3/search" ### LERN Release-5.0.1 -cloud_store_base_path_placeholder: "CLOUD_BASE_PATH" \ No newline at end of file +cloud_store_base_path_placeholder: "CLOUD_BASE_PATH" + +#Youtube Standard Licence Validation +youtube_app_name: fetch-youtube-license +youtube_api_key: "{{ lp_vault_youtube_api_key }}" diff --git a/ansible/roles/stack-sunbird/templates/content-service_application.conf b/ansible/roles/stack-sunbird/templates/content-service_application.conf index bb44a71828..837298ac30 100644 --- a/ansible/roles/stack-sunbird/templates/content-service_application.conf +++ b/ansible/roles/stack-sunbird/templates/content-service_application.conf @@ -490,6 +490,12 @@ cloud_storage_container: "{{ cloud_storage_content_bucketname }}" # Google Drive APIKEY learning_content_drive_apiKey = "{{ learning_content_drive_apiKey }}" +#Youtube Standard Licence Validation +learning.content.youtube.application.name="{{ youtube_app_name }}" +learning_content_youtube_apikey="{{ 
youtube_api_key }}" +youtube.license.regex.pattern=["\\?vi?=([^&]*)", "watch\\?.*v=([^&]*)", "(?:embed|vi?)/([^/?]*)","^([A-Za-z0-9\\-\\_]*)"] +learning.valid_license=["creativeCommon"] + kafka { urls : "{{ kafka_urls }}" topic.send.enable : true diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index c57a8bd37c..8bf07e91cd 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -200,4 +200,8 @@ cloud_private_storage_secret: "{{ cloud_public_storage_secret }}" cloud_artifact_storage_secret: "{{ cloud_public_storage_secret }}" # Graylog -graylog_transport_email_auth_password: "{{ core_vault_mail_server_password }}" # email server password / api token \ No newline at end of file +graylog_transport_email_auth_password: "{{ core_vault_mail_server_password }}" # email server password / api token + +# ------------------------------------------------------------------------------------------------------------ # +# Optional variables - Can be left blank if you dont plan to use the intended features +lp_vault_youtube_api_key: # youtube api token if you want to upload youtube video urls on your site From 93eeb2db65ef9281401532a50e4aa3b2104a9634 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 20 Jan 2023 14:26:53 +1100 Subject: [PATCH 225/616] added the missing uploads for desktop faq upload Signed-off-by: Deepak Devadathan --- ansible/desktop-faq-upload.yml | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index 7c5317dd29..1ed429bd23 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -252,7 +252,7 @@ - upload-label - block: - - name: upload folder to oci oss + - name: upload folder to oci oss - chatbot include_role: name: oci-cloud-storage tasks_from: upload-folder.yml @@ -260,5 +260,22 @@ 
oss_path: "{{ destination_path }}/" tags: - upload-chatbot-config - - upload-batch + - block: + - name: upload folder to oci oss - csv-template + include_role: + name: oci-cloud-storage + tasks_from: upload-folder.yml + vars: + oss_path: "{{ destination_path }}/" + tags: + - upload-csv-template + - block: + - name: upload folder to oci oss - discussion-ui + include_role: + name: oci-cloud-storage + tasks_from: upload-folder.yml + vars: + oss_path: "{{ destination_path }}/" + tags: + - upload-discussion-ui when: cloud_service_provider == "oci" From ccb48064200bf0dc5fc7b07a18e6bb297722fa30 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 20 Jan 2023 14:52:55 +1100 Subject: [PATCH 226/616] added CLOUD_STORAGE_ENDPOINT env variable Signed-off-by: Deepak Devadathan --- ansible/roles/stack-sunbird/templates/sunbird_cert-service.env | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_cert-service.env b/ansible/roles/stack-sunbird/templates/sunbird_cert-service.env index 19a9a6c46c..e3d1e813d7 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_cert-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_cert-service.env @@ -11,3 +11,4 @@ ITEXT_LICENSE_PATH=/home/sunbird/itext_trail_license.xml PUBLIC_CLOUD_STORAGE_KEY={{cloud_public_storage_accountname}} PUBLIC_CLOUD_STORAGE_SECRET={{cloud_public_storage_secret}} PUBLIC_CONTAINER_NAME={{cloud_storage_certqr_bucketname}} +CLOUD_STORAGE_ENDPOINT={{cloud_storage_endpoint}} From f64c76907eb96b8482fcbe70e9b5d82f0da55800 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 20 Jan 2023 14:55:51 +1100 Subject: [PATCH 227/616] updated the ansible variable for cloud_storage_endpoint Signed-off-by: Deepak Devadathan --- ansible/roles/stack-sunbird/templates/sunbird_cert-service.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_cert-service.env 
b/ansible/roles/stack-sunbird/templates/sunbird_cert-service.env index e3d1e813d7..2008cb28dc 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_cert-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_cert-service.env @@ -11,4 +11,4 @@ ITEXT_LICENSE_PATH=/home/sunbird/itext_trail_license.xml PUBLIC_CLOUD_STORAGE_KEY={{cloud_public_storage_accountname}} PUBLIC_CLOUD_STORAGE_SECRET={{cloud_public_storage_secret}} PUBLIC_CONTAINER_NAME={{cloud_storage_certqr_bucketname}} -CLOUD_STORAGE_ENDPOINT={{cloud_storage_endpoint}} +CLOUD_STORAGE_ENDPOINT={{cloud_public_storage_endpoint}} From 13a24cbf81ca9fff257c82a7ca0207f1e52bc73e Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Mon, 23 Jan 2023 14:43:16 +1100 Subject: [PATCH 228/616] updated env template for lms to add endpoint Signed-off-by: Deepak Devadathan --- ansible/roles/stack-sunbird/templates/sunbird_lms-service.env | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env index 0d5131b418..7b3796feb4 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env @@ -31,6 +31,7 @@ sunbird_encryption_key={{sunbird_encryption_key}} sunbird_encryption_mode={{sunbird_encryption_mode}} sunbird_account_name={{sunbird_public_storage_account_name}} sunbird_account_key={{sunbird_public_storage_account_key}} +sunbird_account_endpoint={{sunbird_public_storage_account_endpoint} sunbird_quartz_mode={{sunbird_sunbird_quartz_mode}} sunbird_web_url={{sunbird_web_url}} sunbird_msg_91_auth={{sunbird_msg_91_auth}} @@ -90,6 +91,7 @@ sunbird_gzip_enable={{sunbird_gzip_enable}} sunbird_gzip_size_threshold={{sunbird_gzip_size_threshold | default(262144)}} sunbird_analytics_blob_account_name={{sunbird_private_storage_account_name}} sunbird_analytics_blob_account_key={{sunbird_private_storage_account_key}} 
+sunbird_analytics_blob_account_endpoint={{sunbird_private_storage_account_endpoint}} # Optional for caching sunbird_cache_enable={{sunbird_cache_enable | default(false)}} # Set below variables if above true From cdb8365015f2c18e69ac1f0a1417cbe557a18a54 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Mon, 23 Jan 2023 14:45:05 +1100 Subject: [PATCH 229/616] corrected the typo in the template for lms Signed-off-by: Deepak Devadathan --- ansible/roles/stack-sunbird/templates/sunbird_lms-service.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env index 7b3796feb4..cbbf6612c2 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env @@ -31,7 +31,7 @@ sunbird_encryption_key={{sunbird_encryption_key}} sunbird_encryption_mode={{sunbird_encryption_mode}} sunbird_account_name={{sunbird_public_storage_account_name}} sunbird_account_key={{sunbird_public_storage_account_key}} -sunbird_account_endpoint={{sunbird_public_storage_account_endpoint} +sunbird_account_endpoint={{sunbird_public_storage_account_endpoint}} sunbird_quartz_mode={{sunbird_sunbird_quartz_mode}} sunbird_web_url={{sunbird_web_url}} sunbird_msg_91_auth={{sunbird_msg_91_auth}} From 07dad19c7ef8585afe9bdefa23827558cf21f21f Mon Sep 17 00:00:00 2001 From: Keshav Prasad Date: Mon, 30 Jan 2023 11:10:13 +0530 Subject: [PATCH 230/616] fix: ED-1164 adding missing UCI job Signed-off-by: Keshav Prasad --- .../UCI/jobs/uci-transport-socket/config.xml | 108 ++++++++++++ .../UCI/jobs/uci-transport-socket/config.xml | 77 +++++++++ .../UCI/jobs/uci-transport-socket/config.xml | 159 ++++++++++++++++++ 3 files changed, 344 insertions(+) create mode 100644 deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/UCI/jobs/uci-transport-socket/config.xml create mode 100644 
deploy/jenkins/jobs/Build/jobs/UCI/jobs/uci-transport-socket/config.xml create mode 100644 deploy/jenkins/jobs/Deploy/jobs/dev/jobs/UCI/jobs/uci-transport-socket/config.xml diff --git a/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/UCI/jobs/uci-transport-socket/config.xml b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/UCI/jobs/uci-transport-socket/config.xml new file mode 100644 index 0000000000..afba1c3fa0 --- /dev/null +++ b/deploy/jenkins/jobs/ArtifactUpload/jobs/dev/jobs/UCI/jobs/uci-transport-socket/config.xml @@ -0,0 +1,108 @@ + + + + + hudson.model.ParametersDefinitionProperty + com.sonyericsson.rebuild.RebuildSettings + + + + + false + + + + -1 + 10 + -1 + 1 + + + + + false + false + + + + + absolute_job_path + <font color=dimgray size=2><b>Do not change this value! The metadata.json will be copied from this job.</b></font> + Build/UCI/uci-transport-socket + false + + + image_tag + <font color=darkgreen size=2><b>OPTIONAL: Specify the tag to upload a specific image version to the container registry.</b></font> + + false + + + artifact_source + <font color=dimgray size=2><b> +ArtifactRepo - Push the docker image to container registry. 
+</b></font> + + + ArtifactRepo + + + + + + + 0 + 0 + + false + project + false + + + + + + + + Build/UCI/uci-transport-socket + + SUCCESS + 0 + BLUE + true + + + + + + + + 2 + + + https://github.com/project-sunbird/sunbird-devops.git + + + + + ${public_repo_branch} + + + false + + + + true + false + + 0 + false + + + + pipelines/upload/docker/Jenkinsfile + false + + + false + diff --git a/deploy/jenkins/jobs/Build/jobs/UCI/jobs/uci-transport-socket/config.xml b/deploy/jenkins/jobs/Build/jobs/UCI/jobs/uci-transport-socket/config.xml new file mode 100644 index 0000000000..c63ac79a73 --- /dev/null +++ b/deploy/jenkins/jobs/Build/jobs/UCI/jobs/uci-transport-socket/config.xml @@ -0,0 +1,77 @@ + + + + + hudson.model.ParametersDefinitionProperty + com.sonyericsson.rebuild.RebuildSettings + + + + + false + + + + -1 + 10 + -1 + 1 + + + + + false + false + + + + + github_release_tag + <font style="color:dimgray;font-size:14px;"><b> +<li>To build from a tag, use refs/tags/github_tag</li> +<li>To build from a branch, use refs/heads/github_branch</li> +<li>The default value of ${public_repo_branch} will be the release / tag version set in global configuration</li> +<li>To build from a differnt branch, replace the ${public_repo_branch} with your branch</li> +</b></font> + refs/heads/${public_repo_branch} + true + + + + + 0 + 0 + + false + project + false + + + + + + + + + + 2 + + + https://github.com/samagra-comms/transport-socket.git + + + + + master + + + false + + + + build/Jenkinsfile + false + + + false + diff --git a/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/UCI/jobs/uci-transport-socket/config.xml b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/UCI/jobs/uci-transport-socket/config.xml new file mode 100644 index 0000000000..b418c503df --- /dev/null +++ b/deploy/jenkins/jobs/Deploy/jobs/dev/jobs/UCI/jobs/uci-transport-socket/config.xml @@ -0,0 +1,159 @@ + + + + + hudson.model.ParametersDefinitionProperty + com.sonyericsson.rebuild.RebuildSettings + + + + + false + + + + -1 + 10 
+ -1 + 2 + + + + + false + false + + + + + absolute_job_path + <font color=dimgray size=2><b>Do not change this value! The metadata.json will be copied from this job.</b></font> + ArtifactUpload/dev/UCI/uci-transport-socket + false + + + image_tag + <font color=red size=2><b>CAUTION: If the value is blank, image tag will be taken from the latest metadata.json.</b></font> + + false + + + private_branch + + choice-parameter-2544395024638227 + 1 + + true + + + + true + + + uci-transport-socket + Deploy/dev/UCI/uci-transport-socket + + + ET_FORMATTED_HTML + true + + + branch_or_tag + + choice-parameter-2620434998790477 + 1 + + true + + + + true + + + uci-transport-socket + Deploy/dev/UCI/uci-transport-socket + + + ET_FORMATTED_HTML + true + + + role_name + + + + sunbird-deploy + + + + + + + 0 + 0 + + false + project + false + + + + + + + + ArtifactUpload/dev/UCI/uci-transport-socket + + SUCCESS + 0 + BLUE + true + + + + + + + + 2 + + + https://github.com/project-sunbird/sunbird-devops.git + + + + + uci-transport-socket + + + false + + + + true + false + + 0 + false + + + + kubernetes/pipelines/deploy_core/Jenkinsfile + false + + + false + From 0fd5d9a4da250ce4ac5eafad8a2aecc823c28a0d Mon Sep 17 00:00:00 2001 From: Kenneth Heung Date: Mon, 30 Jan 2023 16:05:06 +0800 Subject: [PATCH 231/616] adding oci-cli in bootstrap and Jenkins job template (#3715) --- ansible/bootstrap.yml | 13 ++++++++-- ansible/roles/oci-cli/defaults/main.yml | 1 + ansible/roles/oci-cli/tasks/main.yml | 24 +++++++++++++++++++ .../dev/jobs/Core/jobs/Bootstrap/config.xml | 5 ++-- 4 files changed, 39 insertions(+), 4 deletions(-) create mode 100644 ansible/roles/oci-cli/defaults/main.yml create mode 100644 ansible/roles/oci-cli/tasks/main.yml diff --git a/ansible/bootstrap.yml b/ansible/bootstrap.yml index b23479e833..e77a1cb038 100644 --- a/ansible/bootstrap.yml +++ b/ansible/bootstrap.yml @@ -48,7 +48,17 @@ roles: - role: aws-cli tags: - - aws_cli + - aws_cli + +- hosts: "{{ hosts }}" + become: yes 
+ ignore_unreachable: yes + vars_files: + - "{{inventory_dir}}/secrets.yml" + roles: + - role: oci-cli + tags: + - oci_cli - hosts: "{{ hosts| default('all') }}" become: yes @@ -60,4 +70,3 @@ - vm-agents-nodeexporter tags: - node_exporter - diff --git a/ansible/roles/oci-cli/defaults/main.yml b/ansible/roles/oci-cli/defaults/main.yml new file mode 100644 index 0000000000..00a8940a29 --- /dev/null +++ b/ansible/roles/oci-cli/defaults/main.yml @@ -0,0 +1 @@ +oci_cli_url: https://github.com/oracle/oci-cli/releases/download/v3.22.0/oci-cli-3.22.0-Ubuntu-18.04-Offline.zip diff --git a/ansible/roles/oci-cli/tasks/main.yml b/ansible/roles/oci-cli/tasks/main.yml new file mode 100644 index 0000000000..8f21263672 --- /dev/null +++ b/ansible/roles/oci-cli/tasks/main.yml @@ -0,0 +1,24 @@ +--- +- name: Download the installation file + get_url: + url: "{{ oci_cli_url }}" + dest: /tmp/ocicli.zip + +- name: Installing unzip + apt: + name: "{{item}}" + state: latest + with_items: + - zip + - unzip + +- name: Unzip the installer + unarchive: + src: /tmp/ocicli.zip + dest: /tmp/ + remote_src: yes + +- name: install oci cli + shell: ./oci-cli-installation/install.sh --install-dir {{ ansible_env.HOME }} --exec-dir {{ ansible_env.HOME }} --script-dir {{ ansible_env.HOME }} --accept-all-defaults + args: + chdir: /tmp/ diff --git a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml index b95bca2645..e11b5b5843 100644 --- a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml +++ b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/Bootstrap/config.xml @@ -96,6 +96,7 @@ return """<b>This parameter is not used</b>""" 'azure_cli', 'aws_cli', 'gcloud_cli', +'oci_cli' 'all'] true @@ -123,7 +124,7 @@ return """<b>This parameter is not used</b>""" false - + @@ -155,4 +156,4 @@ return """<b>This parameter is not used</b>""" false - \ No 
newline at end of file + From 612fbfa8fc4f5e8c6080e236215b136fa71d2035 Mon Sep 17 00:00:00 2001 From: Akhil <30873558+saiakhil46@users.noreply.github.com> Date: Fri, 3 Feb 2023 20:19:20 +0530 Subject: [PATCH 232/616] ED-1173 OfflineInstaller deploy job fix (#3721) * updated electronuserland/builder tag form wine to 16-wine * added npm update command for electronuserland/builder:16-wine --- ansible/roles/desktop-deploy/templates/build.sh.j2 | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ansible/roles/desktop-deploy/templates/build.sh.j2 b/ansible/roles/desktop-deploy/templates/build.sh.j2 index 2f431915e8..1fd121f219 100644 --- a/ansible/roles/desktop-deploy/templates/build.sh.j2 +++ b/ansible/roles/desktop-deploy/templates/build.sh.j2 @@ -4,8 +4,9 @@ set -eo pipefail cd {{offline_repo_location}}/ # Run the docker image and run the OS Specific build along with environment specific build -docker run -d --env-file envfile --env ELECTRON_CACHE="/root/.cache/electron" --env ELECTRON_BUILDER_CACHE="/root/.cache/electron-builder" --name offline_deploy -w /project electronuserland/builder:wine sleep infinity +docker run -d --env-file envfile --env ELECTRON_CACHE="/root/.cache/electron" --env ELECTRON_BUILDER_CACHE="/root/.cache/electron-builder" --name offline_deploy -w /project electronuserland/builder:16-wine sleep infinity docker cp . 
offline_deploy:/project/ +docker exec offline_deploy npm install -g npm@9.4.1 docker exec offline_deploy bash -x /project/setupOfflineInstaller.sh # Copy the built artifacts From bef49da693058119696970271639ea09d09f7a76 Mon Sep 17 00:00:00 2001 From: Raghupathi Date: Tue, 7 Feb 2023 15:37:47 +0530 Subject: [PATCH 233/616] Update config.j2 --- ansible/roles/ml-analytics-service/templates/config.j2 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/roles/ml-analytics-service/templates/config.j2 b/ansible/roles/ml-analytics-service/templates/config.j2 index 27da8be26b..70fe0ff018 100644 --- a/ansible/roles/ml-analytics-service/templates/config.j2 +++ b/ansible/roles/ml-analytics-service/templates/config.j2 @@ -150,7 +150,7 @@ survey_streaming_success = {{ ml_analytics_survey_log_folder_path }}/success.log survey_streaming_error = {{ ml_analytics_survey_log_folder_path }}/error.log -{% if ML_Cloud_Service_Provider is eq 'ORACLE' %} +{% if ML_Cloud_Service_Provider is equalto 'ORACLE' %} [ORACLE] @@ -164,7 +164,7 @@ region_name = {{ cloud_public_storage_region }} bucket_name = {{ cloud_storage_telemetry_bucketname }} -{% elif ML_Cloud_Service_Provider is eq 'gcloud' %} +{% elif ML_Cloud_Service_Provider is equalto 'gcloud' %} [GCP] @@ -172,7 +172,7 @@ secret_data = {{ ml_Cloud_secret_json_file }} bucket_name = {{ cloud_storage_telemetry_bucketname }} -{% elif ML_Cloud_Service_Provider is eq 'aws' %} +{% elif ML_Cloud_Service_Provider is equalto 'aws' %} [AWS] From c603f1e24692b5727d1c847d7f9f74f8c199ebbc Mon Sep 17 00:00:00 2001 From: santhosh-tg <93243580+santhosh-tg@users.noreply.github.com> Date: Wed, 8 Feb 2023 11:01:57 +0530 Subject: [PATCH 234/616] ED-700: Update Monitoring Stack Helm Chart (#3723) * Update CRDs Api version and definitions Remove depricated webhook crd-install * Let helm takecare of creating CRDs * Update admission webhook and rbac API version * Update admissionwebhook patch repo * Update apiVersions --- 
.../charts/grafana/templates/role.yaml | 2 +- .../charts/grafana/templates/rolebinding.yaml | 2 +- .../templates/clusterrole.yaml | 2 +- .../templates/clusterrolebinding.yaml | 2 +- .../crds/crd-alertmanager.yaml | 8522 ++++++------ .../crds/crd-podmonitor.yaml | 449 +- .../crds/crd-prometheus.yaml | 10950 ++++++++-------- .../crds/crd-prometheusrules.yaml | 143 +- .../crds/crd-servicemonitor.yaml | 831 +- .../crds/crd-thanosrulers.yaml | 8903 +++++++------ .../mutatingWebhookConfiguration.yaml | 6 +- .../validatingWebhookConfiguration.yaml | 6 +- .../templates/prometheus-operator/crds.yaml | 6 - .../prometheus-operator/values.yaml | 6 +- .../templates/role.yaml | 2 +- .../templates/rolebinding.yaml | 2 +- 16 files changed, 14914 insertions(+), 14920 deletions(-) delete mode 100755 kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/crds.yaml diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/charts/grafana/templates/role.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/charts/grafana/templates/role.yaml index c95c1d0424..6a673b7b7d 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/charts/grafana/templates/role.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/charts/grafana/templates/role.yaml @@ -1,5 +1,5 @@ {{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: {{ template "grafana.fullname" . 
}} diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/charts/grafana/templates/rolebinding.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/charts/grafana/templates/rolebinding.yaml index c42229bf92..74ec303061 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/charts/grafana/templates/rolebinding.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/charts/grafana/templates/rolebinding.yaml @@ -1,5 +1,5 @@ {{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: {{ template "grafana.fullname" . }} diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/charts/kube-state-metrics/templates/clusterrole.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/charts/kube-state-metrics/templates/clusterrole.yaml index 319aec16c2..a9198b823d 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/charts/kube-state-metrics/templates/clusterrole.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/charts/kube-state-metrics/templates/clusterrole.yaml @@ -1,5 +1,5 @@ {{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/charts/kube-state-metrics/templates/clusterrolebinding.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/charts/kube-state-metrics/templates/clusterrolebinding.yaml index 4635985aa0..160db8bd18 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/charts/kube-state-metrics/templates/clusterrolebinding.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -1,5 +1,5 @@ {{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding 
metadata: labels: diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-alertmanager.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-alertmanager.yaml index cbf9fc27f9..2609b2f09b 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-alertmanager.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-alertmanager.yaml @@ -1,25 +1,12 @@ # https://raw.githubusercontent.com/coreos/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.2.4 - helm.sh/hook: crd-install creationTimestamp: null name: alertmanagers.monitoring.coreos.com spec: - additionalPrinterColumns: - - JSONPath: .spec.version - description: The version of Alertmanager - name: Version - type: string - - JSONPath: .spec.replicas - description: The desired replicas number of Alertmanagers - name: Replicas - type: integer - - JSONPath: .metadata.creationTimestamp - name: Age - type: date group: monitoring.coreos.com names: kind: Alertmanager @@ -28,4474 +15,4485 @@ spec: singular: alertmanager preserveUnknownFields: false scope: Namespaced - subresources: {} - validation: - openAPIV3Schema: - description: Alertmanager describes an Alertmanager cluster. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the Alertmanager - cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - additionalPeers: - description: AdditionalPeers allows injecting a set of additional Alertmanagers - to peer with to form a highly available cluster. - items: - type: string - type: array - affinity: - description: If specified, the pod's scheduling constraints. - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes - that satisfy the affinity expressions specified by this field, - but it may choose a node that violates one or more of the - expressions. The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node that meets - all of the scheduling requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to the sum - if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all - objects with implicit weight 0 (i.e. it's a no-op). A null - preferred scheduling term matches no objects (i.e. is also - a no-op). + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Alertmanager describes an Alertmanager cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the desired behavior of the Alertmanager + cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + additionalPeers: + description: AdditionalPeers allows injecting a set of additional Alertmanagers + to peer with to form a highly available cluster. + items: + type: string + type: array + affinity: + description: If specified, the pod's scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the affinity expressions specified by this field, + but it may choose a node that violates one or more of the + expressions. The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node that meets + all of the scheduling requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the sum + if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. 
+ items: + description: An empty preferred scheduling term matches all + objects with implicit weight 0 (i.e. it's a no-op). A null + preferred scheduling term matches no objects (i.e. is also + a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. 
If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. properties: - preference: - description: A node selector term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. 
- items: + nodeSelectorTerms: + description: Required. A list of node selector terms. The + terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. - items: + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding - nodeSelectorTerm, in the range 1-100. 
- format: int32 - type: integer + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array required: - - preference - - weight + - nodeSelectorTerms type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. 
due to an update), the system may or may not try to - eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The - terms are ORed. - items: - description: A null or empty node selector term matches - no objects. The requirements of them are ANDed. The - TopologySelectorTerm type implements a subset of the - NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the affinity expressions specified by this field, + but it may choose a node that violates one or more of the + expressions. The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node that meets + all of the scheduling requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the sum + if the node has pods which matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. properties: - key: - description: The label key that the selector - applies to. 
- type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. items: - type: string + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object type: array - required: - - key - - operator + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. 
- items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may not + try to eventually evict the pod from its node. When there + are multiple elements, the lists of nodes corresponding to + each podAffinityTerm are intersected, i.e. all terms must + be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) that + this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label query over a set of resources, in + this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any + node on which any of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some other + pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the anti-affinity expressions specified by this + field, but it may choose a node that violates one or more + of the expressions. The node that is most preferred is the + one with the greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource request, + requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field + and adding "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; the node(s) with + the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. items: - type: string + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object type: array - required: - - key - - operator + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. 
+ type: object type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate - this pod in the same node, zone, etc. as some other pod(s)). + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will not + be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms must + be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) that + this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label query over a set of resources, in + this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any + node on which any of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + baseImage: + description: Base image that is used to deploy pods, without tag. + type: string + configMaps: + description: ConfigMaps is a list of ConfigMaps in the same namespace + as the Alertmanager object, which shall be mounted into the Alertmanager + Pods. The ConfigMaps are mounted into /etc/alertmanager/configmaps/. + items: + type: string + type: array + configSecret: + description: ConfigSecret is the name of a Kubernetes Secret in the + same namespace as the Alertmanager object, which contains configuration + for this Alertmanager instance. Defaults to 'alertmanager-' + The secret is mounted into /etc/alertmanager/config. + type: string + containers: + description: Containers allows injecting additional containers. This + is meant to allow adding an authentication proxy to an Alertmanager + pod. + items: + description: A single application container that you want to run within + a pod. properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes - that satisfy the affinity expressions specified by this field, - but it may choose a node that violates one or more of the - expressions. The node that is most preferred is the one with - the greatest sum of weights, i.e. 
for each node that meets - all of the scheduling requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to the sum - if the node has pods which matches the corresponding podAffinityTerm; - the node(s) with the highest sum are the most preferred. + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will be + unchanged. The $(VAR_NAME) syntax can be escaped with a double + $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + regardless of whether the variable exists or not. Cannot be + updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The + docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. The $(VAR_NAME) syntax + can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. 
items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) + description: EnvVar represents an environment variable present + in a Container. properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a + variable cannot be resolved, the reference in the input + string will be unchanged. The $(VAR_NAME) syntax can be + escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable + exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. + configMapKeyRef: + description: Selects a key of a ConfigMap. properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement is - a selector that contains values, a key, and - an operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. 
This array - is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". - The requirements are ANDed. - type: object + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey matches - that of any node on which any of the selected pods - is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. 
If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may or may not - try to eventually evict the pod from its node. When there - are multiple elements, the lists of nodes corresponding to - each podAffinityTerm are intersected, i.e. all terms must - be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) that - this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of pods - is running - properties: - labelSelector: - description: A label query over a set of resources, in - this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values - array must be non-empty. If the operator is - Exists or DoesNotExist, the values array must - be empty. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
- A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath type: object - type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of any - node on which any of the selected pods is running. Empty - topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. - avoid putting this pod in the same node, zone, etc. as some other - pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes - that satisfy the anti-affinity expressions specified by this - field, but it may choose a node that violates one or more - of the expressions. The node that is most preferred is the - one with the greatest sum of weights, i.e. 
for each node that - meets all of the scheduling requirements (resource request, - requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field - and adding "weight" to the sum if the node has pods which - matches the corresponding podAffinityTerm; the node(s) with - the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement is - a selector that contains values, a key, and - an operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. This array - is replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". - The requirements are ANDed. - type: object + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey matches - that of any node on which any of the selected pods - is running. Empty topologyKey is not allowed. 
- type: string - required: - - topologyKey type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer required: - - podAffinityTerm - - weight + - name type: object type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by - this field are not met at scheduling time, the pod will not - be scheduled onto the node. If the anti-affinity requirements - specified by this field cease to be met at some point during - pod execution (e.g. due to a pod label update), the system - may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all terms must - be satisfied. + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be a + C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) that - this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of pods - is running + description: EnvFromSource represents the source of a set of + ConfigMaps properties: - labelSelector: - description: A label query over a set of resources, in - this case pods. 
+ configMapRef: + description: The ConfigMap to select from properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values - array must be non-empty. If the operator is - Exists or DoesNotExist, the values array must - be empty. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. - type: object + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of any - node on which any of the selected pods is running. Empty - topologyKey is not allowed. + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. type: string - required: - - topologyKey + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object type: object type: array - type: object - type: object - baseImage: - description: Base image that is used to deploy pods, without tag. - type: string - configMaps: - description: ConfigMaps is a list of ConfigMaps in the same namespace - as the Alertmanager object, which shall be mounted into the Alertmanager - Pods. The ConfigMaps are mounted into /etc/alertmanager/configmaps/. - items: - type: string - type: array - configSecret: - description: ConfigSecret is the name of a Kubernetes Secret in the - same namespace as the Alertmanager object, which contains configuration - for this Alertmanager instance. Defaults to 'alertmanager-' - The secret is mounted into /etc/alertmanager/config. 
- type: string - containers: - description: Containers allows injecting additional containers. This - is meant to allow adding an authentication proxy to an Alertmanager - pod. - items: - description: A single application container that you want to run within - a pod. - properties: - args: - description: 'Arguments to the entrypoint. The docker image''s - CMD is used if this is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. If a variable - cannot be resolved, the reference in the input string will be - unchanged. The $(VAR_NAME) syntax can be escaped with a double - $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - regardless of whether the variable exists or not. Cannot be - updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' type: string - type: array - command: - description: 'Entrypoint array. Not executed within a shell. The - docker image''s ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, the reference - in the input string will be unchanged. The $(VAR_NAME) syntax - can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable exists - or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. 
+ Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' type: string - type: array - env: - description: List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable present - in a Container. + lifecycle: + description: Actions that the management system should take in + response to container lifecycle events. Cannot be updated. properties: - name: - description: Name of the environment variable. Must be a - C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. 
+ The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. properties: - key: - description: The key to select. + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean required: - - key + - port type: object - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, metadata.labels, metadata.annotations, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' 
+ tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - fieldPath + - port type: object - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The reason for termination is passed to + the handler. The Pod''s termination grace period countdown + begins before the PreStop hooked is executed. Regardless + of the outcome of the handler, the container will eventually + terminate within the Pod''s termination grace period. Other + management of the container blocks until the hook completes + or until the termination grace period is reached. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - divisor: - description: Specifies the output format of the - exposed resources, defaults to "1" + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - resource: - description: 'Required: resource to select' + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - - resource + - port type: object - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. 
TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - key + - port type: object type: object - required: - - name type: object - type: array - envFrom: - description: List of sources to populate environment variables - in the container. The keys defined within a source must be a - C_IDENTIFIER. All invalid keys will be reported as an event - when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take - precedence. Values defined by an Env with a duplicate key will - take precedence. Cannot be updated. - items: - description: EnvFromSource represents the source of a set of - ConfigMaps + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - configMapRef: - description: The ConfigMap to select from + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. properties: - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. 
type: string - optional: - description: Specify whether the ConfigMap must be defined - type: boolean + required: + - port type: object - prefix: - description: An optional identifier to prepend to each key - in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - optional: - description: Specify whether the Secret must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. 
More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer type: object - type: array - image: - description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management - to default or override container images in workload controllers - like Deployments and StatefulSets.' - type: string - imagePullPolicy: - description: 'Image pull policy. One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent - otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' - type: string - lifecycle: - description: Actions that the management system should take in - response to container lifecycle events. Cannot be updated. - properties: - postStart: - description: 'PostStart is called immediately after a container - is created. If the handler fails, the container is terminated - and restarted according to its restart policy. Other management - of the container blocks until the hook completes. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + name: + description: Name of the container specified as a DNS_LABEL. Each + container in a pod must have a unique name (DNS_LABEL). Cannot + be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing + a port here gives the system additional information about the + network connections a container uses, but is primarily informational. + Not specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Cannot be updated. + items: + description: ContainerPort represents a network port in a single + container. 
properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod must + have a unique name. Name for the port that can be referred + to by services. + type: string + protocol: + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + description: 'Periodic probe of container service readiness. Container + will be removed from service endpoints if the probe fails. Cannot + be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. 
- x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: 'PreStop is called immediately before a container - is terminated due to an API request or management event - such as liveness/startup probe failure, preemption, resource - contention, etc. The handler is not called if the container - crashes or exits. The reason for termination is passed to - the handler. The Pod''s termination grace period countdown - begins before the PreStop hooked is executed. Regardless - of the outcome of the handler, the container will eventually - terminate within the Pod''s termination grace period. Other - management of the container blocks until the hook completes - or until the termination grace period is reached. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. 
- items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: 'Periodic probe of container liveness. Container - will be restarted if the probe fails. Cannot be updated. More - info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. 
To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. 
- x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - name: - description: Name of the container specified as a DNS_LABEL. Each - container in a pod must have a unique name (DNS_LABEL). Cannot - be updated. - type: string - ports: - description: List of ports to expose from the container. Exposing - a port here gives the system additional information about the - network connections a container uses, but is primarily informational. - Not specifying a port here DOES NOT prevent that port from being - exposed. Any port which is listening on the default "0.0.0.0" - address inside a container will be accessible from the network. - Cannot be updated. - items: - description: ContainerPort represents a network port in a single - container. + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. More info: + https://kubernetes.io/docs/concepts/policy/security-context/ + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. 
AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. 
+ type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. This field is beta-level and may be + disabled with the WindowsRunAsUserName feature flag. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod will + be restarted, just as if the livenessProbe failed. This can + be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. This + cannot be updated. This is an alpha feature enabled by the StartupProbe + feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - containerPort: - description: Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. format: int32 type: integer - hostIP: - description: What host IP to bind the external port to. - type: string - hostPort: - description: Number of port to expose on the host. If specified, - this must be a valid port number, 0 < x < 65536. If HostNetwork - is specified, this must match ContainerPort. Most containers - do not need this. + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer - name: - description: If specified, this must be an IANA_SVC_NAME - and unique within the pod. 
Each named port in a pod must - have a unique name. Name for the port that can be referred - to by services. - type: string - protocol: - description: Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - readinessProbe: - description: 'Periodic probe of container service readiness. Container - will be removed from service endpoints if the probe fails. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. 
Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for + stdin in the container runtime. If this is not set, reads from + stdin in the container will always result in EOF. Default is + false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin + channel after it has been opened by a single attach. When stdin + is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container + start, is empty until the first client attaches to stdin, and + then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container + is restarted. If this flag is false, a container processes that + reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated by the + node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' 
+ type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. + File will use the contents of terminationMessagePath to populate + the container status message on both success and failure. FallbackToLogsOnError + will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. This is a beta feature. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. 
+ name: + description: name must match the name of a persistentVolumeClaim + in the pod type: string required: - - port + - devicePath + - name type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within + a container. properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. 
More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - resources: - description: 'Compute Resources required by this container. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - securityContext: - description: 'Security options the pod should run with. More info: - https://kubernetes.io/docs/concepts/policy/security-context/ - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' - properties: - allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls whether a - process can gain more privileges than its parent process. - This bool directly controls if the no_new_privs flag will - be set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run as Privileged - 2) has CAP_SYS_ADMIN' - type: boolean - capabilities: - description: The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the - container runtime. 
- properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - type: object - privileged: - description: Run container in privileged mode. Processes in - privileged containers are essentially equivalent to root - on the host. Defaults to false. - type: boolean - procMount: - description: procMount denotes the type of proc mount to use - for the containers. The default is DefaultProcMount which - uses the container runtime defaults for readonly paths and - masked paths. This requires the ProcMountType feature flag - to be enabled. - type: string - readOnlyRootFilesystem: - description: Whether this container has a read-only root filesystem. - Default is false. - type: boolean - runAsGroup: - description: The GID to run the entrypoint of the container - process. Uses runtime default if unset. May also be set - in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. - format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container must run as a non-root - user. If true, the Kubelet will validate the image at runtime - to ensure that it does not run as UID 0 (root) and fail - to start the container if it does. If unset or false, no - such validation will be performed. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint of the container - process. Defaults to user specified in image metadata if - unspecified. May also be set in PodSecurityContext. 
If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random - SELinux context for each container. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. - properties: - level: - description: Level is SELinux level label that applies - to the container. + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. type: string - role: - description: Role is a SELinux role label that applies - to the container. + name: + description: This must match the Name of a Volume. type: string - type: - description: Type is a SELinux type label that applies - to the container. + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). type: string - user: - description: User is a SELinux user label that applies - to the container. + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. type: string + required: + - mountPath + - name type: object - windowsOptions: - description: The Windows specific settings applied to all - containers. If unspecified, the options from the PodSecurityContext - will be used. 
If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might be + configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + externalUrl: + description: The external URL the Alertmanager instances will be available + under. This is necessary to generate correct URLs. This is necessary + if Alertmanager is not served from root of a DNS name. + type: string + image: + description: Image if specified has precedence over baseImage, tag and + sha combinations. Specifying the version is still necessary to ensure + the Prometheus Operator knows what version of Alertmanager is being + configured. + type: string + imagePullSecrets: + description: An optional list of references to secrets in the same namespace + to use for pulling prometheus and alertmanager images from registries + see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + items: + description: LocalObjectReference contains enough information to let + you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + initContainers: + description: 'InitContainers allows adding initContainers to the pod + definition. Those can be used to e.g. fetch secrets for injection + into the Alertmanager configuration from external sources. Any errors + during the execution of an initContainer will lead to a restart of + the Pod. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + Using initContainers for any use case other then secret fetching is + entirely outside the scope of what the maintainers will support and + by doing so, you accept that this behaviour may break at any time + without notice.' + items: + description: A single application container that you want to run within + a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will be + unchanged. The $(VAR_NAME) syntax can be escaped with a double + $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + regardless of whether the variable exists or not. Cannot be + updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The + docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. The $(VAR_NAME) syntax + can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. 
properties: - gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named - by the GMSACredentialSpecName field. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. type: string - runAsUserName: - description: The UserName in Windows to run the entrypoint - of the container process. Defaults to the user specified - in image metadata if unspecified. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. This field is beta-level and may be - disabled with the WindowsRunAsUserName feature flag. + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a + variable cannot be resolved, the reference in the input + string will be unchanged. The $(VAR_NAME) syntax can be + escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable + exists or not. Defaults to "".' type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name type: object - type: object - startupProbe: - description: 'StartupProbe indicates that the Pod has successfully - initialized. If specified, no other probes are executed until - this completes successfully. If this probe fails, the Pod will - be restarted, just as if the livenessProbe failed. This can - be used to provide different probe parameters at the beginning - of a Pod''s lifecycle, when it might take a long time to load - data or warm a cache, than during steady-state operation. This - cannot be updated. This is an alpha feature enabled by the StartupProbe - feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be a + C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of + ConfigMaps properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. 
- items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. 
- format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - stdin: - description: Whether this container should allocate a buffer for - stdin in the container runtime. If this is not set, reads from - stdin in the container will always result in EOF. Default is - false. 
- type: boolean - stdinOnce: - description: Whether the container runtime should close the stdin - channel after it has been opened by a single attach. When stdin - is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container - start, is empty until the first client attaches to stdin, and - then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container - is restarted. If this flag is false, a container processes that - reads from stdin will never receive an EOF. Default is false - type: boolean - terminationMessagePath: - description: 'Optional: Path at which the file to which the container''s - termination message will be written is mounted into the container''s - filesystem. Message written is intended to be brief final status, - such as an assertion failure message. Will be truncated by the - node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. Defaults to /dev/termination-log. - Cannot be updated.' - type: string - terminationMessagePolicy: - description: Indicate how the termination message should be populated. - File will use the contents of terminationMessagePath to populate - the container status message on both success and failure. FallbackToLogsOnError - will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever - is smaller. Defaults to File. Cannot be updated. - type: string - tty: - description: Whether this container should allocate a TTY for - itself, also requires 'stdin' to be true. Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of block devices to be - used by the container. This is a beta feature. 
- items: - description: volumeDevice describes a mapping of a raw block - device within a container. - properties: - devicePath: - description: devicePath is the path inside of the container - that the device will be mapped to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - description: Pod volumes to mount into the container's filesystem. - Cannot be updated. - items: - description: VolumeMount describes a mounting of a Volume within - a container. - properties: - mountPath: - description: Path within the container at which the volume - should be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are - propagated from the host to container and the other way - around. When not set, MountPropagationNone is used. This - field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise - (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). - type: string - subPathExpr: - description: Expanded path within the volume from which - the container's volume should be mounted. Behaves similarly - to SubPath but environment variable references $(VAR_NAME) - are expanded using the container's environment. Defaults - to "" (volume's root). SubPathExpr and SubPath are mutually - exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - description: Container's working directory. If not specified, - the container runtime's default will be used, which might be - configured in the container image. Cannot be updated. 
- type: string - required: - - name - type: object - type: array - externalUrl: - description: The external URL the Alertmanager instances will be available - under. This is necessary to generate correct URLs. This is necessary - if Alertmanager is not served from root of a DNS name. - type: string - image: - description: Image if specified has precedence over baseImage, tag and - sha combinations. Specifying the version is still necessary to ensure - the Prometheus Operator knows what version of Alertmanager is being - configured. - type: string - imagePullSecrets: - description: An optional list of references to secrets in the same namespace - to use for pulling prometheus and alertmanager images from registries - see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod - items: - description: LocalObjectReference contains enough information to let - you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - initContainers: - description: 'InitContainers allows adding initContainers to the pod - definition. Those can be used to e.g. fetch secrets for injection - into the Alertmanager configuration from external sources. Any errors - during the execution of an initContainer will lead to a restart of - the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - Using initContainers for any use case other then secret fetching is - entirely outside the scope of what the maintainers will support and - by doing so, you accept that this behaviour may break at any time - without notice.' - items: - description: A single application container that you want to run within - a pod. - properties: - args: - description: 'Arguments to the entrypoint. 
The docker image''s - CMD is used if this is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. If a variable - cannot be resolved, the reference in the input string will be - unchanged. The $(VAR_NAME) syntax can be escaped with a double - $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - regardless of whether the variable exists or not. Cannot be - updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' type: string - type: array - command: - description: 'Entrypoint array. Not executed within a shell. The - docker image''s ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, the reference - in the input string will be unchanged. The $(VAR_NAME) syntax - can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable exists - or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' type: string - type: array - env: - description: List of environment variables to set in the container. - Cannot be updated. 
- items: - description: EnvVar represents an environment variable present - in a Container. + lifecycle: + description: Actions that the management system should take in + response to container lifecycle events. Cannot be updated. properties: - name: - description: Name of the environment variable. Must be a - C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. properties: - key: - description: The key to select. + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean required: - - key + - port type: object - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, metadata.labels, metadata.annotations, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". 
- type: string - fieldPath: - description: Path of the field to select in the - specified API version. + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - fieldPath + - port type: object - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The reason for termination is passed to + the handler. The Pod''s termination grace period countdown + begins before the PreStop hooked is executed. Regardless + of the outcome of the handler, the container will eventually + terminate within the Pod''s termination grace period. Other + management of the container blocks until the hook completes + or until the termination grace period is reached. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. 
To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - divisor: - description: Specifies the output format of the - exposed resources, defaults to "1" + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - resource: - description: 'Required: resource to select' + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - - resource + - port type: object - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - key + - port type: object type: object - required: - - name type: object - type: array - envFrom: - description: List of sources to populate environment variables - in the container. The keys defined within a source must be a - C_IDENTIFIER. All invalid keys will be reported as an event - when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take - precedence. Values defined by an Env with a duplicate key will - take precedence. Cannot be updated. - items: - description: EnvFromSource represents the source of a set of - ConfigMaps + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - configMapRef: - description: The ConfigMap to select from + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. 
The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string - optional: - description: Specify whether the ConfigMap must be defined - type: boolean + required: + - port type: object - prefix: - description: An optional identifier to prepend to each key - in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - optional: - description: Specify whether the Secret must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer type: object - type: array - image: - description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management - to default or override container images in workload controllers - like Deployments and StatefulSets.' - type: string - imagePullPolicy: - description: 'Image pull policy. 
One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent - otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' - type: string - lifecycle: - description: Actions that the management system should take in - response to container lifecycle events. Cannot be updated. - properties: - postStart: - description: 'PostStart is called immediately after a container - is created. If the handler fails, the container is terminated - and restarted according to its restart policy. Other management - of the container blocks until the hook completes. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. 
- items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: 'PreStop is called immediately before a container - is terminated due to an API request or management event - such as liveness/startup probe failure, preemption, resource - contention, etc. The handler is not called if the container - crashes or exits. The reason for termination is passed to - the handler. The Pod''s termination grace period countdown - begins before the PreStop hooked is executed. Regardless - of the outcome of the handler, the container will eventually - terminate within the Pod''s termination grace period. Other - management of the container blocks until the hook completes - or until the termination grace period is reached. 
More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. 
TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: 'Periodic probe of container liveness. Container - will be restarted if the probe fails. Cannot be updated. More - info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. + name: + description: Name of the container specified as a DNS_LABEL. Each + container in a pod must have a unique name (DNS_LABEL). Cannot + be updated. + type: string + ports: + description: List of ports to expose from the container. 
Exposing + a port here gives the system additional information about the + network connections a container uses, but is primarily informational. + Not specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Cannot be updated. + items: + description: ContainerPort represents a network port in a single + container. properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + hostPort: + description: Number of port to expose on the host. If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. 
Each named port in a pod must + have a unique name. Name for the port that can be referred + to by services. type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + protocol: + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true required: - - port + - containerPort type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - name: - description: Name of the container specified as a DNS_LABEL. Each - container in a pod must have a unique name (DNS_LABEL). Cannot - be updated. - type: string - ports: - description: List of ports to expose from the container. 
Exposing - a port here gives the system additional information about the - network connections a container uses, but is primarily informational. - Not specifying a port here DOES NOT prevent that port from being - exposed. Any port which is listening on the default "0.0.0.0" - address inside a container will be accessible from the network. - Cannot be updated. - items: - description: ContainerPort represents a network port in a single - container. + type: array + readinessProbe: + description: 'Periodic probe of container service readiness. Container + will be removed from service endpoints if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - containerPort: - description: Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. format: int32 type: integer - hostIP: - description: What host IP to bind the external port to. - type: string - hostPort: - description: Number of port to expose on the host. If specified, - this must be a valid port number, 0 < x < 65536. If HostNetwork - is specified, this must match ContainerPort. Most containers - do not need this. 
+ httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer - name: - description: If specified, this must be an IANA_SVC_NAME - and unique within the pod. Each named port in a pod must - have a unique name. Name for the port that can be referred - to by services. - type: string - protocol: - description: Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - readinessProbe: - description: 'Periodic probe of container service readiness. Container - will be removed from service endpoints if the probe fails. Cannot - be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. 
- items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. 
- format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - resources: - description: 'Compute Resources required by this container. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: + description: 'Requests describes the minimum amount of compute + resources required. 
If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. More info: + https://kubernetes.io/docs/concepts/policy/security-context/ + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. type: string - description: 'Requests describes the minimum amount of compute - resources required. 
If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - securityContext: - description: 'Security options the pod should run with. More info: - https://kubernetes.io/docs/concepts/policy/security-context/ - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' - properties: - allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls whether a - process can gain more privileges than its parent process. - This bool directly controls if the no_new_privs flag will - be set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run as Privileged - 2) has CAP_SYS_ADMIN' - type: boolean - capabilities: - description: The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the - container runtime. - properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. 
If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type + role: + description: Role is a SELinux role label that applies + to the container. type: string - type: array - type: object - privileged: - description: Run container in privileged mode. Processes in - privileged containers are essentially equivalent to root - on the host. Defaults to false. - type: boolean - procMount: - description: procMount denotes the type of proc mount to use - for the containers. The default is DefaultProcMount which - uses the container runtime defaults for readonly paths and - masked paths. This requires the ProcMountType feature flag - to be enabled. - type: string - readOnlyRootFilesystem: - description: Whether this container has a read-only root filesystem. - Default is false. - type: boolean - runAsGroup: - description: The GID to run the entrypoint of the container - process. Uses runtime default if unset. May also be set - in PodSecurityContext. 
If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. - format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container must run as a non-root - user. If true, the Kubelet will validate the image at runtime - to ensure that it does not run as UID 0 (root) and fail - to start the container if it does. If unset or false, no - such validation will be performed. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint of the container - process. Defaults to user specified in image metadata if - unspecified. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random - SELinux context for each container. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. 
+ properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. This field is beta-level and may be + disabled with the WindowsRunAsUserName feature flag. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod will + be restarted, just as if the livenessProbe failed. This can + be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. This + cannot be updated. This is an alpha feature enabled by the StartupProbe + feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for + stdin in the container runtime. If this is not set, reads from + stdin in the container will always result in EOF. Default is + false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin + channel after it has been opened by a single attach. When stdin + is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container + start, is empty until the first client attaches to stdin, and + then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container + is restarted. If this flag is false, a container processes that + reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated by the + node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. + File will use the contents of terminationMessagePath to populate + the container status message on both success and failure. FallbackToLogsOnError + will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. This is a beta feature. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. 
- type: string - type: - description: Type is a SELinux type label that applies - to the container. + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. type: string - user: - description: User is a SELinux user label that applies - to the container. + name: + description: name must match the name of a persistentVolumeClaim + in the pod type: string + required: + - devicePath + - name type: object - windowsOptions: - description: The Windows specific settings applied to all - containers. If unspecified, the options from the PodSecurityContext - will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within + a container. properties: - gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named - by the GMSACredentialSpecName field. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. - type: string - runAsUserName: - description: The UserName in Windows to run the entrypoint - of the container process. Defaults to the user specified - in image metadata if unspecified. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. 
This field is beta-level and may be - disabled with the WindowsRunAsUserName feature flag. - type: string - type: object - type: object - startupProbe: - description: 'StartupProbe indicates that the Pod has successfully - initialized. If specified, no other probes are executed until - this completes successfully. If this probe fails, the Pod will - be restarted, just as if the livenessProbe failed. This can - be used to provide different probe parameters at the beginning - of a Pod''s lifecycle, when it might take a long time to load - data or warm a cache, than during steady-state operation. This - cannot be updated. This is an alpha feature enabled by the StartupProbe - feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. 
When not set, MountPropagationNone is used. This + field is beta in 1.10. type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + name: + description: This must match the Name of a Volume. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. 
TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true required: - - port + - mountPath + - name type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might be + configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + listenLocal: + description: ListenLocal makes the Alertmanager server listen on loopback, + so that it does not bind against the Pod IP. Note this is only for + the Alertmanager UI, not the gossip communication. + type: boolean + logFormat: + description: Log format for Alertmanager to be configured with. + type: string + logLevel: + description: Log level for Alertmanager to be configured with. + type: string + nodeSelector: + additionalProperties: + type: string + description: Define which Nodes the Pods are scheduled on. 
+ type: object + paused: + description: If set to true all actions on the underlaying managed objects + are not goint to be performed, except for delete actions. + type: boolean + podMetadata: + description: PodMetadata configures Labels and Annotations which are + propagated to the alertmanager pods. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored + with a resource that may be set by external tools to store and + retrieve arbitrary metadata. They are not queryable and should + be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' type: object - stdin: - description: Whether this container should allocate a buffer for - stdin in the container runtime. If this is not set, reads from - stdin in the container will always result in EOF. Default is - false. - type: boolean - stdinOnce: - description: Whether the container runtime should close the stdin - channel after it has been opened by a single attach. When stdin - is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container - start, is empty until the first client attaches to stdin, and - then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container - is restarted. If this flag is false, a container processes that - reads from stdin will never receive an EOF. Default is false - type: boolean - terminationMessagePath: - description: 'Optional: Path at which the file to which the container''s - termination message will be written is mounted into the container''s - filesystem. Message written is intended to be brief final status, - such as an assertion failure message. Will be truncated by the - node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. Defaults to /dev/termination-log. 
- Cannot be updated.' - type: string - terminationMessagePolicy: - description: Indicate how the termination message should be populated. - File will use the contents of terminationMessagePath to populate - the container status message on both success and failure. FallbackToLogsOnError - will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever - is smaller. Defaults to File. Cannot be updated. - type: string - tty: - description: Whether this container should allocate a TTY for - itself, also requires 'stdin' to be true. Default is false. + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to + organize and categorize (scope and select) objects. May match + selectors of replication controllers and services. More info: + http://kubernetes.io/docs/user-guide/labels' + type: object + type: object + portName: + description: Port name used for the pods and governing service. This + defaults to web + type: string + priorityClassName: + description: Priority class assigned to the Pods + type: string + replicas: + description: Size is the expected size of the alertmanager cluster. + The controller will eventually make the size of the running cluster + equal to the expected size. + format: int32 + type: integer + resources: + description: Define resources requests and limits for single Pods. + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount of compute resources + required. 
If Requests is omitted for a container, it defaults + to Limits if that is explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + retention: + description: Time duration Alertmanager shall retain data for. Default + is '120h', and must match the regular expression `[0-9]+(ms|s|m|h)` + (milliseconds seconds minutes hours). + type: string + routePrefix: + description: The route prefix Alertmanager registers HTTP handlers for. + This is useful, if using ExternalURL and a proxy is rewriting HTTP + routes of a request, and the actual ExternalURL is still true, but + the server serves requests under a different route prefix. For example + for use with `kubectl proxy`. + type: string + secrets: + description: Secrets is a list of Secrets in the same namespace as the + Alertmanager object, which shall be mounted into the Alertmanager + Pods. The Secrets are mounted into /etc/alertmanager/secrets/. + items: + type: string + type: array + securityContext: + description: SecurityContext holds pod-level security attributes and + common container settings. This defaults to the default PodSecurityContext. + properties: + fsGroup: + description: "A special supplemental group that applies to all containers + in a pod. Some volume types allow the Kubelet to change the ownership + of that volume to be owned by the pod: \n 1. The owning GID will + be the FSGroup 2. The setgid bit is set (new files created in + the volume will be owned by FSGroup) 3. The permission bits are + OR'd with rw-rw---- \n If unset, the Kubelet will not modify the + ownership and permissions of any volume." + format: int64 + type: integer + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in SecurityContext. 
If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. type: boolean - volumeDevices: - description: volumeDevices is the list of block devices to be - used by the container. This is a beta feature. + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. May + also be set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux + context for each container. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + properties: + level: + description: Level is SELinux level label that applies to the + container. + type: string + role: + description: Role is a SELinux role label that applies to the + container. + type: string + type: + description: Type is a SELinux type label that applies to the + container. + type: string + user: + description: User is a SELinux user label that applies to the + container. 
+ type: string + type: object + supplementalGroups: + description: A list of groups applied to the first process run in + each container, in addition to the container's primary GID. If + unspecified, no groups will be added to any container. items: - description: volumeDevice describes a mapping of a raw block - device within a container. - properties: - devicePath: - description: devicePath is the path inside of the container - that the device will be mapped to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object + format: int64 + type: integer type: array - volumeMounts: - description: Pod volumes to mount into the container's filesystem. - Cannot be updated. + sysctls: + description: Sysctls hold a list of namespaced sysctls used for + the pod. Pods with unsupported sysctls (by the container runtime) + might fail to launch. items: - description: VolumeMount describes a mounting of a Volume within - a container. + description: Sysctl defines a kernel parameter to be set properties: - mountPath: - description: Path within the container at which the volume - should be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are - propagated from the host to container and the other way - around. When not set, MountPropagationNone is used. This - field is beta in 1.10. - type: string name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise - (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). + description: Name of a property to set type: string - subPathExpr: - description: Expanded path within the volume from which - the container's volume should be mounted. 
Behaves similarly - to SubPath but environment variable references $(VAR_NAME) - are expanded using the container's environment. Defaults - to "" (volume's root). SubPathExpr and SubPath are mutually - exclusive. + value: + description: Value of a property to set type: string required: - - mountPath - name + - value type: object type: array - workingDir: - description: Container's working directory. If not specified, - the container runtime's default will be used, which might be - configured in the container image. Cannot be updated. - type: string - required: - - name + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. This field is alpha-level and is only + honored by servers that enable the WindowsGMSA feature flag. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint of + the container process. Defaults to the user specified in image + metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. This + field is beta-level and may be disabled with the WindowsRunAsUserName + feature flag. 
+ type: string + type: object type: object - type: array - listenLocal: - description: ListenLocal makes the Alertmanager server listen on loopback, - so that it does not bind against the Pod IP. Note this is only for - the Alertmanager UI, not the gossip communication. - type: boolean - logFormat: - description: Log format for Alertmanager to be configured with. - type: string - logLevel: - description: Log level for Alertmanager to be configured with. - type: string - nodeSelector: - additionalProperties: + serviceAccountName: + description: ServiceAccountName is the name of the ServiceAccount to + use to run the Prometheus Pods. type: string - description: Define which Nodes the Pods are scheduled on. - type: object - paused: - description: If set to true all actions on the underlaying managed objects - are not goint to be performed, except for delete actions. - type: boolean - podMetadata: - description: PodMetadata configures Labels and Annotations which are - propagated to the alertmanager pods. - properties: - annotations: - additionalProperties: - type: string - description: 'Annotations is an unstructured key value map stored - with a resource that may be set by external tools to store and - retrieve arbitrary metadata. They are not queryable and should - be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' - type: object - labels: - additionalProperties: - type: string - description: 'Map of string keys and values that can be used to - organize and categorize (scope and select) objects. May match - selectors of replication controllers and services. More info: - http://kubernetes.io/docs/user-guide/labels' - type: object - type: object - portName: - description: Port name used for the pods and governing service. 
This - defaults to web - type: string - priorityClassName: - description: Priority class assigned to the Pods - type: string - replicas: - description: Size is the expected size of the alertmanager cluster. - The controller will eventually make the size of the running cluster - equal to the expected size. - format: int32 - type: integer - resources: - description: Define resources requests and limits for single Pods. - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount of compute resources - required. If Requests is omitted for a container, it defaults - to Limits if that is explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - retention: - description: Time duration Alertmanager shall retain data for. Default - is '120h', and must match the regular expression `[0-9]+(ms|s|m|h)` - (milliseconds seconds minutes hours). - type: string - routePrefix: - description: The route prefix Alertmanager registers HTTP handlers for. - This is useful, if using ExternalURL and a proxy is rewriting HTTP - routes of a request, and the actual ExternalURL is still true, but - the server serves requests under a different route prefix. For example - for use with `kubectl proxy`. - type: string - secrets: - description: Secrets is a list of Secrets in the same namespace as the - Alertmanager object, which shall be mounted into the Alertmanager - Pods. The Secrets are mounted into /etc/alertmanager/secrets/. - items: + sha: + description: SHA of Alertmanager container image to be deployed. Defaults + to the value of `version`. 
Similar to a tag, but the SHA explicitly + deploys an immutable container image. Version and Tag are ignored + if SHA is set. type: string - type: array - securityContext: - description: SecurityContext holds pod-level security attributes and - common container settings. This defaults to the default PodSecurityContext. - properties: - fsGroup: - description: "A special supplemental group that applies to all containers - in a pod. Some volume types allow the Kubelet to change the ownership - of that volume to be owned by the pod: \n 1. The owning GID will - be the FSGroup 2. The setgid bit is set (new files created in - the volume will be owned by FSGroup) 3. The permission bits are - OR'd with rw-rw---- \n If unset, the Kubelet will not modify the - ownership and permissions of any volume." - format: int64 - type: integer - runAsGroup: - description: The GID to run the entrypoint of the container process. - Uses runtime default if unset. May also be set in SecurityContext. If - set in both SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence for that container. - format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container must run as a non-root - user. If true, the Kubelet will validate the image at runtime - to ensure that it does not run as UID 0 (root) and fail to start - the container if it does. If unset or false, no such validation - will be performed. May also be set in SecurityContext. If set - in both SecurityContext and PodSecurityContext, the value specified - in SecurityContext takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. May - also be set in SecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. 
- format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to be applied to all containers. - If unspecified, the container runtime will allocate a random SELinux - context for each container. May also be set in SecurityContext. If - set in both SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence for that container. - properties: - level: - description: Level is SELinux level label that applies to the - container. - type: string - role: - description: Role is a SELinux role label that applies to the - container. - type: string - type: - description: Type is a SELinux type label that applies to the - container. - type: string - user: - description: User is a SELinux user label that applies to the - container. - type: string - type: object - supplementalGroups: - description: A list of groups applied to the first process run in - each container, in addition to the container's primary GID. If - unspecified, no groups will be added to any container. - items: - format: int64 - type: integer - type: array - sysctls: - description: Sysctls hold a list of namespaced sysctls used for - the pod. Pods with unsupported sysctls (by the container runtime) - might fail to launch. - items: - description: Sysctl defines a kernel parameter to be set + storage: + description: Storage is the definition of how storage will be used by + the Alertmanager instances. + properties: + emptyDir: + description: 'EmptyDirVolumeSource to be used by the Prometheus + StatefulSets. If specified, used in place of any volumeClaimTemplate. + More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' properties: - name: - description: Name of a property to set + medium: + description: 'What type of storage medium should back this directory. + The default is "" which means to use the node''s default medium. + Must be an empty string (default) or Memory. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' type: string - value: - description: Value of a property to set + sizeLimit: + description: 'Total amount of local storage required for this + EmptyDir volume. The size limit is also applicable for memory + medium. The maximum usage on memory medium EmptyDir would + be the minimum value between the SizeLimit specified here + and the sum of memory limits of all containers in a pod. The + default is nil which means that the limit is undefined. More + info: http://kubernetes.io/docs/user-guide/volumes#emptydir' type: string - required: - - name - - value type: object - type: array - windowsOptions: - description: The Windows specific settings applied to all containers. - If unspecified, the options within a container's SecurityContext - will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - properties: - gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named by - the GMSACredentialSpecName field. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the GMSA - credential spec to use. This field is alpha-level and is only - honored by servers that enable the WindowsGMSA feature flag. - type: string - runAsUserName: - description: The UserName in Windows to run the entrypoint of - the container process. Defaults to the user specified in image - metadata if unspecified. May also be set in PodSecurityContext. - If set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. This - field is beta-level and may be disabled with the WindowsRunAsUserName - feature flag. 
- type: string - type: object - type: object - serviceAccountName: - description: ServiceAccountName is the name of the ServiceAccount to - use to run the Prometheus Pods. - type: string - sha: - description: SHA of Alertmanager container image to be deployed. Defaults - to the value of `version`. Similar to a tag, but the SHA explicitly - deploys an immutable container image. Version and Tag are ignored - if SHA is set. - type: string - storage: - description: Storage is the definition of how storage will be used by - the Alertmanager instances. - properties: - emptyDir: - description: 'EmptyDirVolumeSource to be used by the Prometheus - StatefulSets. If specified, used in place of any volumeClaimTemplate. - More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' - properties: - medium: - description: 'What type of storage medium should back this directory. - The default is "" which means to use the node''s default medium. - Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - type: string - sizeLimit: - description: 'Total amount of local storage required for this - EmptyDir volume. The size limit is also applicable for memory - medium. The maximum usage on memory medium EmptyDir would - be the minimum value between the SizeLimit specified here - and the sum of memory limits of all containers in a pod. The - default is nil which means that the limit is undefined. More - info: http://kubernetes.io/docs/user-guide/volumes#emptydir' - type: string - type: object - volumeClaimTemplate: - description: A PVC spec to be used by the Prometheus StatefulSets. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' - type: object - spec: - description: 'Spec defines the desired characteristics of a - volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: This field requires the VolumeSnapshotDataSource - alpha feature gate to be enabled and currently VolumeSnapshot - is the only supported data source. If the provisioner - can support VolumeSnapshot data source, it will create - a new volume and data will be restored to the volume at - the same time. If the provisioner does not support VolumeSnapshot - data source, volume will not be created and the failure - will be reported as an event. In the future, we plan to - support more data source types and the behavior of the - provisioner may change. - properties: - apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, the - specified Kind must be in the core API group. For - any other third-party types, APIGroup is required. 
- type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced + volumeClaimTemplate: + description: A PVC spec to be used by the Prometheus StatefulSets. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + type: object + spec: + description: 'Spec defines the desired characteristics of a + volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the desired access modes + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources - the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: + type: array + dataSource: + description: This field requires the VolumeSnapshotDataSource + alpha feature gate to be enabled and currently VolumeSnapshot + is the only supported data source. If the provisioner + can support VolumeSnapshot data source, it will create + a new volume and data will be restored to the volume at + the same time. If the provisioner does not support VolumeSnapshot + data source, volume will not be created and the failure + will be reported as an event. In the future, we plan to + support more data source types and the behavior of the + provisioner may change. + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, the + specified Kind must be in the core API group. For + any other third-party types, APIGroup is required. type: string - description: 'Limits describes the maximum amount of - compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: + kind: + description: Kind is the type of resource being referenced type: string - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for - binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. 
- items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: + name: + description: Name is the name of resource being referenced type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not - included in claim spec. This is a beta feature. - type: string - volumeName: - description: VolumeName is the binding reference to the - PersistentVolume backing this claim. + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of + compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for + binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
+ A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required + by the claim. Value of Filesystem is implied when not + included in claim spec. This is a beta feature. + type: string + volumeName: + description: VolumeName is the binding reference to the + PersistentVolume backing this claim. + type: string + type: object + status: + description: 'Status represents the current information/status + of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the actual access modes + the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + capacity: + additionalProperties: + type: string + description: Represents the actual resources of the underlying + volume. + type: object + conditions: + description: Current Condition of persistent volume claim. + If underlying persistent volume is being resized then + the Condition will be set to 'ResizeStarted'. + items: + description: PersistentVolumeClaimCondition contails details + about state of pvc + properties: + lastProbeTime: + description: Last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transitioned + from one status to another. 
+ format: date-time + type: string + message: + description: Human-readable message indicating details + about last transition. + type: string + reason: + description: Unique, this should be a short, machine + understandable string that gives the reason for + condition's last transition. If it reports "ResizeStarted" + that means the underlying persistent volume is being + resized. + type: string + status: + type: string + type: + description: PersistentVolumeClaimConditionType is + a valid value of PersistentVolumeClaimCondition.Type + type: string + required: + - status + - type + type: object + type: array + phase: + description: Phase represents the current phase of PersistentVolumeClaim. + type: string + type: object + type: object + type: object + tag: + description: Tag of Alertmanager container image to be deployed. Defaults + to the value of `version`. Version is ignored if Tag is set. + type: string + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, operator + must be Exists; this combination means to match all values and + all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. Exists + is equivalent to wildcard for value, so that a pod can tolerate + all taints of a particular category. 
+ type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the + toleration (which must be of effect NoExecute, otherwise this + field is ignored) tolerates the taint. By default, it is not + set, which means tolerate the taint forever (do not evict). + Zero and negative values will be treated as 0 (evict immediately) + by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise + just a regular string. + type: string + type: object + type: array + version: + description: Version the cluster should be on. + type: string + volumeMounts: + description: VolumeMounts allows configuration of additional VolumeMounts + on the output StatefulSet definition. VolumeMounts specified will + be appended to other VolumeMounts in the alertmanager container, that + are generated as a result of StorageSpec objects. + items: + description: VolumeMount describes a mounting of a Volume within a + container. + properties: + mountPath: + description: Path within the container at which the volume should + be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated + from the host to container and the other way around. When not + set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false + or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's + volume should be mounted. 
Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded using the container's + environment. Defaults to "" (volume's root). SubPathExpr and + SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes allows configuration of additional volumes on the + output StatefulSet definition. Volumes specified will be appended + to other volumes that are generated as a result of StorageSpec objects. + items: + description: Volume represents a named volume in a pod that may be + accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + partition: + description: 'The partition in the volume that you want to + mount. If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for /dev/sda is + "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the default + is "false". 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource in + AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' type: string + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks + per storage account Dedicated: single blob disk per storage + account Managed: azure managed data disk (only in managed + availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: the name of secret that contains Azure Storage + Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName type: object - status: - description: 'Status represents the current information/status - of a persistent volume claim. Read-only. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + cephfs: + description: CephFS represents a Ceph FS mount on the host that + shares a pod's lifetime properties: - accessModes: - description: 'AccessModes contains the actual access modes - the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + monitors: + description: 'Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' items: type: string type: array - capacity: - additionalProperties: - type: string - description: Represents the actual resources of the underlying - volume. + path: + description: 'Optional: Used as the mounted root, rather than + the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. More + info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring + for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication + secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + user: + description: 'Optional: User is the rados user name, default + is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached and mounted + on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. More + info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string type: object - conditions: - description: Current Condition of persistent volume claim. - If underlying persistent volume is being resized then - the Condition will be set to 'ResizeStarted'. + volumeID: + description: 'volume id used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. 
This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the Data + field of the referenced ConfigMap will be projected into + the volume as a file whose name is the key and content is + the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the + ConfigMap, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. items: - description: PersistentVolumeClaimCondition contails details - about state of pvc + description: Maps a string key to a path within a volume. properties: - lastProbeTime: - description: Last time we probed the condition. - format: date-time - type: string - lastTransitionTime: - description: Last time the condition transitioned - from one status to another. - format: date-time - type: string - message: - description: Human-readable message indicating details - about last transition. - type: string - reason: - description: Unique, this should be a short, machine - understandable string that gives the reason for - condition's last transition. If it reports "ResizeStarted" - that means the underlying persistent volume is being - resized. - type: string - status: + key: + description: The key to project. type: string - type: - description: PersistentVolumeClaimConditionType is - a valid value of PersistentVolumeClaimCondition.Type + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' 
+ format: int32 + type: integer + path: + description: The relative path of the file to map the + key to. May not be an absolute path. May not contain + the path element '..'. May not start with the string + '..'. type: string required: - - status - - type + - key + - path type: object type: array - phase: - description: Phase represents the current phase of PersistentVolumeClaim. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string + optional: + description: Specify whether the ConfigMap or its keys must + be defined + type: boolean type: object - type: object - type: object - tag: - description: Tag of Alertmanager container image to be deployed. Defaults - to the value of `version`. Version is ignored if Tag is set. - type: string - tolerations: - description: If specified, the pod's tolerations. - items: - description: The pod this Toleration is attached to tolerates any - taint that matches the triple using the matching - operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty - means match all taint effects. When specified, allowed values - are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, operator - must be Exists; this combination means to match all values and - all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. Exists - is equivalent to wildcard for value, so that a pod can tolerate - all taints of a particular category. 
- type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the - toleration (which must be of effect NoExecute, otherwise this - field is ignored) tolerates the taint. By default, it is not - set, which means tolerate the taint forever (do not evict). - Zero and negative values will be treated as 0 (evict immediately) - by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise - just a regular string. - type: string - type: object - type: array - version: - description: Version the cluster should be on. - type: string - volumeMounts: - description: VolumeMounts allows configuration of additional VolumeMounts - on the output StatefulSet definition. VolumeMounts specified will - be appended to other VolumeMounts in the alertmanager container, that - are generated as a result of StorageSpec objects. - items: - description: VolumeMount describes a mounting of a Volume within a - container. - properties: - mountPath: - description: Path within the container at which the volume should - be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are propagated - from the host to container and the other way around. When not - set, MountPropagationNone is used. This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise (false - or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). - type: string - subPathExpr: - description: Expanded path within the volume from which the container's - volume should be mounted. 
Behaves similarly to SubPath but environment - variable references $(VAR_NAME) are expanded using the container's - environment. Defaults to "" (volume's root). SubPathExpr and - SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - volumes: - description: Volumes allows configuration of additional volumes on the - output StatefulSet definition. Volumes specified will be appended - to other volumes that are generated as a result of StorageSpec objects. - items: - description: Volume represents a named volume in a pod that may be - accessed by any container in the pod. - properties: - awsElasticBlockStore: - description: 'AWSElasticBlockStore represents an AWS Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - properties: - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - partition: - description: 'The partition in the volume that you want to - mount. If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition - as "1". Similarly, the volume partition for /dev/sda is - "0" (or you can leave the property empty).' - format: int32 - type: integer - readOnly: - description: 'Specify "true" to force and set the ReadOnly - property in VolumeMounts to "true". If omitted, the default - is "false". 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - type: boolean - volumeID: - description: 'Unique ID of the persistent disk resource in - AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - type: string - required: - - volumeID - type: object - azureDisk: - description: AzureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. - properties: - cachingMode: - description: 'Host Caching mode: None, Read Only, Read Write.' - type: string - diskName: - description: The Name of the data disk in the blob storage - type: string - diskURI: - description: The URI the data disk in the blob storage - type: string - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - kind: - description: 'Expected values Shared: multiple blob disks - per storage account Dedicated: single blob disk per storage - account Managed: azure managed data disk (only in managed - availability set). defaults to shared' - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - description: AzureFile represents an Azure File Service mount - on the host and bind mount to the pod. - properties: - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. 
- type: boolean - secretName: - description: the name of secret that contains Azure Storage - Account Name and Key - type: string - shareName: - description: Share Name - type: string - required: - - secretName - - shareName - type: object - cephfs: - description: CephFS represents a Ceph FS mount on the host that - shares a pod's lifetime - properties: - monitors: - description: 'Required: Monitors is a collection of Ceph monitors - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - items: + csi: + description: CSI (Container Storage Interface) represents storage + that is handled by an external CSI driver (Alpha feature). + properties: + driver: + description: Driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. type: string - type: array - path: - description: 'Optional: Used as the mounted root, rather than - the full Ceph tree, default is /' - type: string - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. More - info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: boolean - secretFile: - description: 'Optional: SecretFile is the path to key ring - for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: string - secretRef: - description: 'Optional: SecretRef is reference to the authentication - secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - type: object - user: - description: 'Optional: User is the rados user name, default - is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: string - required: - - monitors - type: object - cinder: - description: 'Cinder represents a cinder volume attached and mounted - on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - properties: - fsType: - description: 'Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Examples: "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: string - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. More - info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: boolean - secretRef: - description: 'Optional: points to a secret object containing - parameters used to connect to OpenStack.' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - volumeID: - description: 'volume id used to identify the volume in cinder. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: string - required: - - volumeID - type: object - configMap: - description: ConfigMap represents a configMap that should populate - this volume - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a value between 0 and 0777. Defaults - to 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' 
- format: int32 - type: integer - items: - description: If unspecified, each key-value pair in the Data - field of the referenced ConfigMap will be projected into - the volume as a file whose name is the key and content is - the value. If specified, the listed keys will be projected - into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the - ConfigMap, the volume setup will error unless it is marked - optional. Paths must be relative and may not contain the - '..' path or start with '..'. - items: - description: Maps a string key to a path within a volume. + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed to the + associated CSI driver which will determine the default filesystem + to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the secret + object containing sensitive information to pass to the CSI + driver to complete the CSI NodePublishVolume and NodeUnpublishVolume + calls. This field is optional, and may be empty if no secret + is required. If the secret object contains more than one + secret, all secret references are passed. properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits to use on this file, - must be a value between 0 and 0777. If not specified, - the volume defaultMode will be used. This might be - in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode - bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to map the - key to. May not be an absolute path. May not contain - the path element '..'. May not start with the string - '..'. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, kind, uid?' type: string - required: - - key - - path type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the ConfigMap or its keys must - be defined - type: boolean - type: object - csi: - description: CSI (Container Storage Interface) represents storage - that is handled by an external CSI driver (Alpha feature). - properties: - driver: - description: Driver is the name of the CSI driver that handles - this volume. Consult with your admin for the correct name - as registered in the cluster. - type: string - fsType: - description: Filesystem type to mount. Ex. "ext4", "xfs", - "ntfs". If not provided, the empty value is passed to the - associated CSI driver which will determine the default filesystem - to apply. - type: string - nodePublishSecretRef: - description: NodePublishSecretRef is a reference to the secret - object containing sensitive information to pass to the CSI - driver to complete the CSI NodePublishVolume and NodeUnpublishVolume - calls. This field is optional, and may be empty if no secret - is required. If the secret object contains more than one - secret, all secret references are passed. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + readOnly: + description: Specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: type: string - type: object - readOnly: - description: Specifies a read-only configuration for the volume. - Defaults to false (read/write). 
- type: boolean - volumeAttributes: - additionalProperties: - type: string - description: VolumeAttributes stores driver-specific properties - that are passed to the CSI driver. Consult your driver's - documentation for supported values. - type: object - required: - - driver - type: object - downwardAPI: - description: DownwardAPI represents downward API about the pod - that should populate this volume - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a value between 0 and 0777. Defaults - to 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - items: - description: Items is a list of downward API volume file - items: - description: DownwardAPIVolumeFile represents information - to create the file containing the pod field - properties: - fieldRef: - description: 'Required: Selects a field of the pod: - only annotations, labels, name and namespace are supported.' - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. - type: string - required: - - fieldPath - type: object - mode: - description: 'Optional: mode bits to use on this file, - must be a value between 0 and 0777. If not specified, - the volume defaultMode will be used. This might be - in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode - bits set.' - format: int32 - type: integer - path: - description: 'Required: Path is the relative path name - of the file to be created. Must not be absolute or - contain the ''..'' path. Must be utf-8 encoded. 
The - first item of the relative path must not start with - ''..''' - type: string - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - requests.cpu and requests.memory) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - description: Specifies the output format of the - exposed resources, defaults to "1" - type: string - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - required: - - path + description: VolumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. type: object - type: array - type: object - emptyDir: - description: 'EmptyDir represents a temporary directory that shares - a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - properties: - medium: - description: 'What type of storage medium should back this - directory. The default is "" which means to use the node''s - default medium. Must be an empty string (default) or Memory. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - type: string - sizeLimit: - description: 'Total amount of local storage required for this - EmptyDir volume. The size limit is also applicable for memory - medium. The maximum usage on memory medium EmptyDir would - be the minimum value between the SizeLimit specified here - and the sum of memory limits of all containers in a pod. - The default is nil which means that the limit is undefined. - More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' - type: string - type: object - fc: - description: FC represents a Fibre Channel resource that is attached - to a kubelet's host machine and then exposed to the pod. 
- properties: - fsType: - description: 'Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - lun: - description: 'Optional: FC target lun number' - format: int32 - type: integer - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts.' - type: boolean - targetWWNs: - description: 'Optional: FC target worldwide names (WWNs)' + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. 
This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name + of the file to be created. Must not be absolute or + contain the ''..'' path. Must be utf-8 encoded. The + first item of the relative path must not start with + ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + requests.cpu and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that shares + a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' type: string - type: array - wwids: - description: 'Optional: FC volume world wide identifiers (wwids) - Either wwids or combination of targetWWNs and lun must be - set, but not both simultaneously.' - items: + sizeLimit: + description: 'Total amount of local storage required for this + EmptyDir volume. The size limit is also applicable for memory + medium. 
The maximum usage on memory medium EmptyDir would + be the minimum value between the SizeLimit specified here + and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' type: string - type: array - type: object - flexVolume: - description: FlexVolume represents a generic volume resource that - is provisioned/attached using an exec based plugin. - properties: - driver: - description: Driver is the name of the driver to use for this - volume. - type: string - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". The default filesystem depends on FlexVolume - script. - type: string - options: - additionalProperties: + type: object + fc: + description: FC represents a Fibre Channel resource that is attached + to a kubelet's host machine and then exposed to the pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from compromising + the machine' type: string - description: 'Optional: Extra command options if any.' - type: object - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts.' - type: boolean - secretRef: - description: 'Optional: SecretRef is reference to the secret - object containing sensitive information to pass to the plugin - scripts. This may be empty if no secret object is specified. - If the secret object contains more than one secret, all - secrets are passed to the plugin scripts.' - properties: - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: type: string - type: object - required: - - driver - type: object - flocker: - description: Flocker represents a Flocker volume attached to a - kubelet's host machine. This depends on the Flocker control - service being running - properties: - datasetName: - description: Name of the dataset stored as metadata -> name - on the dataset for Flocker should be considered as deprecated - type: string - datasetUUID: - description: UUID of the dataset. This is unique identifier - of a Flocker dataset - type: string - type: object - gcePersistentDisk: - description: 'GCEPersistentDisk represents a GCE Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - properties: - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - partition: - description: 'The partition in the volume that you want to - mount. If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition - as "1". Similarly, the volume partition for /dev/sda is - "0" (or you can leave the property empty). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - format: int32 - type: integer - pdName: - description: 'Unique name of the PD resource in GCE. Used - to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: string - readOnly: - description: 'ReadOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: boolean - required: - - pdName - type: object - gitRepo: - description: 'GitRepo represents a git repository at a particular - revision. DEPRECATED: GitRepo is deprecated. To provision a - container with a git repo, mount an EmptyDir into an InitContainer - that clones the repo using git, then mount the EmptyDir into - the Pod''s container.' - properties: - directory: - description: Target directory name. Must not contain or start - with '..'. If '.' is supplied, the volume directory will - be the git repository. Otherwise, if specified, the volume - will contain the git repository in the subdirectory with - the given name. - type: string - repository: - description: Repository URL - type: string - revision: - description: Commit hash for the specified revision. - type: string - required: - - repository - type: object - glusterfs: - description: 'Glusterfs represents a Glusterfs mount on the host - that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' - properties: - endpoints: - description: 'EndpointsName is the endpoint name that details - Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: string - path: - description: 'Path is the Glusterfs volume path. More info: - https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: string - readOnly: - description: 'ReadOnly here will force the Glusterfs volume - to be mounted with read-only permissions. 
Defaults to false. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: boolean - required: - - endpoints - - path - type: object - hostPath: - description: 'HostPath represents a pre-existing file or directory - on the host machine that is directly exposed to the container. - This is generally used for system agents or other privileged - things that are allowed to see the host machine. Most containers - will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - --- TODO(jonesdl) We need to restrict who can use host directory - mounts and who can/can not mount host directories as read/write.' - properties: - path: - description: 'Path of the directory on the host. If the path - is a symlink, it will follow the link to the real path. - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - type: string - type: - description: 'Type for HostPath Volume Defaults to "" More - info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - type: string - required: - - path - type: object - iscsi: - description: 'ISCSI represents an ISCSI Disk resource that is - attached to a kubelet''s host machine and then exposed to the - pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' - properties: - chapAuthDiscovery: - description: whether support iSCSI Discovery CHAP authentication - type: boolean - chapAuthSession: - description: whether support iSCSI Session CHAP authentication - type: boolean - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#iscsi - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - initiatorName: - description: Custom iSCSI Initiator Name. 
If initiatorName - is specified with iscsiInterface simultaneously, new iSCSI - interface : will be created - for the connection. - type: string - iqn: - description: Target iSCSI Qualified Name. - type: string - iscsiInterface: - description: iSCSI Interface Name that uses an iSCSI transport. - Defaults to 'default' (tcp). - type: string - lun: - description: iSCSI Target Lun number. - format: int32 - type: integer - portals: - description: iSCSI Target Portal List. The portal is either - an IP or ip_addr:port if the port is other than default - (typically TCP ports 860 and 3260). - items: + type: array + wwids: + description: 'Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be + set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource that + is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use for this + volume. type: string - type: array - readOnly: - description: ReadOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. - type: boolean - secretRef: - description: CHAP Secret for iSCSI target and initiator authentication - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: type: string - type: object - targetPortal: - description: iSCSI Target Portal. The Portal is either an - IP or ip_addr:port if the port is other than default (typically - TCP ports 860 and 3260). 
- type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - description: 'Volume''s name. Must be a DNS_LABEL and unique within - the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - nfs: - description: 'NFS represents an NFS mount on the host that shares - a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - properties: - path: - description: 'Path that is exported by the NFS server. More - info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: string - readOnly: - description: 'ReadOnly here will force the NFS export to be - mounted with read-only permissions. Defaults to false. More - info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: boolean - server: - description: 'Server is the hostname or IP address of the - NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - description: 'PersistentVolumeClaimVolumeSource represents a reference - to a PersistentVolumeClaim in the same namespace. More info: - https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - claimName: - description: 'ClaimName is the name of a PersistentVolumeClaim - in the same namespace as the pod using this volume. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - type: string - readOnly: - description: Will force the ReadOnly setting in VolumeMounts. - Default false. - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - description: PhotonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine - properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. 
"ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - pdID: - description: ID that identifies Photon Controller persistent - disk - type: string - required: - - pdID - type: object - portworxVolume: - description: PortworxVolume represents a portworx volume attached - and mounted on kubelets host machine - properties: - fsType: - description: FSType represents the filesystem type to mount - Must be a filesystem type supported by the host operating - system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" - if unspecified. - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - volumeID: - description: VolumeID uniquely identifies a Portworx volume - type: string - required: - - volumeID - type: object - projected: - description: Items for all in one resources secrets, configmaps, - and downward API - properties: - defaultMode: - description: Mode bits to use on created files by default. - Must be a value between 0 and 0777. Directories within the - path are not affected by this setting. This might be in - conflict with other options that affect the file mode, like - fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - sources: - description: list of volume projections - items: - description: Projection that may be projected along with - other supported volume types + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret + object containing sensitive information to pass to the plugin + scripts. This may be empty if no secret object is specified. + If the secret object contains more than one secret, all + secrets are passed to the plugin scripts.' 
properties: - configMap: - description: information about the configMap data to - project - properties: - items: - description: If unspecified, each key-value pair - in the Data field of the referenced ConfigMap - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the ConfigMap, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to a + kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name + on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. 
More info: + https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + partition: + description: 'The partition in the volume that you want to + mount. If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for /dev/sda is + "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision a + container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or start + with '..'. If '.' is supplied, the volume directory will + be the git repository. Otherwise, if specified, the volume + will contain the git repository in the subdirectory with + the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the host + that shares a pod''s lifetime. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the path + is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to the + pod. 
More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName + is specified with iscsiInterface simultaneously, new iSCSI + interface : will be created + for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. 
The Portal is either an + IP or ip_addr:port if the port is other than default (typically + TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique within + the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to be + mounted with read-only permissions. Defaults to false. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a reference + to a PersistentVolumeClaim in the same namespace. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. 
+ type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits to use on created files by default. + Must be a value between 0 and 0777. Directories within the + path are not affected by this setting. This might be in + conflict with other options that affect the file mode, like + fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: information about the configMap data to + project + properties: items: - description: Maps a string key to a path within - a volume. 
- properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits to use on - this file, must be a value between 0 and - 0777. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can be - other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file - to map the key to. May not be an absolute - path. May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - keys must be defined - type: boolean - type: object - downwardAPI: - description: information about the downwardAPI data - to project - properties: - items: - description: Items is a list of DownwardAPIVolume - file + description: If unspecified, each key-value pair + in the Data field of the referenced ConfigMap + will be projected into the volume as a file whose + name is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. + If a key is specified which is not present in + the ConfigMap, the volume setup will error unless + it is marked optional. Paths must be relative + and may not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. 
This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data + to project + properties: items: - description: DownwardAPIVolumeFile represents - information to create the file containing the - pod field - properties: - fieldRef: - description: 'Required: Selects a field of - the pod: only annotations, labels, name - and namespace are supported.' - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, defaults - to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - mode: - description: 'Optional: mode bits to use on - this file, must be a value between 0 and - 0777. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can be - other mode bits set.' - format: int32 - type: integer - path: - description: 'Required: Path is the relative - path name of the file to be created. Must - not be absolute or contain the ''..'' path. - Must be utf-8 encoded. 
The first item of - the relative path must not start with ''..''' - type: string - resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, requests.cpu and requests.memory) - are currently supported.' - properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - description: Specifies the output format - of the exposed resources, defaults to - "1" - type: string - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - required: - - path - type: object - type: array - type: object - secret: - description: information about the secret data to project - properties: - items: - description: If unspecified, each key-value pair - in the Data field of the referenced Secret will - be projected into the volume as a file whose name - is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the Secret, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name + and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
+ type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + description: Specifies the output format + of the exposed resources, defaults to + "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to project + properties: items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits to use on - this file, must be a value between 0 and - 0777. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can be - other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file - to map the key to. May not be an absolute - path. May not contain the path element '..'. - May not start with the string '..'. 
- type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - type: object - serviceAccountToken: - description: information about the serviceAccountToken - data to project - properties: - audience: - description: Audience is the intended audience of - the token. A recipient of a token must identify - itself with an identifier specified in the audience - of the token, and otherwise should reject the - token. The audience defaults to the identifier - of the apiserver. - type: string - expirationSeconds: - description: ExpirationSeconds is the requested - duration of validity of the service account token. - As the token approaches expiration, the kubelet - volume plugin will proactively rotate the service - account token. The kubelet will start trying to - rotate the token if the token is older than 80 - percent of its time to live or if the token is - older than 24 hours.Defaults to 1 hour and must - be at least 10 minutes. - format: int64 - type: integer - path: - description: Path is the path relative to the mount - point of the file to project the token into. - type: string - required: - - path - type: object - type: object - type: array - required: - - sources - type: object - quobyte: - description: Quobyte represents a Quobyte mount on the host that - shares a pod's lifetime - properties: - group: - description: Group to map volume access to Default is no group - type: string - readOnly: - description: ReadOnly here will force the Quobyte volume to - be mounted with read-only permissions. Defaults to false. 
- type: boolean - registry: - description: Registry represents a single or multiple Quobyte - Registry services specified as a string as host:port pair - (multiple entries are separated with commas) which acts - as the central registry for volumes - type: string - tenant: - description: Tenant owning the given Quobyte volume in the - Backend Used with dynamically provisioned Quobyte volumes, - value is set by the plugin - type: string - user: - description: User to map volume access to Defaults to serivceaccount - user - type: string - volume: - description: Volume is a string that references an already - created Quobyte volume by name. - type: string - required: - - registry - - volume - type: object - rbd: - description: 'RBD represents a Rados Block Device mount on the - host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' - properties: - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#rbd - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - image: - description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - keyring: - description: 'Keyring is the path to key ring for RBDUser. - Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - monitors: - description: 'A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - items: + description: If unspecified, each key-value pair + in the Data field of the referenced Secret will + be projected into the volume as a file whose name + is the key and content is the value. 
If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. + If a key is specified which is not present in + the Secret, the volume setup will error unless + it is marked optional. Paths must be relative + and may not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience of + the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account token. 
+ As the token approaches expiration, the kubelet + volume plugin will proactively rotate the service + account token. The kubelet will start trying to + rotate the token if the token is older than 80 + percent of its time to live or if the token is + older than 24 hours.Defaults to 1 hour and must + be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the mount + point of the file to project the token into. + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host that + shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is no group type: string - type: array - pool: - description: 'The rados pool name. Default is rbd. More info: - https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - readOnly: - description: 'ReadOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: boolean - secretRef: - description: 'SecretRef is name of the authentication secret - for RBDUser. If provided overrides keyring. Default is nil. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - user: - description: 'The rados user name. Default is admin. More - info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - required: - - image - - monitors - type: object - scaleIO: - description: ScaleIO represents a ScaleIO persistent volume attached - and mounted on Kubernetes nodes. - properties: - fsType: - description: Filesystem type to mount. 
Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Default is "xfs". - type: string - gateway: - description: The host address of the ScaleIO API Gateway. - type: string - protectionDomain: - description: The name of the ScaleIO Protection Domain for - the configured storage. - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: SecretRef references to the secret for ScaleIO - user and other sensitive information. If this is not provided, - Login operation will fail. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + readOnly: + description: ReadOnly here will force the Quobyte volume to + be mounted with read-only permissions. Defaults to false. + type: boolean + registry: + description: Registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. 
Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: type: string - type: object - sslEnabled: - description: Flag to enable/disable SSL communication with - Gateway, default false - type: boolean - storageMode: - description: Indicates whether the storage for a volume should - be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. - type: string - storagePool: - description: The ScaleIO Storage Pool associated with the - protection domain. - type: string - system: - description: The name of the storage system as configured - in ScaleIO. - type: string - volumeName: - description: The name of a volume already created in the ScaleIO - system that is associated with this volume source. - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - description: 'Secret represents a secret that should populate - this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a value between 0 and 0777. Defaults - to 0644. Directories within the path are not affected by - this setting. 
This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - items: - description: If unspecified, each key-value pair in the Data - field of the referenced Secret will be projected into the - volume as a file whose name is the key and content is the - value. If specified, the listed keys will be projected into - the specified paths, and unlisted keys will not be present. - If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. - Paths must be relative and may not contain the '..' path - or start with '..'. - items: - description: Maps a string key to a path within a volume. + type: array + pool: + description: 'The rados pool name. Default is rbd. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' properties: - key: - description: The key to project. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - mode: - description: 'Optional: mode bits to use on this file, - must be a value between 0 and 0777. If not specified, - the volume defaultMode will be used. This might be - in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode - bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to map the - key to. 
May not be an absolute path. May not contain - the path element '..'. May not start with the string - '..'. + type: object + user: + description: 'The rados user name. Default is admin. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - required: - - key - - path type: object - type: array - optional: - description: Specify whether the Secret or its keys must be - defined - type: boolean - secretName: - description: 'Name of the secret in the pod''s namespace to - use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' - type: string - type: object - storageos: - description: StorageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. - properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified. - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: SecretRef specifies the secret to use for obtaining - the StorageOS API credentials. If not specified, default - values will be attempted. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - volumeName: - description: VolumeName is the human-readable name of the - StorageOS volume. Volume names are only unique within a - namespace. - type: string - volumeNamespace: - description: VolumeNamespace specifies the scope of the volume - within StorageOS. If no namespace is specified then the - Pod's namespace will be used. This allows the Kubernetes - name scoping to be mirrored within StorageOS for tighter - integration. Set VolumeName to any name to override the - default behaviour. Set to "default" if you are not using - namespaces within StorageOS. Namespaces that do not pre-exist - within StorageOS will be created. - type: string - type: object - vsphereVolume: - description: VsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine - properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - storagePolicyID: - description: Storage Policy Based Management (SPBM) profile - ID associated with the StoragePolicyName. - type: string - storagePolicyName: - description: Storage Policy Based Management (SPBM) profile - name. 
- type: string - volumePath: - description: Path that identifies vSphere volume vmdk - type: string - required: - - volumePath - type: object - required: - - name - type: object - type: array - type: object - status: - description: 'Most recent observed status of the Alertmanager cluster. Read-only. - Not included when requesting from the apiserver, only from the Prometheus - Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - availableReplicas: - description: Total number of available pods (ready for at least minReadySeconds) - targeted by this Alertmanager cluster. - format: int32 - type: integer - paused: - description: Represents whether any actions on the underlaying managed - objects are being performed. Only delete actions will be performed. - type: boolean - replicas: - description: Total number of non-terminated pods targeted by this Alertmanager - cluster (their labels match the selector). - format: int32 - type: integer - unavailableReplicas: - description: Total number of unavailable pods targeted by this Alertmanager - cluster. - format: int32 - type: integer - updatedReplicas: - description: Total number of non-terminated pods targeted by this Alertmanager - cluster that have the desired version spec. - format: int32 - type: integer - required: - - availableReplicas - - paused - - replicas - - unavailableReplicas - - updatedReplicas - type: object - required: - - spec - type: object - version: v1 - versions: - - name: v1 + sslEnabled: + description: Flag to enable/disable SSL communication with + Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume should + be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the + protection domain. 
+ type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the ScaleIO + system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the Data + field of the referenced Secret will be projected into the + volume as a file whose name is the key and content is the + value. If specified, the listed keys will be projected into + the specified paths, and unlisted keys will not be present. + If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. + Paths must be relative and may not contain the '..' path + or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the + key to. May not be an absolute path. 
May not contain + the path element '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must be + defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace to + use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within a + namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the volume + within StorageOS. If no namespace is specified then the + Pod's namespace will be used. This allows the Kubernetes + name scoping to be mirrored within StorageOS for tighter + integration. Set VolumeName to any name to override the + default behaviour. Set to "default" if you are not using + namespaces within StorageOS. Namespaces that do not pre-exist + within StorageOS will be created. 
+ type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + description: 'Most recent observed status of the Alertmanager cluster. Read-only. + Not included when requesting from the apiserver, only from the Prometheus + Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + availableReplicas: + description: Total number of available pods (ready for at least minReadySeconds) + targeted by this Alertmanager cluster. + format: int32 + type: integer + paused: + description: Represents whether any actions on the underlaying managed + objects are being performed. Only delete actions will be performed. + type: boolean + replicas: + description: Total number of non-terminated pods targeted by this Alertmanager + cluster (their labels match the selector). + format: int32 + type: integer + unavailableReplicas: + description: Total number of unavailable pods targeted by this Alertmanager + cluster. + format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated pods targeted by this Alertmanager + cluster that have the desired version spec. 
+ format: int32 + type: integer + required: + - availableReplicas + - paused + - replicas + - unavailableReplicas + - updatedReplicas + type: object + required: + - spec + type: object + additionalPrinterColumns: + - jsonPath: .spec.version + description: The version of Alertmanager + name: Version + type: string + - jsonPath: .spec.replicas + description: The desired replicas number of Alertmanagers + name: Replicas + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date served: true storage: true + subresources: {} diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-podmonitor.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-podmonitor.yaml index ab2af4e7cb..71cb4b2130 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-podmonitor.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-podmonitor.yaml @@ -1,10 +1,9 @@ # https://raw.githubusercontent.com/coreos/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.2.4 - helm.sh/hook: crd-install creationTimestamp: null name: podmonitors.monitoring.coreos.com spec: @@ -16,246 +15,246 @@ spec: singular: podmonitor preserveUnknownFields: false scope: Namespaced - validation: - openAPIV3Schema: - description: PodMonitor defines monitoring for a set of pods. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Specification of desired Pod selection for target discovery - by Prometheus. - properties: - jobLabel: - description: The label to use to retrieve the job name from. - type: string - namespaceSelector: - description: Selector to select which namespaces the Endpoints objects - are discovered from. - properties: - any: - description: Boolean describing whether all namespaces are selected - in contrast to a list restricting them. - type: boolean - matchNames: - description: List of namespace names. - items: - type: string - type: array - type: object - podMetricsEndpoints: - description: A list of endpoints allowed as part of this PodMonitor. - items: - description: PodMetricsEndpoint defines a scrapeable endpoint of a - Kubernetes Pod serving Prometheus metrics. + versions: + - name: v1 + schema: + openAPIV3Schema: + description: PodMonitor defines monitoring for a set of pods. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired Pod selection for target discovery + by Prometheus. + properties: + jobLabel: + description: The label to use to retrieve the job name from. + type: string + namespaceSelector: + description: Selector to select which namespaces the Endpoints objects + are discovered from. properties: - honorLabels: - description: HonorLabels chooses the metric's labels on collisions - with target labels. + any: + description: Boolean describing whether all namespaces are selected + in contrast to a list restricting them. type: boolean - honorTimestamps: - description: HonorTimestamps controls whether Prometheus respects - the timestamps present in scraped data. - type: boolean - interval: - description: Interval at which metrics should be scraped - type: string - metricRelabelings: - description: MetricRelabelConfigs to apply to samples before ingestion. + matchNames: + description: List of namespace names. items: - description: 'RelabelConfig allows dynamic rewriting of the - label set, being applied to samples before ingestion. It defines - ``-section of Prometheus configuration. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' - properties: - action: - description: Action to perform based on regex matching. - Default is 'replace' - type: string - modulus: - description: Modulus to take of the hash of the source label - values. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' - type: string - replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex - capture groups are available. 
Default is '$1' - type: string - separator: - description: Separator placed between concatenated source - label values. default is ';'. - type: string - sourceLabels: - description: The source labels select values from existing - labels. Their content is concatenated using the configured - separator and matched against the configured regular expression - for the replace, keep, and drop actions. - items: + type: string + type: array + type: object + podMetricsEndpoints: + description: A list of endpoints allowed as part of this PodMonitor. + items: + description: PodMetricsEndpoint defines a scrapeable endpoint of a + Kubernetes Pod serving Prometheus metrics. + properties: + honorLabels: + description: HonorLabels chooses the metric's labels on collisions + with target labels. + type: boolean + honorTimestamps: + description: HonorTimestamps controls whether Prometheus respects + the timestamps present in scraped data. + type: boolean + interval: + description: Interval at which metrics should be scraped + type: string + metricRelabelings: + description: MetricRelabelConfigs to apply to samples before ingestion. + items: + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It defines + ``-section of Prometheus configuration. + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. + Default is 'replace' type: string - type: array - targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. + modulus: + description: Modulus to take of the hash of the source label + values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. 
Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular expression + for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. + type: string + type: object + type: array + params: + additionalProperties: + items: type: string + type: array + description: Optional HTTP URL parameters type: object - type: array - params: - additionalProperties: + path: + description: HTTP path to scrape for metrics. + type: string + port: + description: Name of the pod port this endpoint refers to. Mutually + exclusive with targetPort. + type: string + proxyUrl: + description: ProxyURL eg http://proxyserver:2195 Directs scrapes + to proxy through this endpoint. + type: string + relabelings: + description: 'RelabelConfigs to apply to samples before ingestion. + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' items: - type: string + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It defines + ``-section of Prometheus configuration. + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. 
+ Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source label + values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular expression + for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. + type: string + type: object type: array - description: Optional HTTP URL parameters - type: object - path: - description: HTTP path to scrape for metrics. - type: string - port: - description: Name of the pod port this endpoint refers to. Mutually - exclusive with targetPort. - type: string - proxyUrl: - description: ProxyURL eg http://proxyserver:2195 Directs scrapes - to proxy through this endpoint. - type: string - relabelings: - description: 'RelabelConfigs to apply to samples before ingestion. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + scheme: + description: HTTP scheme to use for scraping. + type: string + scrapeTimeout: + description: Timeout after which the scrape is ended + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: 'Deprecated: Use ''port'' instead.' 
+ x-kubernetes-int-or-string: true + type: object + type: array + podTargetLabels: + description: PodTargetLabels transfers labels on the Kubernetes Pod + onto the target. + items: + type: string + type: array + sampleLimit: + description: SampleLimit defines per-scrape limit on number of scraped + samples that will be accepted. + format: int64 + type: integer + selector: + description: Selector to select Pod objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. items: - description: 'RelabelConfig allows dynamic rewriting of the - label set, being applied to samples before ingestion. It defines - ``-section of Prometheus configuration. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. properties: - action: - description: Action to perform based on regex matching. - Default is 'replace' - type: string - modulus: - description: Modulus to take of the hash of the source label - values. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' - type: string - replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex - capture groups are available. Default is '$1' + key: + description: key is the label key that the selector applies + to. type: string - separator: - description: Separator placed between concatenated source - label values. default is ';'. + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. type: string - sourceLabels: - description: The source labels select values from existing - labels. 
Their content is concatenated using the configured - separator and matched against the configured regular expression - for the replace, keep, and drop actions. + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. items: type: string type: array - targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. - type: string + required: + - key + - operator type: object type: array - scheme: - description: HTTP scheme to use for scraping. - type: string - scrapeTimeout: - description: Timeout after which the scrape is ended - type: string - targetPort: - anyOf: - - type: integer - - type: string - description: 'Deprecated: Use ''port'' instead.' - x-kubernetes-int-or-string: true - type: object - type: array - podTargetLabels: - description: PodTargetLabels transfers labels on the Kubernetes Pod - onto the target. - items: - type: string - type: array - sampleLimit: - description: SampleLimit defines per-scrape limit on number of scraped - samples that will be accepted. - format: int64 - type: integer - selector: - description: Selector to select Pod objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. 
- type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. 
- type: object - type: object - required: - - podMetricsEndpoints - - selector - type: object - required: - - spec - type: object - version: v1 - versions: - - name: v1 + type: object + required: + - podMetricsEndpoints + - selector + type: object + required: + - spec + type: object served: true storage: true + diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-prometheus.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-prometheus.yaml index 3699396f1c..669325a996 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-prometheus.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-prometheus.yaml @@ -1,25 +1,12 @@ # https://raw.githubusercontent.com/coreos/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.2.4 - helm.sh/hook: crd-install creationTimestamp: null name: prometheuses.monitoring.coreos.com spec: - additionalPrinterColumns: - - JSONPath: .spec.version - description: The version of Prometheus - name: Version - type: string - - JSONPath: .spec.replicas - description: The desired replicas number of Prometheuses - name: Replicas - type: integer - - JSONPath: .metadata.creationTimestamp - name: Age - type: date group: monitoring.coreos.com names: kind: Prometheus @@ -28,2278 +15,3469 @@ spec: singular: prometheus preserveUnknownFields: false scope: Namespaced - subresources: {} - validation: - openAPIV3Schema: - description: Prometheus defines a Prometheus deployment. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the Prometheus cluster. - More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - additionalAlertManagerConfigs: - description: 'AdditionalAlertManagerConfigs allows specifying a key - of a Secret containing additional Prometheus AlertManager configurations. - AlertManager configurations specified are appended to the configurations - generated by the Prometheus Operator. Job configurations specified - must have the form as specified in the official Prometheus documentation: - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config. - As AlertManager configs are appended, the user is responsible to make - sure it is valid. Note that using this feature may expose the possibility - to break upgrades of Prometheus. It is advised to review Prometheus - release notes to ensure that no incompatible AlertManager configs - are going to break Prometheus after the upgrade.' - properties: - key: - description: The key of the secret to select from. Must be a valid - secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - additionalAlertRelabelConfigs: - description: 'AdditionalAlertRelabelConfigs allows specifying a key - of a Secret containing additional Prometheus alert relabel configurations. - Alert relabel configurations specified are appended to the configurations - generated by the Prometheus Operator. Alert relabel configurations - specified must have the form as specified in the official Prometheus - documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. - As alert relabel configs are appended, the user is responsible to - make sure it is valid. Note that using this feature may expose the - possibility to break upgrades of Prometheus. It is advised to review - Prometheus release notes to ensure that no incompatible alert relabel - configs are going to break Prometheus after the upgrade.' - properties: - key: - description: The key of the secret to select from. Must be a valid - secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - additionalScrapeConfigs: - description: 'AdditionalScrapeConfigs allows specifying a key of a Secret - containing additional Prometheus scrape configurations. Scrape configurations - specified are appended to the configurations generated by the Prometheus - Operator. Job configurations specified must have the form as specified - in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. 
- As scrape configs are appended, the user is responsible to make sure - it is valid. Note that using this feature may expose the possibility - to break upgrades of Prometheus. It is advised to review Prometheus - release notes to ensure that no incompatible scrape configs are going - to break Prometheus after the upgrade.' - properties: - key: - description: The key of the secret to select from. Must be a valid - secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - affinity: - description: If specified, the pod's scheduling constraints. - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes - that satisfy the affinity expressions specified by this field, - but it may choose a node that violates one or more of the - expressions. The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node that meets - all of the scheduling requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to the sum - if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all - objects with implicit weight 0 (i.e. it's a no-op). A null - preferred scheduling term matches no objects (i.e. is also - a no-op). + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Prometheus defines a Prometheus deployment. 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the desired behavior of the Prometheus cluster. + More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + additionalAlertManagerConfigs: + description: 'AdditionalAlertManagerConfigs allows specifying a key + of a Secret containing additional Prometheus AlertManager configurations. + AlertManager configurations specified are appended to the configurations + generated by the Prometheus Operator. Job configurations specified + must have the form as specified in the official Prometheus documentation: + https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config. + As AlertManager configs are appended, the user is responsible to make + sure it is valid. Note that using this feature may expose the possibility + to break upgrades of Prometheus. It is advised to review Prometheus + release notes to ensure that no incompatible AlertManager configs + are going to break Prometheus after the upgrade.' + properties: + key: + description: The key of the secret to select from. Must be a valid + secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + additionalAlertRelabelConfigs: + description: 'AdditionalAlertRelabelConfigs allows specifying a key + of a Secret containing additional Prometheus alert relabel configurations. + Alert relabel configurations specified are appended to the configurations + generated by the Prometheus Operator. Alert relabel configurations + specified must have the form as specified in the official Prometheus + documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. + As alert relabel configs are appended, the user is responsible to + make sure it is valid. Note that using this feature may expose the + possibility to break upgrades of Prometheus. It is advised to review + Prometheus release notes to ensure that no incompatible alert relabel + configs are going to break Prometheus after the upgrade.' + properties: + key: + description: The key of the secret to select from. Must be a valid + secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + additionalScrapeConfigs: + description: 'AdditionalScrapeConfigs allows specifying a key of a Secret + containing additional Prometheus scrape configurations. Scrape configurations + specified are appended to the configurations generated by the Prometheus + Operator. 
Job configurations specified must have the form as specified + in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. + As scrape configs are appended, the user is responsible to make sure + it is valid. Note that using this feature may expose the possibility + to break upgrades of Prometheus. It is advised to review Prometheus + release notes to ensure that no incompatible scrape configs are going + to break Prometheus after the upgrade.' + properties: + key: + description: The key of the secret to select from. Must be a valid + secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + affinity: + description: If specified, the pod's scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the affinity expressions specified by this field, + but it may choose a node that violates one or more of the + expressions. The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node that meets + all of the scheduling requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the sum + if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all + objects with implicit weight 0 (i.e. it's a no-op). 
A null + preferred scheduling term matches no objects (i.e. is also + a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. 
This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. properties: - preference: - description: A node selector term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. + nodeSelectorTerms: + description: Required. A list of node selector terms. The + terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the affinity expressions specified by this field, + but it may choose a node that violates one or more of the + expressions. 
The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node that meets + all of the scheduling requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the sum + if the node has pods which matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. 
due to a pod label update), the system may or may not + try to eventually evict the pod from its node. When there + are multiple elements, the lists of nodes corresponding to + each podAffinityTerm are intersected, i.e. all terms must + be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) that + this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label query over a set of resources, in + this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any + node on which any of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some other + pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the anti-affinity expressions specified by this + field, but it may choose a node that violates one or more + of the expressions. The node that is most preferred is the + one with the greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource request, + requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field + and adding "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; the node(s) with + the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object type: array - required: - - key - - operator + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. 
+ namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will not + be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms must + be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) that + this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label query over a set of resources, in + this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any + node on which any of the selected pods is running. Empty + topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + alerting: + description: Define details regarding alerting. + properties: + alertmanagers: + description: AlertmanagerEndpoints Prometheus should fire alerts + against. + items: + description: AlertmanagerEndpoints defines a selection of a single + Endpoints object containing alertmanager IPs to fire alerts + against. + properties: + apiVersion: + description: Version of the Alertmanager API that Prometheus + uses to send alerts. It can be "v1" or "v2". + type: string + bearerTokenFile: + description: BearerTokenFile to read from filesystem to use + when authenticating to Alertmanager. + type: string + name: + description: Name of Endpoints object in Namespace. + type: string + namespace: + description: Namespace of Endpoints object. + type: string + pathPrefix: + description: Prefix for the HTTP path alerts are pushed to. + type: string + port: + anyOf: + - type: integer + - type: string + description: Port the Alertmanager API is exposed on. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use when firing alerts. + type: string + tlsConfig: + description: TLS Config to use for alertmanager connection. + properties: + ca: + description: Stuct containing the CA cert to use for the + targets. + properties: + configMap: + description: ConfigMap containing data to use for + the targets. properties: key: - description: The label key that the selector - applies to. + description: The key to select. type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' type: string - values: - description: An array of string values. 
If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. - items: - type: string - type: array + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean required: - key - - operator type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding - nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to an update), the system may or may not try to - eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The - terms are ORed. - items: - description: A null or empty node selector term matches - no objects. The requirements of them are ANDed. The - TopologySelectorTerm type implements a subset of the - NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. + secret: + description: Secret containing data to use for the + targets. properties: key: - description: The label key that the selector - applies to. + description: The key of the secret to select from. Must + be a valid secret key. 
type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. - items: - type: string - type: array + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean required: - key - - operator type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. + type: object + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. + type: string + cert: + description: Struct containing the client cert file for + the targets. + properties: + configMap: + description: ConfigMap containing data to use for + the targets. properties: key: - description: The label key that the selector - applies to. + description: The key to select. type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. - items: - type: string - type: array + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean required: - key - - operator type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate - this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes - that satisfy the affinity expressions specified by this field, - but it may choose a node that violates one or more of the - expressions. The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node that meets - all of the scheduling requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to the sum - if the node has pods which matches the corresponding podAffinityTerm; - the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. 
- properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement is - a selector that contains values, a key, and - an operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. This array - is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". - The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey matches - that of any node on which any of the selected pods - is running. Empty topologyKey is not allowed. 
- type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may or may not - try to eventually evict the pod from its node. When there - are multiple elements, the lists of nodes corresponding to - each podAffinityTerm are intersected, i.e. all terms must - be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) that - this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of pods - is running - properties: - labelSelector: - description: A label query over a set of resources, in - this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. + secret: + description: Secret containing data to use for the + targets. properties: key: - description: key is the label key that the selector - applies to. + description: The key of the secret to select from. Must + be a valid secret key. type: string - operator: - description: operator represents a key's relationship - to a set of values. 
Valid operators are In, - NotIn, Exists and DoesNotExist. + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values - array must be non-empty. If the operator is - Exists or DoesNotExist, the values array must - be empty. This array is replaced during a - strategic merge patch. - items: - type: string - type: array + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean required: - key - - operator type: object - type: array - matchLabels: - additionalProperties: + type: object + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus + container for the targets. + type: string + keySecret: + description: Secret containing the client key file for + the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of any - node on which any of the selected pods is running. Empty - topologyKey is not allowed. + type: object + required: + - name + - namespace + - port + type: object + type: array + required: + - alertmanagers + type: object + apiserverConfig: + description: APIServerConfig allows specifying a host and auth methods + to access apiserver. If left empty, Prometheus is assumed to run inside + of the cluster and will discover API servers automatically and use + the pod's CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/. + properties: + basicAuth: + description: BasicAuth allow an endpoint to authenticate over basic + authentication + properties: + password: + description: The secret in the service monitor namespace that + contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean required: - - topologyKey + - key type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. - avoid putting this pod in the same node, zone, etc. 
as some other - pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes - that satisfy the anti-affinity expressions specified by this - field, but it may choose a node that violates one or more - of the expressions. The node that is most preferred is the - one with the greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource request, - requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field - and adding "weight" to the sum if the node has pods which - matches the corresponding podAffinityTerm; the node(s) with - the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) + username: + description: The secret in the service monitor namespace that + contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + bearerToken: + description: Bearer token for accessing apiserver. + type: string + bearerTokenFile: + description: File to read bearer token for accessing apiserver. + type: string + host: + description: Host of apiserver. A valid string consisting of a hostname + or IP followed by an optional port number + type: string + tlsConfig: + description: TLS Config to use for accessing apiserver. + properties: + ca: + description: Stuct containing the CA cert to use for the targets. 
properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. + configMap: + description: ConfigMap containing data to use for the targets. properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement is - a selector that contains values, a key, and - an operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. This array - is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". - The requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey matches - that of any node on which any of the selected pods - is running. Empty topologyKey is not allowed. + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean required: - - topologyKey + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by - this field are not met at scheduling time, the pod will not - be scheduled onto the node. 
If the anti-affinity requirements - specified by this field cease to be met at some point during - pod execution (e.g. due to a pod label update), the system - may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all terms must - be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) that - this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of pods - is running + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. + type: string + cert: + description: Struct containing the client cert file for the + targets. properties: - labelSelector: - description: A label query over a set of resources, in - this case pods. + configMap: + description: ConfigMap containing data to use for the targets. properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values - array must be non-empty. If the operator is - Exists or DoesNotExist, the values array must - be empty. This array is replaced during a - strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. - type: object + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of any - node on which any of the selected pods is running. Empty - topologyKey is not allowed. 
- type: string - required: - - topologyKey type: object - type: array - type: object - type: object - alerting: - description: Define details regarding alerting. - properties: - alertmanagers: - description: AlertmanagerEndpoints Prometheus should fire alerts - against. - items: - description: AlertmanagerEndpoints defines a selection of a single - Endpoints object containing alertmanager IPs to fire alerts - against. - properties: - apiVersion: - description: Version of the Alertmanager API that Prometheus - uses to send alerts. It can be "v1" or "v2". - type: string - bearerTokenFile: - description: BearerTokenFile to read from filesystem to use - when authenticating to Alertmanager. + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. type: string - name: - description: Name of Endpoints object in Namespace. + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus container + for the targets. type: string - namespace: - description: Namespace of Endpoints object. + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. type: string - pathPrefix: - description: Prefix for the HTTP path alerts are pushed to. 
+ type: object + required: + - host + type: object + arbitraryFSAccessThroughSMs: + description: ArbitraryFSAccessThroughSMs configures whether configuration + based on a service monitor can access arbitrary files on the file + system of the Prometheus container e.g. bearer token files. + properties: + deny: + type: boolean + type: object + baseImage: + description: Base image to use for a Prometheus deployment. + type: string + configMaps: + description: ConfigMaps is a list of ConfigMaps in the same namespace + as the Prometheus object, which shall be mounted into the Prometheus + Pods. The ConfigMaps are mounted into /etc/prometheus/configmaps/. + items: + type: string + type: array + containers: + description: 'Containers allows injecting additional containers or modifying + operator generated containers. This can be used to allow adding an + authentication proxy to a Prometheus pod or to change the behavior + of an operator generated container. Containers described here modify + an operator generated container if they share the same name and modifications + are done via a strategic merge patch. The current container names + are: `prometheus`, `prometheus-config-reloader`, `rules-configmap-reloader`, + and `thanos-sidecar`. Overriding containers is entirely outside the + scope of what the maintainers will support and by doing so, you accept + that this behaviour may break at any time without notice.' + items: + description: A single application container that you want to run within + a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will be + unchanged. The $(VAR_NAME) syntax can be escaped with a double + $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + regardless of whether the variable exists or not. 
Cannot be + updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: type: string - port: - anyOf: - - type: integer - - type: string - description: Port the Alertmanager API is exposed on. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use when firing alerts. + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The + docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. The $(VAR_NAME) syntax + can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: type: string - tlsConfig: - description: TLS Config to use for alertmanager connection. + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. properties: - ca: - description: Stuct containing the CA cert to use for the - targets. + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a + variable cannot be resolved, the reference in the input + string will be unchanged. The $(VAR_NAME) syntax can be + escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable + exists or not. Defaults to "".' 
+ type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. properties: - configMap: - description: ConfigMap containing data to use for - the targets. + configMapKeyRef: + description: Selects a key of a ConfigMap. properties: key: description: The key to select. type: string name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the ConfigMap or - its key must be defined + description: Specify whether the ConfigMap or its + key must be defined type: boolean required: - key type: object - secret: - description: Secret containing data to use for the - targets. + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". type: string - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + fieldPath: + description: Path of the field to select in the + specified API version. type: string - optional: - description: Specify whether the Secret or its - key must be defined - type: boolean required: - - key + - fieldPath type: object - type: object - caFile: - description: Path to the CA cert in the Prometheus container - to use for the targets. 
- type: string - cert: - description: Struct containing the client cert file for - the targets. - properties: - configMap: - description: ConfigMap containing data to use for - the targets. + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' properties: - key: - description: The key to select. + containerName: + description: 'Container name: required for volumes, + optional for env vars' type: string - name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' type: string - optional: - description: Specify whether the ConfigMap or - its key must be defined - type: boolean required: - - key + - resource type: object - secret: - description: Secret containing data to use for the - targets. + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace properties: key: description: The key of the secret to select from. Must be a valid secret key. type: string name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string optional: - description: Specify whether the Secret or its - key must be defined + description: Specify whether the Secret or its key + must be defined type: boolean required: - key type: object type: object - certFile: - description: Path to the client cert file in the Prometheus - container for the targets. - type: string - insecureSkipVerify: - description: Disable target certificate validation. - type: boolean - keyFile: - description: Path to the client key file in the Prometheus - container for the targets. - type: string - keySecret: - description: Secret containing the client key file for - the targets. + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be a + C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined + description: Specify whether the ConfigMap must be defined type: boolean - required: - - key type: object - serverName: - description: Used to verify the hostname for the targets. 
+ prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object type: object - required: - - name - - namespace - - port - type: object - type: array - required: - - alertmanagers - type: object - apiserverConfig: - description: APIServerConfig allows specifying a host and auth methods - to access apiserver. If left empty, Prometheus is assumed to run inside - of the cluster and will discover API servers automatically and use - the pod's CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/. - properties: - basicAuth: - description: BasicAuth allow an endpoint to authenticate over basic - authentication - properties: - password: - description: The secret in the service monitor namespace that - contains the password for authentication. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - username: - description: The secret in the service monitor namespace that - contains the username for authentication. + type: array + image: + description: 'Docker image name. 
More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in + response to container lifecycle events. Cannot be updated. properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. 
Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. 
The handler is not called if the container + crashes or exits. The reason for termination is passed to + the handler. The Pod''s termination grace period countdown + begins before the PreStop hooked is executed. Regardless + of the outcome of the handler, the container will eventually + terminate within the Pod''s termination grace period. Other + management of the container blocks until the hook completes + or until the termination grace period is reached. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object type: object - type: object - bearerToken: - description: Bearer token for accessing apiserver. - type: string - bearerTokenFile: - description: File to read bearer token for accessing apiserver. - type: string - host: - description: Host of apiserver. A valid string consisting of a hostname - or IP followed by an optional port number - type: string - tlsConfig: - description: TLS Config to use for accessing apiserver. - properties: - ca: - description: Stuct containing the CA cert to use for the targets. + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - configMap: - description: ConfigMap containing data to use for the targets. + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. properties: - key: - description: The key to select. 
+ command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. 
type: string - optional: - description: Specify whether the ConfigMap or its key - must be defined - type: boolean required: - - key + - port type: object - secret: - description: Secret containing data to use for the targets. + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - key + - port type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. 
More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer type: object - caFile: - description: Path to the CA cert in the Prometheus container - to use for the targets. + name: + description: Name of the container specified as a DNS_LABEL. Each + container in a pod must have a unique name (DNS_LABEL). Cannot + be updated. type: string - cert: - description: Struct containing the client cert file for the - targets. + ports: + description: List of ports to expose from the container. Exposing + a port here gives the system additional information about the + network connections a container uses, but is primarily informational. + Not specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Cannot be updated. + items: + description: ContainerPort represents a network port in a single + container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod must + have a unique name. Name for the port that can be referred + to by services. + type: string + protocol: + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + description: 'Periodic probe of container service readiness. 
Container + will be removed from service endpoints if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - configMap: - description: ConfigMap containing data to use for the targets. + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. properties: - key: - description: The key to select. + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string - optional: - description: Specify whether the ConfigMap or its key - must be defined - type: boolean required: - - key + - port type: object - secret: - description: Secret containing data to use for the targets. + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. 
Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - key + - port type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer type: object - certFile: - description: Path to the client cert file in the Prometheus - container for the targets. - type: string - insecureSkipVerify: - description: Disable target certificate validation. - type: boolean - keyFile: - description: Path to the client key file in the Prometheus container - for the targets. - type: string - keySecret: - description: Secret containing the client key file for the targets. + resources: + description: 'Compute Resources required by this container. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object type: object - serverName: - description: Used to verify the hostname for the targets. - type: string - type: object - required: - - host - type: object - arbitraryFSAccessThroughSMs: - description: ArbitraryFSAccessThroughSMs configures whether configuration - based on a service monitor can access arbitrary files on the file - system of the Prometheus container e.g. bearer token files. - properties: - deny: - type: boolean - type: object - baseImage: - description: Base image to use for a Prometheus deployment. - type: string - configMaps: - description: ConfigMaps is a list of ConfigMaps in the same namespace - as the Prometheus object, which shall be mounted into the Prometheus - Pods. The ConfigMaps are mounted into /etc/prometheus/configmaps/. - items: - type: string - type: array - containers: - description: 'Containers allows injecting additional containers or modifying - operator generated containers. This can be used to allow adding an - authentication proxy to a Prometheus pod or to change the behavior - of an operator generated container. Containers described here modify - an operator generated container if they share the same name and modifications - are done via a strategic merge patch. The current container names - are: `prometheus`, `prometheus-config-reloader`, `rules-configmap-reloader`, - and `thanos-sidecar`. Overriding containers is entirely outside the - scope of what the maintainers will support and by doing so, you accept - that this behaviour may break at any time without notice.' - items: - description: A single application container that you want to run within - a pod. - properties: - args: - description: 'Arguments to the entrypoint. The docker image''s - CMD is used if this is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. 
If a variable - cannot be resolved, the reference in the input string will be - unchanged. The $(VAR_NAME) syntax can be escaped with a double - $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - regardless of whether the variable exists or not. Cannot be - updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: - type: string - type: array - command: - description: 'Entrypoint array. Not executed within a shell. The - docker image''s ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, the reference - in the input string will be unchanged. The $(VAR_NAME) syntax - can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable exists - or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: - type: string - type: array - env: - description: List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable present - in a Container. + securityContext: + description: 'Security options the pod should run with. More info: + https://kubernetes.io/docs/concepts/policy/security-context/ + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' properties: - name: - description: Name of the environment variable. Must be a - C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. 
The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - required: - - key - type: object - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, metadata.labels, metadata.annotations, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. - type: string - required: - - fieldPath - type: object - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - description: Specifies the output format of the - exposed resources, defaults to "1" - type: string - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object + level: + description: Level is SELinux level label that applies + to the container. 
+ type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. This field is beta-level and may be + disabled with the WindowsRunAsUserName feature flag. + type: string type: object - required: - - name type: object - type: array - envFrom: - description: List of sources to populate environment variables - in the container. The keys defined within a source must be a - C_IDENTIFIER. All invalid keys will be reported as an event - when the container is starting. 
When a key exists in multiple - sources, the value associated with the last source will take - precedence. Values defined by an Env with a duplicate key will - take precedence. Cannot be updated. - items: - description: EnvFromSource represents the source of a set of - ConfigMaps + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod will + be restarted, just as if the livenessProbe failed. This can + be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. This + cannot be updated. This is an alpha feature enabled by the StartupProbe + feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - configMapRef: - description: The ConfigMap to select from + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. 
+ format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - optional: - description: Specify whether the ConfigMap must be defined - type: boolean + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port type: object - prefix: - description: An optional identifier to prepend to each key - in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. 
+ format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - optional: - description: Specify whether the Secret must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer type: object - type: array - image: - description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management - to default or override container images in workload controllers - like Deployments and StatefulSets.' - type: string - imagePullPolicy: - description: 'Image pull policy. One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent - otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' - type: string - lifecycle: - description: Actions that the management system should take in - response to container lifecycle events. Cannot be updated. - properties: - postStart: - description: 'PostStart is called immediately after a container - is created. 
If the handler fails, the container is terminated - and restarted according to its restart policy. Other management - of the container blocks until the hook completes. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + stdin: + description: Whether this container should allocate a buffer for + stdin in the container runtime. If this is not set, reads from + stdin in the container will always result in EOF. Default is + false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin + channel after it has been opened by a single attach. When stdin + is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container + start, is empty until the first client attaches to stdin, and + then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container + is restarted. If this flag is false, a container processes that + reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated by the + node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. + File will use the contents of terminationMessagePath to populate + the container status message on both success and failure. FallbackToLogsOnError + will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. 
+ The log output is limited to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. This is a beta feature. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. 
Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name type: object - preStop: - description: 'PreStop is called immediately before a container - is terminated due to an API request or management event - such as liveness/startup probe failure, preemption, resource - contention, etc. The handler is not called if the container - crashes or exits. The reason for termination is passed to - the handler. The Pod''s termination grace period countdown - begins before the PreStop hooked is executed. Regardless - of the outcome of the handler, the container will eventually - terminate within the Pod''s termination grace period. Other - management of the container blocks until the hook completes - or until the termination grace period is reached. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. 
+ items: + description: VolumeMount describes a mounting of a Volume within + a container. properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might be + configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + disableCompaction: + description: Disable prometheus compaction. + type: boolean + enableAdminAPI: + description: 'Enable access to prometheus web admin API. Defaults to + the value of `false`. WARNING: Enabling the admin APIs enables mutating + endpoints, to delete data, shutdown Prometheus, and more. 
Enabling + this should be done with care and the user is advised to add additional + authentication authorization via a proxy to ensure only clients authorized + to perform these actions can do so. For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis' + type: boolean + enforcedNamespaceLabel: + description: EnforcedNamespaceLabel enforces adding a namespace label + of origin for each alert and metric that is user created. The label + value will always be the namespace of the object that is being created. + type: string + evaluationInterval: + description: Interval between consecutive evaluations. + type: string + externalLabels: + additionalProperties: + type: string + description: The labels to add to any time series or alerts when communicating + with external systems (federation, remote storage, Alertmanager). + type: object + externalUrl: + description: The external URL the Prometheus instances will be available + under. This is necessary to generate correct URLs. This is necessary + if Prometheus is not served from root of a DNS name. + type: string + ignoreNamespaceSelectors: + description: IgnoreNamespaceSelectors if set to true will ignore NamespaceSelector + settings from the podmonitor and servicemonitor configs, and they + will only discover endpoints within their current namespace. Defaults + to false. + type: boolean + image: + description: Image if specified has precedence over baseImage, tag and + sha combinations. Specifying the version is still necessary to ensure + the Prometheus Operator knows what version of Prometheus is being + configured. 
+ type: string + imagePullSecrets: + description: An optional list of references to secrets in the same namespace + to use for pulling prometheus and alertmanager images from registries + see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + items: + description: LocalObjectReference contains enough information to let + you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + initContainers: + description: 'InitContainers allows adding initContainers to the pod + definition. Those can be used to e.g. fetch secrets for injection + into the Prometheus configuration from external sources. Any errors + during the execution of an initContainer will lead to a restart of + the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + Using initContainers for any use case other then secret fetching is + entirely outside the scope of what the maintainers will support and + by doing so, you accept that this behaviour may break at any time + without notice.' + items: + description: A single application container that you want to run within + a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will be + unchanged. The $(VAR_NAME) syntax can be escaped with a double + $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + regardless of whether the variable exists or not. Cannot be + updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The + docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. The $(VAR_NAME) syntax + can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a + variable cannot be resolved, the reference in the input + string will be unchanged. The $(VAR_NAME) syntax can be + escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable + exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. 
- The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object type: object - httpGet: - description: HTTPGet specifies the http request to perform. + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be a + C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. 
- items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - required: - - port + optional: + description: Specify whether the ConfigMap must be defined + type: boolean type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. 
- x-kubernetes-int-or-string: true - required: - - port + optional: + description: Specify whether the Secret must be defined + type: boolean type: object type: object - type: object - livenessProbe: - description: 'Periodic probe of container liveness. Container - will be restarted if the probe fails. Cannot be updated. More - info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' 
+ type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in + response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. properties: - name: - description: The header field name + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - value: - description: The header field value + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - - name - - value + - port type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. 
TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - name: - description: Name of the container specified as a DNS_LABEL. Each - container in a pod must have a unique name (DNS_LABEL). Cannot - be updated. - type: string - ports: - description: List of ports to expose from the container. Exposing - a port here gives the system additional information about the - network connections a container uses, but is primarily informational. - Not specifying a port here DOES NOT prevent that port from being - exposed. Any port which is listening on the default "0.0.0.0" - address inside a container will be accessible from the network. - Cannot be updated. - items: - description: ContainerPort represents a network port in a single - container. + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The reason for termination is passed to + the handler. The Pod''s termination grace period countdown + begins before the PreStop hooked is executed. Regardless + of the outcome of the handler, the container will eventually + terminate within the Pod''s termination grace period. Other + management of the container blocks until the hook completes + or until the termination grace period is reached. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - containerPort: - description: Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. 
The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. format: int32 type: integer - hostIP: - description: What host IP to bind the external port to. - type: string - hostPort: - description: Number of port to expose on the host. If specified, - this must be a valid port number, 0 < x < 65536. If HostNetwork - is specified, this must match ContainerPort. Most containers - do not need this. + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer - name: - description: If specified, this must be an IANA_SVC_NAME - and unique within the pod. Each named port in a pod must - have a unique name. Name for the port that can be referred - to by services. - type: string - protocol: - description: Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - readinessProbe: - description: 'Periodic probe of container service readiness. Container - will be removed from service endpoints if the probe fails. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. 
+ format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each + container in a pod must have a unique name (DNS_LABEL). Cannot + be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing + a port here gives the system additional information about the + network connections a container uses, but is primarily informational. + Not specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Cannot be updated. + items: + description: ContainerPort represents a network port in a single + container. properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. 
- type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + hostPort: + description: Number of port to expose on the host. If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod must + have a unique name. Name for the port that can be referred + to by services. type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. 
- format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + protocol: + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - resources: - description: 'Compute Resources required by this container. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + - containerPort type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - securityContext: - description: 'Security options the pod should run with. More info: - https://kubernetes.io/docs/concepts/policy/security-context/ - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' - properties: - allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls whether a - process can gain more privileges than its parent process. - This bool directly controls if the no_new_privs flag will - be set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run as Privileged - 2) has CAP_SYS_ADMIN' - type: boolean - capabilities: - description: The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the - container runtime. - properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type + type: array + readinessProbe: + description: 'Periodic probe of container service readiness. Container + will be removed from service endpoints if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - type: array - type: object - privileged: - description: Run container in privileged mode. Processes in - privileged containers are essentially equivalent to root - on the host. Defaults to false. - type: boolean - procMount: - description: procMount denotes the type of proc mount to use - for the containers. The default is DefaultProcMount which - uses the container runtime defaults for readonly paths and - masked paths. This requires the ProcMountType feature flag - to be enabled. - type: string - readOnlyRootFilesystem: - description: Whether this container has a read-only root filesystem. - Default is false. - type: boolean - runAsGroup: - description: The GID to run the entrypoint of the container - process. Uses runtime default if unset. May also be set - in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. 
- format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container must run as a non-root - user. If true, the Kubelet will validate the image at runtime - to ensure that it does not run as UID 0 (root) and fail - to start the container if it does. If unset or false, no - such validation will be performed. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint of the container - process. Defaults to user specified in image metadata if - unspecified. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random - SELinux context for each container. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. - properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. - type: string - type: - description: Type is a SELinux type label that applies - to the container. + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: type: string - user: - description: User is a SELinux user label that applies - to the container. + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: type: string - type: object - windowsOptions: - description: The Windows specific settings applied to all - containers. If unspecified, the options from the PodSecurityContext - will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. More info: + https://kubernetes.io/docs/concepts/policy/security-context/ + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. 
Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. This field is beta-level and may be + disabled with the WindowsRunAsUserName feature flag. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. 
If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod will + be restarted, just as if the livenessProbe failed. This can + be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. This + cannot be updated. This is an alpha feature enabled by the StartupProbe + feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for + stdin in the container runtime. If this is not set, reads from + stdin in the container will always result in EOF. Default is + false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin + channel after it has been opened by a single attach. When stdin + is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container + start, is empty until the first client attaches to stdin, and + then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container + is restarted. If this flag is false, a container processes that + reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated by the + node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. + File will use the contents of terminationMessagePath to populate + the container status message on both success and failure. 
FallbackToLogsOnError + will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. This is a beta feature. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. properties: - gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named - by the GMSACredentialSpecName field. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. type: string - runAsUserName: - description: The UserName in Windows to run the entrypoint - of the container process. Defaults to the user specified - in image metadata if unspecified. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. This field is beta-level and may be - disabled with the WindowsRunAsUserName feature flag. 
+ name: + description: name must match the name of a persistentVolumeClaim + in the pod type: string + required: + - devicePath + - name type: object - type: object - startupProbe: - description: 'StartupProbe indicates that the Pod has successfully - initialized. If specified, no other probes are executed until - this completes successfully. If this probe fails, the Pod will - be restarted, just as if the livenessProbe failed. This can - be used to provide different probe parameters at the beginning - of a Pod''s lifecycle, when it might take a long time to load - data or warm a cache, than during steady-state operation. This - cannot be updated. This is an alpha feature enabled by the StartupProbe - feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within + a container. 
properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + name: + description: This must match the Name of a Volume. type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. 
- format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true required: - - port + - mountPath + - name type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might be + configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + listenLocal: + description: ListenLocal makes the Prometheus server listen on loopback, + so that it does not bind against the Pod IP. + type: boolean + logFormat: + description: Log format for Prometheus to be configured with. 
+ type: string + logLevel: + description: Log level for Prometheus to be configured with. + type: string + nodeSelector: + additionalProperties: + type: string + description: Define which Nodes the Pods are scheduled on. + type: object + overrideHonorLabels: + description: OverrideHonorLabels if set to true overrides all user configured + honor_labels. If HonorLabels is set in ServiceMonitor or PodMonitor + to true, this overrides honor_labels to false. + type: boolean + overrideHonorTimestamps: + description: OverrideHonorTimestamps allows to globally enforce honoring + timestamps in all scrape configs. + type: boolean + paused: + description: When a Prometheus deployment is paused, no actions except + for deletion will be performed on the underlying objects. + type: boolean + podMetadata: + description: PodMetadata configures Labels and Annotations which are + propagated to the prometheus pods. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored + with a resource that may be set by external tools to store and + retrieve arbitrary metadata. They are not queryable and should + be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' type: object - stdin: - description: Whether this container should allocate a buffer for - stdin in the container runtime. If this is not set, reads from - stdin in the container will always result in EOF. Default is - false. - type: boolean - stdinOnce: - description: Whether the container runtime should close the stdin - channel after it has been opened by a single attach. When stdin - is true the stdin stream will remain open across multiple attach - sessions. 
If stdinOnce is set to true, stdin is opened on container - start, is empty until the first client attaches to stdin, and - then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container - is restarted. If this flag is false, a container processes that - reads from stdin will never receive an EOF. Default is false - type: boolean - terminationMessagePath: - description: 'Optional: Path at which the file to which the container''s - termination message will be written is mounted into the container''s - filesystem. Message written is intended to be brief final status, - such as an assertion failure message. Will be truncated by the - node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. Defaults to /dev/termination-log. - Cannot be updated.' - type: string - terminationMessagePolicy: - description: Indicate how the termination message should be populated. - File will use the contents of terminationMessagePath to populate - the container status message on both success and failure. FallbackToLogsOnError - will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever - is smaller. Defaults to File. Cannot be updated. - type: string - tty: - description: Whether this container should allocate a TTY for - itself, also requires 'stdin' to be true. Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of block devices to be - used by the container. This is a beta feature. + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to + organize and categorize (scope and select) objects. May match + selectors of replication controllers and services. 
More info: + http://kubernetes.io/docs/user-guide/labels' + type: object + type: object + podMonitorNamespaceSelector: + description: Namespaces to be selected for PodMonitor discovery. If + nil, only check own namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. items: - description: volumeDevice describes a mapping of a raw block - device within a container. + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. properties: - devicePath: - description: devicePath is the path inside of the container - that the device will be mapped to. + key: + description: key is the label key that the selector applies + to. type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array required: - - devicePath - - name + - key + - operator type: object type: array - volumeMounts: - description: Pod volumes to mount into the container's filesystem. - Cannot be updated. + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. 
+ type: object + type: object + podMonitorSelector: + description: '*Experimental* PodMonitors to be selected for target discovery.' + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. items: - description: VolumeMount describes a mounting of a Volume within - a container. + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. properties: - mountPath: - description: Path within the container at which the volume - should be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are - propagated from the host to container and the other way - around. When not set, MountPropagationNone is used. This - field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise - (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). + key: + description: key is the label key that the selector applies + to. type: string - subPathExpr: - description: Expanded path within the volume from which - the container's volume should be mounted. Behaves similarly - to SubPath but environment variable references $(VAR_NAME) - are expanded using the container's environment. Defaults - to "" (volume's root). SubPathExpr and SubPath are mutually - exclusive. + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. 
If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array required: - - mountPath - - name + - key + - operator type: object type: array - workingDir: - description: Container's working directory. If not specified, - the container runtime's default will be used, which might be - configured in the container image. Cannot be updated. - type: string - required: - - name + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object type: object - type: array - disableCompaction: - description: Disable prometheus compaction. - type: boolean - enableAdminAPI: - description: 'Enable access to prometheus web admin API. Defaults to - the value of `false`. WARNING: Enabling the admin APIs enables mutating - endpoints, to delete data, shutdown Prometheus, and more. Enabling - this should be done with care and the user is advised to add additional - authentication authorization via a proxy to ensure only clients authorized - to perform these actions can do so. For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis' - type: boolean - enforcedNamespaceLabel: - description: EnforcedNamespaceLabel enforces adding a namespace label - of origin for each alert and metric that is user created. The label - value will always be the namespace of the object that is being created. - type: string - evaluationInterval: - description: Interval between consecutive evaluations. - type: string - externalLabels: - additionalProperties: + portName: + description: Port name used for the pods and governing service. 
This + defaults to web + type: string + priorityClassName: + description: Priority class assigned to the Pods type: string - description: The labels to add to any time series or alerts when communicating - with external systems (federation, remote storage, Alertmanager). - type: object - externalUrl: - description: The external URL the Prometheus instances will be available - under. This is necessary to generate correct URLs. This is necessary - if Prometheus is not served from root of a DNS name. - type: string - ignoreNamespaceSelectors: - description: IgnoreNamespaceSelectors if set to true will ignore NamespaceSelector - settings from the podmonitor and servicemonitor configs, and they - will only discover endpoints within their current namespace. Defaults - to false. - type: boolean - image: - description: Image if specified has precedence over baseImage, tag and - sha combinations. Specifying the version is still necessary to ensure - the Prometheus Operator knows what version of Prometheus is being - configured. - type: string - imagePullSecrets: - description: An optional list of references to secrets in the same namespace - to use for pulling prometheus and alertmanager images from registries - see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod - items: - description: LocalObjectReference contains enough information to let - you locate the referenced object inside the same namespace. + prometheusExternalLabelName: + description: Name of Prometheus external label used to denote Prometheus + instance name. Defaults to the value of `prometheus`. External label + will _not_ be added when value is set to empty string (`""`). + type: string + query: + description: QuerySpec defines the query command line flags when starting + Prometheus. properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. 
apiVersion, kind, uid?' + lookbackDelta: + description: The delta difference allowed for retrieving metrics + during expression evaluations. + type: string + maxConcurrency: + description: Number of concurrent queries that can be run at once. + format: int32 + type: integer + maxSamples: + description: Maximum number of samples a single query can load into + memory. Note that queries will fail if they would load more samples + than this into memory, so this also limits the number of samples + a query can return. + format: int32 + type: integer + timeout: + description: Maximum time a query may take before being aborted. type: string type: object - type: array - initContainers: - description: 'InitContainers allows adding initContainers to the pod - definition. Those can be used to e.g. fetch secrets for injection - into the Prometheus configuration from external sources. Any errors - during the execution of an initContainer will lead to a restart of - the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - Using initContainers for any use case other then secret fetching is - entirely outside the scope of what the maintainers will support and - by doing so, you accept that this behaviour may break at any time - without notice.' - items: - description: A single application container that you want to run within - a pod. - properties: - args: - description: 'Arguments to the entrypoint. The docker image''s - CMD is used if this is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. If a variable - cannot be resolved, the reference in the input string will be - unchanged. The $(VAR_NAME) syntax can be escaped with a double - $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - regardless of whether the variable exists or not. Cannot be - updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: - type: string - type: array - command: - description: 'Entrypoint array. Not executed within a shell. The - docker image''s ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, the reference - in the input string will be unchanged. The $(VAR_NAME) syntax - can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable exists - or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: - type: string - type: array - env: - description: List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable present - in a Container. + remoteRead: + description: If specified, the remote_read spec. This is an experimental + feature, it may change in any upcoming release in a breaking way. + items: + description: RemoteReadSpec defines the remote_read configuration + for prometheus. + properties: + basicAuth: + description: BasicAuth for the URL. properties: - name: - description: Name of the environment variable. Must be a - C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' 
- type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. + password: + description: The secret in the service monitor namespace that + contains the password for authentication. properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - required: - - key - type: object - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, metadata.labels, metadata.annotations, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. - type: string - required: - - fieldPath - type: object - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - description: Specifies the output format of the - exposed resources, defaults to "1" - type: string - resource: - description: 'Required: resource to select' - type: string - required: - - resource + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that + contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + bearerToken: + description: bearer token for remote read. + type: string + bearerTokenFile: + description: File to read bearer token for remote read. + type: string + proxyUrl: + description: Optional ProxyURL + type: string + readRecent: + description: Whether reads should be made for queries for time + ranges that the local storage should have complete data for. + type: boolean + remoteTimeout: + description: Timeout for requests to the remote read endpoint. + type: string + requiredMatchers: + additionalProperties: + type: string + description: An optional list of equality matchers which have + to be present in a selector to query the remote read endpoint. + type: object + tlsConfig: + description: TLS Config to use for remote read. + properties: + ca: + description: Stuct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key type: object - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace + secret: + description: Secret containing data to use for the targets. properties: key: description: The key of the secret to select from. Must @@ -2318,1172 +3496,1006 @@ spec: - key type: object type: object - required: - - name + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. + type: string + cert: + description: Struct containing the client cert file for the + targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. 
+ type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus + container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the + targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string type: object - type: array - envFrom: - description: List of sources to populate environment variables - in the container. The keys defined within a source must be a - C_IDENTIFIER. All invalid keys will be reported as an event - when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take - precedence. Values defined by an Env with a duplicate key will - take precedence. Cannot be updated. - items: - description: EnvFromSource represents the source of a set of - ConfigMaps + url: + description: The URL of the endpoint to send samples to. + type: string + required: + - url + type: object + type: array + remoteWrite: + description: If specified, the remote_write spec. This is an experimental + feature, it may change in any upcoming release in a breaking way. + items: + description: RemoteWriteSpec defines the remote_write configuration + for prometheus. + properties: + basicAuth: + description: BasicAuth for the URL. 
properties: - configMapRef: - description: The ConfigMap to select from + password: + description: The secret in the service monitor namespace that + contains the password for authentication. properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the ConfigMap must be defined + description: Specify whether the Secret or its key must + be defined type: boolean + required: + - key type: object - prefix: - description: An optional identifier to prepend to each key - in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from + username: + description: The secret in the service monitor namespace that + contains the username for authentication. properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret must be defined + description: Specify whether the Secret or its key must + be defined type: boolean + required: + - key type: object type: object - type: array - image: - description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management - to default or override container images in workload controllers - like Deployments and StatefulSets.' - type: string - imagePullPolicy: - description: 'Image pull policy. One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent - otherwise. 
Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' - type: string - lifecycle: - description: Actions that the management system should take in - response to container lifecycle events. Cannot be updated. - properties: - postStart: - description: 'PostStart is called immediately after a container - is created. If the handler fails, the container is terminated - and restarted according to its restart policy. Other management - of the container blocks until the hook completes. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. 
- type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: 'PreStop is called immediately before a container - is terminated due to an API request or management event - such as liveness/startup probe failure, preemption, resource - contention, etc. The handler is not called if the container - crashes or exits. The reason for termination is passed to - the handler. The Pod''s termination grace period countdown - begins before the PreStop hooked is executed. Regardless - of the outcome of the handler, the container will eventually - terminate within the Pod''s termination grace period. Other - management of the container blocks until the hook completes - or until the termination grace period is reached. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. 
- properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. 
Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: 'Periodic probe of container liveness. Container - will be restarted if the probe fails. Cannot be updated. More - info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. 
- type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - name: - description: Name of the container specified as a DNS_LABEL. Each - container in a pod must have a unique name (DNS_LABEL). 
Cannot - be updated. - type: string - ports: - description: List of ports to expose from the container. Exposing - a port here gives the system additional information about the - network connections a container uses, but is primarily informational. - Not specifying a port here DOES NOT prevent that port from being - exposed. Any port which is listening on the default "0.0.0.0" - address inside a container will be accessible from the network. - Cannot be updated. - items: - description: ContainerPort represents a network port in a single - container. + bearerToken: + description: File to read bearer token for remote write. + type: string + bearerTokenFile: + description: File to read bearer token for remote write. + type: string + proxyUrl: + description: Optional ProxyURL + type: string + queueConfig: + description: QueueConfig allows tuning of the remote write queue + parameters. properties: - containerPort: - description: Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. - format: int32 - type: integer - hostIP: - description: What host IP to bind the external port to. + batchSendDeadline: + description: BatchSendDeadline is the maximum time a sample + will wait in buffer. type: string - hostPort: - description: Number of port to expose on the host. If specified, - this must be a valid port number, 0 < x < 65536. If HostNetwork - is specified, this must match ContainerPort. Most containers - do not need this. - format: int32 + capacity: + description: Capacity is the number of samples to buffer per + shard before we start dropping them. type: integer - name: - description: If specified, this must be an IANA_SVC_NAME - and unique within the pod. Each named port in a pod must - have a unique name. Name for the port that can be referred - to by services. + maxBackoff: + description: MaxBackoff is the maximum retry delay. type: string - protocol: - description: Protocol for port. Must be UDP, TCP, or SCTP. 
- Defaults to "TCP". + maxRetries: + description: MaxRetries is the maximum number of times to + retry a batch on recoverable errors. + type: integer + maxSamplesPerSend: + description: MaxSamplesPerSend is the maximum number of samples + per send. + type: integer + maxShards: + description: MaxShards is the maximum number of shards, i.e. + amount of concurrency. + type: integer + minBackoff: + description: MinBackoff is the initial retry delay. Gets doubled + for every retry. type: string - required: - - containerPort + minShards: + description: MinShards is the minimum number of shards, i.e. + amount of concurrency. + type: integer type: object - type: array - readinessProbe: - description: 'Periodic probe of container service readiness. Container - will be removed from service endpoints if the probe fails. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. 
- type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes + remoteTimeout: + description: Timeout for requests to the remote write endpoint. + type: string + tlsConfig: + description: TLS Config to use for remote write. + properties: + ca: + description: Stuct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. properties: + key: + description: The key to select. + type: string name: - description: The header field name + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. type: string - value: - description: The header field value + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean required: - - name - - value + - key type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. 
- type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - resources: - description: 'Compute Resources required by this container. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - properties: - limits: - additionalProperties: + type: object + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. type: string - description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: + cert: + description: Struct containing the client cert file for the + targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. type: string - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - securityContext: - description: 'Security options the pod should run with. 
More info: - https://kubernetes.io/docs/concepts/policy/security-context/ - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' - properties: - allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls whether a - process can gain more privileges than its parent process. - This bool directly controls if the no_new_privs flag will - be set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run as Privileged - 2) has CAP_SYS_ADMIN' - type: boolean - capabilities: - description: The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the - container runtime. - properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus + container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the + targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - type: array - type: object - privileged: - description: Run container in privileged mode. Processes in - privileged containers are essentially equivalent to root - on the host. Defaults to false. - type: boolean - procMount: - description: procMount denotes the type of proc mount to use - for the containers. 
The default is DefaultProcMount which - uses the container runtime defaults for readonly paths and - masked paths. This requires the ProcMountType feature flag - to be enabled. - type: string - readOnlyRootFilesystem: - description: Whether this container has a read-only root filesystem. - Default is false. - type: boolean - runAsGroup: - description: The GID to run the entrypoint of the container - process. Uses runtime default if unset. May also be set - in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. - format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container must run as a non-root - user. If true, the Kubelet will validate the image at runtime - to ensure that it does not run as UID 0 (root) and fail - to start the container if it does. If unset or false, no - such validation will be performed. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint of the container - process. Defaults to user specified in image metadata if - unspecified. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random - SELinux context for each container. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. 
+ type: string + type: object + url: + description: The URL of the endpoint to send samples to. + type: string + writeRelabelConfigs: + description: The list of remote write relabel configurations. + items: + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It defines + ``-section of Prometheus configuration. + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. + action: + description: Action to perform based on regex matching. + Default is 'replace' type: string - type: - description: Type is a SELinux type label that applies - to the container. - type: string - user: - description: User is a SELinux user label that applies - to the container. - type: string - type: object - windowsOptions: - description: The Windows specific settings applied to all - containers. If unspecified, the options from the PodSecurityContext - will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - properties: - gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named - by the GMSACredentialSpecName field. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. + modulus: + description: Modulus to take of the hash of the source label + values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. Default is '(.*)' type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. 
This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' type: string - runAsUserName: - description: The UserName in Windows to run the entrypoint - of the container process. Defaults to the user specified - in image metadata if unspecified. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. This field is beta-level and may be - disabled with the WindowsRunAsUserName feature flag. + separator: + description: Separator placed between concatenated source + label values. default is ';'. type: string - type: object - type: object - startupProbe: - description: 'StartupProbe indicates that the Pod has successfully - initialized. If specified, no other probes are executed until - this completes successfully. If this probe fails, the Pod will - be restarted, just as if the livenessProbe failed. This can - be used to provide different probe parameters at the beginning - of a Pod''s lifecycle, when it might take a long time to load - data or warm a cache, than during steady-state operation. This - cannot be updated. This is an alpha feature enabled by the StartupProbe - feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. 
To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular expression + for the replace, keep, and drop actions. items: type: string type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer + type: array + required: + - url + type: object + type: array + replicaExternalLabelName: + description: Name of Prometheus external label used to denote replica + name. Defaults to the value of `prometheus_replica`. External label + will _not_ be added when value is set to empty string (`""`). + type: string + replicas: + description: Number of instances to deploy for a Prometheus deployment. + format: int32 + type: integer + resources: + description: Define resources requests and limits for single Pods. + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of compute resources + allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' type: object - stdin: - description: Whether this container should allocate a buffer for - stdin in the container runtime. If this is not set, reads from - stdin in the container will always result in EOF. Default is - false. - type: boolean - stdinOnce: - description: Whether the container runtime should close the stdin - channel after it has been opened by a single attach. When stdin - is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container - start, is empty until the first client attaches to stdin, and - then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container - is restarted. If this flag is false, a container processes that - reads from stdin will never receive an EOF. Default is false - type: boolean - terminationMessagePath: - description: 'Optional: Path at which the file to which the container''s - termination message will be written is mounted into the container''s - filesystem. Message written is intended to be brief final status, - such as an assertion failure message. Will be truncated by the - node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. Defaults to /dev/termination-log. - Cannot be updated.' - type: string - terminationMessagePolicy: - description: Indicate how the termination message should be populated. - File will use the contents of terminationMessagePath to populate - the container status message on both success and failure. FallbackToLogsOnError - will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever - is smaller. Defaults to File. Cannot be updated. 
- type: string - tty: - description: Whether this container should allocate a TTY for - itself, also requires 'stdin' to be true. Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of block devices to be - used by the container. This is a beta feature. + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount of compute resources + required. If Requests is omitted for a container, it defaults + to Limits if that is explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + retention: + description: Time duration Prometheus shall retain data for. Default + is '24h', and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` + (milliseconds seconds minutes hours days weeks years). + type: string + retentionSize: + description: Maximum amount of disk space used by blocks. + type: string + routePrefix: + description: The route prefix Prometheus registers HTTP handlers for. + This is useful, if using ExternalURL and a proxy is rewriting HTTP + routes of a request, and the actual ExternalURL is still true, but + the server serves requests under a different route prefix. For example + for use with `kubectl proxy`. + type: string + ruleNamespaceSelector: + description: Namespaces to be selected for PrometheusRules discovery. + If unspecified, only the same namespace as the Prometheus object is + in is used. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. items: - description: volumeDevice describes a mapping of a raw block - device within a container. + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. 
properties: - devicePath: - description: devicePath is the path inside of the container - that the device will be mapped to. + key: + description: key is the label key that the selector applies + to. type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array required: - - devicePath - - name + - key + - operator type: object type: array - volumeMounts: - description: Pod volumes to mount into the container's filesystem. - Cannot be updated. + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + ruleSelector: + description: A selector to select which PrometheusRules to mount for + loading alerting rules from. Until (excluding) Prometheus Operator + v0.24.0 Prometheus Operator will migrate any legacy rule ConfigMaps + to PrometheusRule custom resources selected by RuleSelector. Make + sure it does not match any config maps that you do not want to be + migrated. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. items: - description: VolumeMount describes a mounting of a Volume within - a container. 
+ description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. properties: - mountPath: - description: Path within the container at which the volume - should be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are - propagated from the host to container and the other way - around. When not set, MountPropagationNone is used. This - field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise - (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). + key: + description: key is the label key that the selector applies + to. type: string - subPathExpr: - description: Expanded path within the volume from which - the container's volume should be mounted. Behaves similarly - to SubPath but environment variable references $(VAR_NAME) - are expanded using the container's environment. Defaults - to "" (volume's root). SubPathExpr and SubPath are mutually - exclusive. + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array required: - - mountPath - - name + - key + - operator type: object type: array - workingDir: - description: Container's working directory. 
If not specified, - the container runtime's default will be used, which might be - configured in the container image. Cannot be updated. - type: string - required: - - name + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object type: object - type: array - listenLocal: - description: ListenLocal makes the Prometheus server listen on loopback, - so that it does not bind against the Pod IP. - type: boolean - logFormat: - description: Log format for Prometheus to be configured with. - type: string - logLevel: - description: Log level for Prometheus to be configured with. - type: string - nodeSelector: - additionalProperties: - type: string - description: Define which Nodes the Pods are scheduled on. - type: object - overrideHonorLabels: - description: OverrideHonorLabels if set to true overrides all user configured - honor_labels. If HonorLabels is set in ServiceMonitor or PodMonitor - to true, this overrides honor_labels to false. - type: boolean - overrideHonorTimestamps: - description: OverrideHonorTimestamps allows to globally enforce honoring - timestamps in all scrape configs. - type: boolean - paused: - description: When a Prometheus deployment is paused, no actions except - for deletion will be performed on the underlying objects. - type: boolean - podMetadata: - description: PodMetadata configures Labels and Annotations which are - propagated to the prometheus pods. - properties: - annotations: - additionalProperties: - type: string - description: 'Annotations is an unstructured key value map stored - with a resource that may be set by external tools to store and - retrieve arbitrary metadata. They are not queryable and should - be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations' - type: object - labels: - additionalProperties: - type: string - description: 'Map of string keys and values that can be used to - organize and categorize (scope and select) objects. May match - selectors of replication controllers and services. More info: - http://kubernetes.io/docs/user-guide/labels' - type: object - type: object - podMonitorNamespaceSelector: - description: Namespaces to be selected for PodMonitor discovery. If - nil, only check own namespace. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. + rules: + description: /--rules.*/ command-line arguments. + properties: + alert: + description: /--rules.alert.*/ command-line arguments properties: - key: - description: key is the label key that the selector applies - to. + forGracePeriod: + description: Minimum duration between alert and restored 'for' + state. This is maintained only for alerts with configured + 'for' time greater than grace period. type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. + forOutageTolerance: + description: Max time to tolerate prometheus outage for restoring + 'for' state of alert. + type: string + resendDelay: + description: Minimum amount of time to wait before resending + an alert to Alertmanager. type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. 
- items: - type: string - type: array - required: - - key - - operator type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - podMonitorSelector: - description: '*Experimental* PodMonitors to be selected for target discovery.' - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. + type: object + scrapeInterval: + description: Interval between consecutive scrapes. + type: string + secrets: + description: Secrets is a list of Secrets in the same namespace as the + Prometheus object, which shall be mounted into the Prometheus Pods. + The Secrets are mounted into /etc/prometheus/secrets/. + items: + type: string + type: array + securityContext: + description: SecurityContext holds pod-level security attributes and + common container settings. This defaults to the default PodSecurityContext. + properties: + fsGroup: + description: "A special supplemental group that applies to all containers + in a pod. Some volume types allow the Kubelet to change the ownership + of that volume to be owned by the pod: \n 1. The owning GID will + be the FSGroup 2. The setgid bit is set (new files created in + the volume will be owned by FSGroup) 3. The permission bits are + OR'd with rw-rw---- \n If unset, the Kubelet will not modify the + ownership and permissions of any volume." + format: int64 + type: integer + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. 
May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. May + also be set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux + context for each container. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. properties: - key: - description: key is the label key that the selector applies - to. + level: + description: Level is SELinux level label that applies to the + container. type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. + role: + description: Role is a SELinux role label that applies to the + container. type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. 
If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. - items: + type: + description: Type is a SELinux type label that applies to the + container. + type: string + user: + description: User is a SELinux user label that applies to the + container. + type: string + type: object + supplementalGroups: + description: A list of groups applied to the first process run in + each container, in addition to the container's primary GID. If + unspecified, no groups will be added to any container. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for + the pod. Pods with unsupported sysctls (by the container runtime) + might fail to launch. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set type: string - type: array - required: - - key - - operator + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. This field is alpha-level and is only + honored by servers that enable the WindowsGMSA feature flag. 
+ type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint of + the container process. Defaults to the user specified in image + metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. This + field is beta-level and may be disabled with the WindowsRunAsUserName + feature flag. + type: string type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - portName: - description: Port name used for the pods and governing service. This - defaults to web - type: string - priorityClassName: - description: Priority class assigned to the Pods - type: string - prometheusExternalLabelName: - description: Name of Prometheus external label used to denote Prometheus - instance name. Defaults to the value of `prometheus`. External label - will _not_ be added when value is set to empty string (`""`). - type: string - query: - description: QuerySpec defines the query command line flags when starting - Prometheus. - properties: - lookbackDelta: - description: The delta difference allowed for retrieving metrics - during expression evaluations. - type: string - maxConcurrency: - description: Number of concurrent queries that can be run at once. - format: int32 - type: integer - maxSamples: - description: Maximum number of samples a single query can load into - memory. Note that queries will fail if they would load more samples - than this into memory, so this also limits the number of samples - a query can return. 
- format: int32 - type: integer - timeout: - description: Maximum time a query may take before being aborted. - type: string - type: object - remoteRead: - description: If specified, the remote_read spec. This is an experimental - feature, it may change in any upcoming release in a breaking way. - items: - description: RemoteReadSpec defines the remote_read configuration - for prometheus. + type: object + serviceAccountName: + description: ServiceAccountName is the name of the ServiceAccount to + use to run the Prometheus Pods. + type: string + serviceMonitorNamespaceSelector: + description: Namespaces to be selected for ServiceMonitor discovery. + If nil, only check own namespace. properties: - basicAuth: - description: BasicAuth for the URL. + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. 
+ type: object + type: object + serviceMonitorSelector: + description: ServiceMonitors to be selected for target discovery. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + sha: + description: SHA of Prometheus container image to be deployed. Defaults + to the value of `version`. Similar to a tag, but the SHA explicitly + deploys an immutable container image. Version and Tag are ignored + if SHA is set. + type: string + storage: + description: Storage spec to specify how storage shall be used. + properties: + emptyDir: + description: 'EmptyDirVolumeSource to be used by the Prometheus + StatefulSets. If specified, used in place of any volumeClaimTemplate. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' properties: - password: - description: The secret in the service monitor namespace that - contains the password for authentication. + medium: + description: 'What type of storage medium should back this directory. + The default is "" which means to use the node''s default medium. + Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + description: 'Total amount of local storage required for this + EmptyDir volume. The size limit is also applicable for memory + medium. The maximum usage on memory medium EmptyDir would + be the minimum value between the SizeLimit specified here + and the sum of memory limits of all containers in a pod. The + default is nil which means that the limit is undefined. More + info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + type: string + type: object + volumeClaimTemplate: + description: A PVC spec to be used by the Prometheus StatefulSets. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + type: object + spec: + description: 'Spec defines the desired characteristics of a + volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. + accessModes: + description: 'AccessModes contains the desired access modes + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: This field requires the VolumeSnapshotDataSource + alpha feature gate to be enabled and currently VolumeSnapshot + is the only supported data source. If the provisioner + can support VolumeSnapshot data source, it will create + a new volume and data will be restored to the volume at + the same time. If the provisioner does not support VolumeSnapshot + data source, volume will not be created and the failure + will be reported as an event. In the future, we plan to + support more data source types and the behavior of the + provisioner may change. + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, the + specified Kind must be in the core API group. For + any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of + compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for + binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
+ A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + volumeMode: + description: volumeMode defines what type of volume is required + by the claim. Value of Filesystem is implied when not + included in claim spec. This is a beta feature. + type: string + volumeName: + description: VolumeName is the binding reference to the + PersistentVolume backing this claim. type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key type: object - username: - description: The secret in the service monitor namespace that - contains the username for authentication. + status: + description: 'Status represents the current information/status + of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + accessModes: + description: 'AccessModes contains the actual access modes + the volume backing the PVC has. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + capacity: + additionalProperties: + type: string + description: Represents the actual resources of the underlying + volume. + type: object + conditions: + description: Current Condition of persistent volume claim. + If underlying persistent volume is being resized then + the Condition will be set to 'ResizeStarted'. + items: + description: PersistentVolumeClaimCondition contails details + about state of pvc + properties: + lastProbeTime: + description: Last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transitioned + from one status to another. + format: date-time + type: string + message: + description: Human-readable message indicating details + about last transition. + type: string + reason: + description: Unique, this should be a short, machine + understandable string that gives the reason for + condition's last transition. If it reports "ResizeStarted" + that means the underlying persistent volume is being + resized. + type: string + status: + type: string + type: + description: PersistentVolumeClaimConditionType is + a valid value of PersistentVolumeClaimCondition.Type + type: string + required: + - status + - type + type: object + type: array + phase: + description: Phase represents the current phase of PersistentVolumeClaim. type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key type: object type: object - bearerToken: - description: bearer token for remote read. - type: string - bearerTokenFile: - description: File to read bearer token for remote read. - type: string - proxyUrl: - description: Optional ProxyURL - type: string - readRecent: - description: Whether reads should be made for queries for time - ranges that the local storage should have complete data for. 
- type: boolean - remoteTimeout: - description: Timeout for requests to the remote read endpoint. + type: object + tag: + description: Tag of Prometheus container image to be deployed. Defaults + to the value of `version`. Version is ignored if Tag is set. + type: string + thanos: + description: "Thanos configuration allows configuring various aspects + of a Prometheus server in a Thanos environment. \n This section is + experimental, it may change significantly without deprecation notice + in any release. \n This is experimental and may change significantly + without backward compatibility in any release." + properties: + baseImage: + description: Thanos base image if other than default. type: string - requiredMatchers: - additionalProperties: - type: string - description: An optional list of equality matchers which have - to be present in a selector to query the remote read endpoint. - type: object - tlsConfig: - description: TLS Config to use for remote read. + grpcServerTlsConfig: + description: 'GRPCServerTLSConfig configures the gRPC server from + which Thanos Querier reads recorded rule data. Note: Currently + only the CAFile, CertFile, and KeyFile fields are supported. Maps + to the ''--grpc-server-tls-*'' CLI args.' properties: ca: description: Stuct containing the CA cert to use for the targets. properties: configMap: - description: ConfigMap containing data to use for the - targets. + description: ConfigMap containing data to use for the targets. properties: key: description: The key to select. type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string optional: - description: Specify whether the ConfigMap or its - key must be defined + description: Specify whether the ConfigMap or its key + must be defined type: boolean required: - key @@ -3497,12 +4509,11 @@ spec: type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined + description: Specify whether the Secret or its key must + be defined type: boolean required: - key @@ -3517,20 +4528,18 @@ spec: targets. properties: configMap: - description: ConfigMap containing data to use for the - targets. + description: ConfigMap containing data to use for the targets. properties: key: description: The key to select. type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the ConfigMap or its - key must be defined + description: Specify whether the ConfigMap or its key + must be defined type: boolean required: - key @@ -3544,12 +4553,11 @@ spec: type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the Secret or its key - must be defined + description: Specify whether the Secret or its key must + be defined type: boolean required: - key @@ -3563,12 +4571,11 @@ spec: description: Disable target certificate validation. 
type: boolean keyFile: - description: Path to the client key file in the Prometheus - container for the targets. + description: Path to the client key file in the Prometheus container + for the targets. type: string keySecret: - description: Secret containing the client key file for the - targets. + description: Secret containing the client key file for the targets. properties: key: description: The key of the secret to select from. Must @@ -3589,2415 +4596,1406 @@ spec: description: Used to verify the hostname for the targets. type: string type: object - url: - description: The URL of the endpoint to send samples to. + image: + description: Image if specified has precedence over baseImage, tag + and sha combinations. Specifying the version is still necessary + to ensure the Prometheus Operator knows what version of Thanos + is being configured. type: string - required: - - url - type: object - type: array - remoteWrite: - description: If specified, the remote_write spec. This is an experimental - feature, it may change in any upcoming release in a breaking way. - items: - description: RemoteWriteSpec defines the remote_write configuration - for prometheus. - properties: - basicAuth: - description: BasicAuth for the URL. + listenLocal: + description: ListenLocal makes the Thanos sidecar listen on loopback, + so that it does not bind against the Pod IP. + type: boolean + objectStorageConfig: + description: ObjectStorageConfig configures object storage in Thanos. properties: - password: - description: The secret in the service monitor namespace that - contains the password for authentication. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + resources: + description: Resources defines the resource requirements for the + Thanos sidecar. If not provided, no requests/limits will be set + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' type: object - username: - description: The secret in the service monitor namespace that - contains the username for authentication. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' type: object type: object - bearerToken: - description: File to read bearer token for remote write. + sha: + description: SHA of Thanos container image to be deployed. Defaults + to the value of `version`. Similar to a tag, but the SHA explicitly + deploys an immutable container image. Version and Tag are ignored + if SHA is set. type: string - bearerTokenFile: - description: File to read bearer token for remote write. - type: string - proxyUrl: - description: Optional ProxyURL + tag: + description: Tag of Thanos sidecar container image to be deployed. + Defaults to the value of `version`. Version is ignored if Tag + is set. type: string - queueConfig: - description: QueueConfig allows tuning of the remote write queue - parameters. + tracingConfig: + description: TracingConfig configures tracing in Thanos. This is + an experimental feature, it may change in any upcoming release + in a breaking way. properties: - batchSendDeadline: - description: BatchSendDeadline is the maximum time a sample - will wait in buffer. - type: string - capacity: - description: Capacity is the number of samples to buffer per - shard before we start dropping them. - type: integer - maxBackoff: - description: MaxBackoff is the maximum retry delay. + key: + description: The key of the secret to select from. Must be + a valid secret key. type: string - maxRetries: - description: MaxRetries is the maximum number of times to - retry a batch on recoverable errors. - type: integer - maxSamplesPerSend: - description: MaxSamplesPerSend is the maximum number of samples - per send. - type: integer - maxShards: - description: MaxShards is the maximum number of shards, i.e. - amount of concurrency. - type: integer - minBackoff: - description: MinBackoff is the initial retry delay. Gets doubled - for every retry. + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - minShards: - description: MinShards is the minimum number of shards, i.e. - amount of concurrency. - type: integer + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key type: object - remoteTimeout: - description: Timeout for requests to the remote write endpoint. + version: + description: Version describes the version of Thanos to use. type: string - tlsConfig: - description: TLS Config to use for remote write. - properties: - ca: - description: Stuct containing the CA cert to use for the targets. - properties: - configMap: - description: ConfigMap containing data to use for the - targets. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - required: - - key - type: object - secret: - description: Secret containing data to use for the targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - type: object - caFile: - description: Path to the CA cert in the Prometheus container - to use for the targets. - type: string - cert: - description: Struct containing the client cert file for the - targets. 
- properties: - configMap: - description: ConfigMap containing data to use for the - targets. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - required: - - key - type: object - secret: - description: Secret containing data to use for the targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - type: object - certFile: - description: Path to the client cert file in the Prometheus - container for the targets. - type: string - insecureSkipVerify: - description: Disable target certificate validation. - type: boolean - keyFile: - description: Path to the client key file in the Prometheus - container for the targets. - type: string - keySecret: - description: Secret containing the client key file for the - targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - serverName: - description: Used to verify the hostname for the targets. - type: string - type: object - url: - description: The URL of the endpoint to send samples to. - type: string - writeRelabelConfigs: - description: The list of remote write relabel configurations. - items: - description: 'RelabelConfig allows dynamic rewriting of the - label set, being applied to samples before ingestion. It defines - ``-section of Prometheus configuration. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' - properties: - action: - description: Action to perform based on regex matching. - Default is 'replace' - type: string - modulus: - description: Modulus to take of the hash of the source label - values. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' - type: string - replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex - capture groups are available. Default is '$1' - type: string - separator: - description: Separator placed between concatenated source - label values. default is ';'. - type: string - sourceLabels: - description: The source labels select values from existing - labels. Their content is concatenated using the configured - separator and matched against the configured regular expression - for the replace, keep, and drop actions. - items: - type: string - type: array - targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. 
- type: string - type: object - type: array - required: - - url type: object - type: array - replicaExternalLabelName: - description: Name of Prometheus external label used to denote replica - name. Defaults to the value of `prometheus_replica`. External label - will _not_ be added when value is set to empty string (`""`). - type: string - replicas: - description: Number of instances to deploy for a Prometheus deployment. - format: int32 - type: integer - resources: - description: Define resources requests and limits for single Pods. - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount of compute resources - required. If Requests is omitted for a container, it defaults - to Limits if that is explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - retention: - description: Time duration Prometheus shall retain data for. Default - is '24h', and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` - (milliseconds seconds minutes hours days weeks years). - type: string - retentionSize: - description: Maximum amount of disk space used by blocks. - type: string - routePrefix: - description: The route prefix Prometheus registers HTTP handlers for. - This is useful, if using ExternalURL and a proxy is rewriting HTTP - routes of a request, and the actual ExternalURL is still true, but - the server serves requests under a different route prefix. For example - for use with `kubectl proxy`. - type: string - ruleNamespaceSelector: - description: Namespaces to be selected for PrometheusRules discovery. 
- If unspecified, only the same namespace as the Prometheus object is - in is used. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - ruleSelector: - description: A selector to select which PrometheusRules to mount for - loading alerting rules from. Until (excluding) Prometheus Operator - v0.24.0 Prometheus Operator will migrate any legacy rule ConfigMaps - to PrometheusRule custom resources selected by RuleSelector. Make - sure it does not match any config maps that you do not want to be - migrated. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. 
- items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - rules: - description: /--rules.*/ command-line arguments. - properties: - alert: - description: /--rules.alert.*/ command-line arguments + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . properties: - forGracePeriod: - description: Minimum duration between alert and restored 'for' - state. This is maintained only for alerts with configured - 'for' time greater than grace period. + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. 
type: string - forOutageTolerance: - description: Max time to tolerate prometheus outage for restoring - 'for' state of alert. + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, operator + must be Exists; this combination means to match all values and + all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. Exists + is equivalent to wildcard for value, so that a pod can tolerate + all taints of a particular category. type: string - resendDelay: - description: Minimum amount of time to wait before resending - an alert to Alertmanager. + tolerationSeconds: + description: TolerationSeconds represents the period of time the + toleration (which must be of effect NoExecute, otherwise this + field is ignored) tolerates the taint. By default, it is not + set, which means tolerate the taint forever (do not evict). + Zero and negative values will be treated as 0 (evict immediately) + by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise + just a regular string. type: string type: object - type: object - scrapeInterval: - description: Interval between consecutive scrapes. - type: string - secrets: - description: Secrets is a list of Secrets in the same namespace as the - Prometheus object, which shall be mounted into the Prometheus Pods. - The Secrets are mounted into /etc/prometheus/secrets/. - items: + type: array + version: + description: Version of Prometheus to be deployed. type: string - type: array - securityContext: - description: SecurityContext holds pod-level security attributes and - common container settings. This defaults to the default PodSecurityContext. 
- properties: - fsGroup: - description: "A special supplemental group that applies to all containers - in a pod. Some volume types allow the Kubelet to change the ownership - of that volume to be owned by the pod: \n 1. The owning GID will - be the FSGroup 2. The setgid bit is set (new files created in - the volume will be owned by FSGroup) 3. The permission bits are - OR'd with rw-rw---- \n If unset, the Kubelet will not modify the - ownership and permissions of any volume." - format: int64 - type: integer - runAsGroup: - description: The GID to run the entrypoint of the container process. - Uses runtime default if unset. May also be set in SecurityContext. If - set in both SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence for that container. - format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container must run as a non-root - user. If true, the Kubelet will validate the image at runtime - to ensure that it does not run as UID 0 (root) and fail to start - the container if it does. If unset or false, no such validation - will be performed. May also be set in SecurityContext. If set - in both SecurityContext and PodSecurityContext, the value specified - in SecurityContext takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. May - also be set in SecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. - format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to be applied to all containers. - If unspecified, the container runtime will allocate a random SELinux - context for each container. May also be set in SecurityContext. 
If - set in both SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence for that container. + volumeMounts: + description: VolumeMounts allows configuration of additional VolumeMounts + on the output StatefulSet definition. VolumeMounts specified will + be appended to other VolumeMounts in the prometheus container, that + are generated as a result of StorageSpec objects. + items: + description: VolumeMount describes a mounting of a Volume within a + container. properties: - level: - description: Level is SELinux level label that applies to the - container. - type: string - role: - description: Role is a SELinux role label that applies to the - container. - type: string - type: - description: Type is a SELinux type label that applies to the - container. + mountPath: + description: Path within the container at which the volume should + be mounted. Must not contain ':'. type: string - user: - description: User is a SELinux user label that applies to the - container. + mountPropagation: + description: mountPropagation determines how mounts are propagated + from the host to container and the other way around. When not + set, MountPropagationNone is used. This field is beta in 1.10. type: string - type: object - supplementalGroups: - description: A list of groups applied to the first process run in - each container, in addition to the container's primary GID. If - unspecified, no groups will be added to any container. - items: - format: int64 - type: integer - type: array - sysctls: - description: Sysctls hold a list of namespaced sysctls used for - the pod. Pods with unsupported sysctls (by the container runtime) - might fail to launch. 
- items: - description: Sysctl defines a kernel parameter to be set - properties: - name: - description: Name of a property to set - type: string - value: - description: Value of a property to set - type: string - required: - - name - - value - type: object - type: array - windowsOptions: - description: The Windows specific settings applied to all containers. - If unspecified, the options within a container's SecurityContext - will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - properties: - gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named by - the GMSACredentialSpecName field. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. + name: + description: This must match the Name of a Volume. type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the GMSA - credential spec to use. This field is alpha-level and is only - honored by servers that enable the WindowsGMSA feature flag. + readOnly: + description: Mounted read-only if true, read-write otherwise (false + or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). type: string - runAsUserName: - description: The UserName in Windows to run the entrypoint of - the container process. Defaults to the user specified in image - metadata if unspecified. May also be set in PodSecurityContext. - If set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. This - field is beta-level and may be disabled with the WindowsRunAsUserName - feature flag. 
+ subPathExpr: + description: Expanded path within the volume from which the container's + volume should be mounted. Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded using the container's + environment. Defaults to "" (volume's root). SubPathExpr and + SubPath are mutually exclusive. type: string + required: + - mountPath + - name type: object - type: object - serviceAccountName: - description: ServiceAccountName is the name of the ServiceAccount to - use to run the Prometheus Pods. - type: string - serviceMonitorNamespaceSelector: - description: Namespaces to be selected for ServiceMonitor discovery. - If nil, only check own namespace. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. - items: + type: array + volumes: + description: Volumes allows configuration of additional volumes on the + output StatefulSet definition. Volumes specified will be appended + to other volumes that are generated as a result of StorageSpec objects. + items: + description: Volume represents a named volume in a pod that may be + accessed by any container in the pod. 
+ properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising + the machine' type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - serviceMonitorSelector: - description: ServiceMonitors to be selected for target discovery. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. 
If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. - items: + partition: + description: 'The partition in the volume that you want to + mount. If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for /dev/sda is + "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the default + is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource in + AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - sha: - description: SHA of Prometheus container image to be deployed. Defaults - to the value of `version`. Similar to a tag, but the SHA explicitly - deploys an immutable container image. Version and Tag are ignored - if SHA is set. - type: string - storage: - description: Storage spec to specify how storage shall be used. - properties: - emptyDir: - description: 'EmptyDirVolumeSource to be used by the Prometheus - StatefulSets. If specified, used in place of any volumeClaimTemplate. 
- More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' - properties: - medium: - description: 'What type of storage medium should back this directory. - The default is "" which means to use the node''s default medium. - Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - type: string - sizeLimit: - description: 'Total amount of local storage required for this - EmptyDir volume. The size limit is also applicable for memory - medium. The maximum usage on memory medium EmptyDir would - be the minimum value between the SizeLimit specified here - and the sum of memory limits of all containers in a pod. The - default is nil which means that the limit is undefined. More - info: http://kubernetes.io/docs/user-guide/volumes#emptydir' - type: string - type: object - volumeClaimTemplate: - description: A PVC spec to be used by the Prometheus StatefulSets. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. 
+ properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks + per storage account Dedicated: single blob disk per storage + account Managed: azure managed data disk (only in managed + availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: the name of secret that contains Azure Storage + Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName type: object - spec: - description: 'Spec defines the desired characteristics of a - volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + cephfs: + description: CephFS represents a Ceph FS mount on the host that + shares a pod's lifetime properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + monitors: + description: 'Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' items: type: string type: array - dataSource: - description: This field requires the VolumeSnapshotDataSource - alpha feature gate to be enabled and currently VolumeSnapshot - is the only supported data source. If the provisioner - can support VolumeSnapshot data source, it will create - a new volume and data will be restored to the volume at - the same time. If the provisioner does not support VolumeSnapshot - data source, volume will not be created and the failure - will be reported as an event. In the future, we plan to - support more data source types and the behavior of the - provisioner may change. + path: + description: 'Optional: Used as the mounted root, rather than + the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. More + info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring + for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication + secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' properties: - apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, the - specified Kind must be in the core API group. For - any other third-party types, APIGroup is required. 
- type: string - kind: - description: Kind is the type of resource being referenced - type: string name: - description: Name is the name of resource being referenced + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of - compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for - binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. 
- If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. - type: object type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not - included in claim spec. This is a beta feature. - type: string - volumeName: - description: VolumeName is the binding reference to the - PersistentVolume backing this claim. + user: + description: 'Optional: User is the rados user name, default + is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' type: string + required: + - monitors type: object - status: - description: 'Status represents the current information/status - of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + cinder: + description: 'Cinder represents a cinder volume attached and mounted + on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' properties: - accessModes: - description: 'AccessModes contains the actual access modes - the volume backing the PVC has. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - capacity: - additionalProperties: - type: string - description: Represents the actual resources of the underlying - volume. + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. More + info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string type: object - conditions: - description: Current Condition of persistent volume claim. - If underlying persistent volume is being resized then - the Condition will be set to 'ResizeStarted'. + volumeID: + description: 'volume id used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the Data + field of the referenced ConfigMap will be projected into + the volume as a file whose name is the key and content is + the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the + ConfigMap, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. items: - description: PersistentVolumeClaimCondition contails details - about state of pvc + description: Maps a string key to a path within a volume. properties: - lastProbeTime: - description: Last time we probed the condition. - format: date-time - type: string - lastTransitionTime: - description: Last time the condition transitioned - from one status to another. - format: date-time - type: string - message: - description: Human-readable message indicating details - about last transition. - type: string - reason: - description: Unique, this should be a short, machine - understandable string that gives the reason for - condition's last transition. If it reports "ResizeStarted" - that means the underlying persistent volume is being - resized. - type: string - status: + key: + description: The key to project. type: string - type: - description: PersistentVolumeClaimConditionType is - a valid value of PersistentVolumeClaimCondition.Type + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the + key to. May not be an absolute path. May not contain + the path element '..'. 
May not start with the string + '..'. type: string required: - - status - - type + - key + - path type: object type: array - phase: - description: Phase represents the current phase of PersistentVolumeClaim. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string + optional: + description: Specify whether the ConfigMap or its keys must + be defined + type: boolean type: object - type: object - type: object - tag: - description: Tag of Prometheus container image to be deployed. Defaults - to the value of `version`. Version is ignored if Tag is set. - type: string - thanos: - description: "Thanos configuration allows configuring various aspects - of a Prometheus server in a Thanos environment. \n This section is - experimental, it may change significantly without deprecation notice - in any release. \n This is experimental and may change significantly - without backward compatibility in any release." - properties: - baseImage: - description: Thanos base image if other than default. - type: string - grpcServerTlsConfig: - description: 'GRPCServerTLSConfig configures the gRPC server from - which Thanos Querier reads recorded rule data. Note: Currently - only the CAFile, CertFile, and KeyFile fields are supported. Maps - to the ''--grpc-server-tls-*'' CLI args.' - properties: - ca: - description: Stuct containing the CA cert to use for the targets. + csi: + description: CSI (Container Storage Interface) represents storage + that is handled by an external CSI driver (Alpha feature). properties: - configMap: - description: ConfigMap containing data to use for the targets. + driver: + description: Driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. 
"ext4", "xfs", + "ntfs". If not provided, the empty value is passed to the + associated CSI driver which will determine the default filesystem + to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the secret + object containing sensitive information to pass to the CSI + driver to complete the CSI NodePublishVolume and NodeUnpublishVolume + calls. This field is optional, and may be empty if no secret + is required. If the secret object contains more than one + secret, all secret references are passed. properties: - key: - description: The key to select. - type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' type: string - optional: - description: Specify whether the ConfigMap or its key - must be defined - type: boolean - required: - - key type: object - secret: - description: Secret containing data to use for the targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key + readOnly: + description: Specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. type: object + required: + - driver type: object - caFile: - description: Path to the CA cert in the Prometheus container - to use for the targets. 
- type: string - cert: - description: Struct containing the client cert file for the - targets. + downwardAPI: + description: DownwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name + of the file to be created. Must not be absolute or + contain the ''..'' path. Must be utf-8 encoded. The + first item of the relative path must not start with + ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + requests.cpu and requests.memory) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that shares + a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + description: 'Total amount of local storage required for this + EmptyDir volume. The size limit is also applicable for memory + medium. The maximum usage on memory medium EmptyDir would + be the minimum value between the SizeLimit specified here + and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + type: string + type: object + fc: + description: FC represents a Fibre Channel resource that is attached + to a kubelet's host machine and then exposed to the pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). 
ReadOnly + here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be + set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource that + is provisioned/attached using an exec based plugin. properties: - configMap: - description: ConfigMap containing data to use for the targets. + driver: + description: Driver is the name of the driver to use for this + volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret + object containing sensitive information to pass to the plugin + scripts. This may be empty if no secret object is specified. + If the secret object contains more than one secret, all + secrets are passed to the plugin scripts.' properties: - key: - description: The key to select. - type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string - optional: - description: Specify whether the ConfigMap or its key - must be defined - type: boolean - required: - - key type: object - secret: - description: Secret containing data to use for the targets. + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to a + kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name + on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + partition: + description: 'The partition in the volume that you want to + mount. If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for /dev/sda is + "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision a + container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or start + with '..'. If '.' is supplied, the volume directory will + be the git repository. Otherwise, if specified, the volume + will contain the git repository in the subdirectory with + the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the host + that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to false. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the path + is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to the + pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. 
If initiatorName + is specified with iscsiInterface simultaneously, new iSCSI + interface : will be created + for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator authentication properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either an + IP or ip_addr:port if the port is other than default (typically + TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal type: object - certFile: - description: Path to the client cert file in the Prometheus - container for the targets. - type: string - insecureSkipVerify: - description: Disable target certificate validation. - type: boolean - keyFile: - description: Path to the client key file in the Prometheus container - for the targets. + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique within + the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string - keySecret: - description: Secret containing the client key file for the targets. + nfs: + description: 'NFS represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. + path: + description: 'Path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + readOnly: + description: 'ReadOnly here will force the NFS export to be + mounted with read-only permissions. Defaults to false. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' type: string - optional: - description: Specify whether the Secret or its key must - be defined + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a reference + to a PersistentVolumeClaim in the same namespace. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. 
type: boolean required: - - key + - claimName type: object - serverName: - description: Used to verify the hostname for the targets. - type: string - type: object - image: - description: Image if specified has precedence over baseImage, tag - and sha combinations. Specifying the version is still necessary - to ensure the Prometheus Operator knows what version of Thanos - is being configured. - type: string - listenLocal: - description: ListenLocal makes the Thanos sidecar listen on loopback, - so that it does not bind against the Pod IP. - type: boolean - objectStorageConfig: - description: ObjectStorageConfig configures object storage in Thanos. - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - resources: - description: Resources defines the resource requirements for the - Thanos sidecar. If not provided, no requests/limits will be set - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID type: object - type: object - sha: - description: SHA of Thanos container image to be deployed. Defaults - to the value of `version`. Similar to a tag, but the SHA explicitly - deploys an immutable container image. Version and Tag are ignored - if SHA is set. - type: string - tag: - description: Tag of Thanos sidecar container image to be deployed. - Defaults to the value of `version`. Version is ignored if Tag - is set. - type: string - tracingConfig: - description: TracingConfig configures tracing in Thanos. This is - an experimental feature, it may change in any upcoming release - in a breaking way. - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - version: - description: Version describes the version of Thanos to use. - type: string - type: object - tolerations: - description: If specified, the pod's tolerations. - items: - description: The pod this Toleration is attached to tolerates any - taint that matches the triple using the matching - operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty - means match all taint effects. When specified, allowed values - are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, operator - must be Exists; this combination means to match all values and - all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. Exists - is equivalent to wildcard for value, so that a pod can tolerate - all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the - toleration (which must be of effect NoExecute, otherwise this - field is ignored) tolerates the taint. By default, it is not - set, which means tolerate the taint forever (do not evict). - Zero and negative values will be treated as 0 (evict immediately) - by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise - just a regular string. - type: string - type: object - type: array - version: - description: Version of Prometheus to be deployed. 
- type: string - volumeMounts: - description: VolumeMounts allows configuration of additional VolumeMounts - on the output StatefulSet definition. VolumeMounts specified will - be appended to other VolumeMounts in the prometheus container, that - are generated as a result of StorageSpec objects. - items: - description: VolumeMount describes a mounting of a Volume within a - container. - properties: - mountPath: - description: Path within the container at which the volume should - be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are propagated - from the host to container and the other way around. When not - set, MountPropagationNone is used. This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise (false - or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). - type: string - subPathExpr: - description: Expanded path within the volume from which the container's - volume should be mounted. Behaves similarly to SubPath but environment - variable references $(VAR_NAME) are expanded using the container's - environment. Defaults to "" (volume's root). SubPathExpr and - SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - volumes: - description: Volumes allows configuration of additional volumes on the - output StatefulSet definition. Volumes specified will be appended - to other volumes that are generated as a result of StorageSpec objects. - items: - description: Volume represents a named volume in a pod that may be - accessed by any container in the pod. 
- properties: - awsElasticBlockStore: - description: 'AWSElasticBlockStore represents an AWS Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - properties: - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - partition: - description: 'The partition in the volume that you want to - mount. If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition - as "1". Similarly, the volume partition for /dev/sda is - "0" (or you can leave the property empty).' - format: int32 - type: integer - readOnly: - description: 'Specify "true" to force and set the ReadOnly - property in VolumeMounts to "true". If omitted, the default - is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - type: boolean - volumeID: - description: 'Unique ID of the persistent disk resource in - AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - type: string - required: - - volumeID - type: object - azureDisk: - description: AzureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. - properties: - cachingMode: - description: 'Host Caching mode: None, Read Only, Read Write.' - type: string - diskName: - description: The Name of the data disk in the blob storage - type: string - diskURI: - description: The URI the data disk in the blob storage - type: string - fsType: - description: Filesystem type to mount. 
Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - kind: - description: 'Expected values Shared: multiple blob disks - per storage account Dedicated: single blob disk per storage - account Managed: azure managed data disk (only in managed - availability set). defaults to shared' - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - description: AzureFile represents an Azure File Service mount - on the host and bind mount to the pod. - properties: - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - secretName: - description: the name of secret that contains Azure Storage - Account Name and Key - type: string - shareName: - description: Share Name - type: string - required: - - secretName - - shareName - type: object - cephfs: - description: CephFS represents a Ceph FS mount on the host that - shares a pod's lifetime - properties: - monitors: - description: 'Required: Monitors is a collection of Ceph monitors - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - items: + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits to use on created files by default. + Must be a value between 0 and 0777. Directories within the + path are not affected by this setting. This might be in + conflict with other options that affect the file mode, like + fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: information about the configMap data to + project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced ConfigMap + will be projected into the volume as a file whose + name is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. + If a key is specified which is not present in + the ConfigMap, the volume setup will error unless + it is marked optional. Paths must be relative + and may not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data + to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name + and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + description: Specifies the output format + of the exposed resources, defaults to + "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced Secret will + be projected into the volume as a file whose name + is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. + If a key is specified which is not present in + the Secret, the volume setup will error unless + it is marked optional. Paths must be relative + and may not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience of + the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account token. + As the token approaches expiration, the kubelet + volume plugin will proactively rotate the service + account token. The kubelet will start trying to + rotate the token if the token is older than 80 + percent of its time to live or if the token is + older than 24 hours.Defaults to 1 hour and must + be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the mount + point of the file to project the token into. + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host that + shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is no group type: string - type: array - path: - description: 'Optional: Used as the mounted root, rather than - the full Ceph tree, default is /' - type: string - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. 
More - info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: boolean - secretFile: - description: 'Optional: SecretFile is the path to key ring - for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: string - secretRef: - description: 'Optional: SecretRef is reference to the authentication - secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - user: - description: 'Optional: User is the rados user name, default - is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: string - required: - - monitors - type: object - cinder: - description: 'Cinder represents a cinder volume attached and mounted - on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - properties: - fsType: - description: 'Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Examples: "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: string - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. More - info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: boolean - secretRef: - description: 'Optional: points to a secret object containing - parameters used to connect to OpenStack.' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
+ readOnly: + description: ReadOnly here will force the Quobyte volume to + be mounted with read-only permissions. Defaults to false. + type: boolean + registry: + description: Registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: type: string - type: object - volumeID: - description: 'volume id used to identify the volume in cinder. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: string - required: - - volumeID - type: object - configMap: - description: ConfigMap represents a configMap that should populate - this volume - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a value between 0 and 0777. Defaults - to 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - items: - description: If unspecified, each key-value pair in the Data - field of the referenced ConfigMap will be projected into - the volume as a file whose name is the key and content is - the value. If specified, the listed keys will be projected - into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the - ConfigMap, the volume setup will error unless it is marked - optional. Paths must be relative and may not contain the - '..' path or start with '..'. - items: - description: Maps a string key to a path within a volume. + type: array + pool: + description: 'The rados pool name. Default is rbd. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is nil. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits to use on this file, - must be a value between 0 and 0777. If not specified, - the volume defaultMode will be used. This might be - in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode - bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to map the - key to. May not be an absolute path. May not contain - the path element '..'. May not start with the string - '..'. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - required: - - key - - path type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the ConfigMap or its keys must - be defined - type: boolean - type: object - csi: - description: CSI (Container Storage Interface) represents storage - that is handled by an external CSI driver (Alpha feature). - properties: - driver: - description: Driver is the name of the CSI driver that handles - this volume. Consult with your admin for the correct name - as registered in the cluster. - type: string - fsType: - description: Filesystem type to mount. Ex. "ext4", "xfs", - "ntfs". If not provided, the empty value is passed to the - associated CSI driver which will determine the default filesystem - to apply. 
- type: string - nodePublishSecretRef: - description: NodePublishSecretRef is a reference to the secret - object containing sensitive information to pass to the CSI - driver to complete the CSI NodePublishVolume and NodeUnpublishVolume - calls. This field is optional, and may be empty if no secret - is required. If the secret object contains more than one - secret, all secret references are passed. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - readOnly: - description: Specifies a read-only configuration for the volume. - Defaults to false (read/write). - type: boolean - volumeAttributes: - additionalProperties: + user: + description: 'The rados user name. Default is admin. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string - description: VolumeAttributes stores driver-specific properties - that are passed to the CSI driver. Consult your driver's - documentation for supported values. - type: object - required: - - driver - type: object - downwardAPI: - description: DownwardAPI represents downward API about the pod - that should populate this volume - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a value between 0 and 0777. Defaults - to 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' 
- format: int32 - type: integer - items: - description: Items is a list of downward API volume file - items: - description: DownwardAPIVolumeFile represents information - to create the file containing the pod field + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. properties: - fieldRef: - description: 'Required: Selects a field of the pod: - only annotations, labels, name and namespace are supported.' - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. - type: string - required: - - fieldPath - type: object - mode: - description: 'Optional: mode bits to use on this file, - must be a value between 0 and 0777. If not specified, - the volume defaultMode will be used. This might be - in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode - bits set.' - format: int32 - type: integer - path: - description: 'Required: Path is the relative path name - of the file to be created. Must not be absolute or - contain the ''..'' path. 
Must be utf-8 encoded. The - first item of the relative path must not start with - ''..''' + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - requests.cpu and requests.memory) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - description: Specifies the output format of the - exposed resources, defaults to "1" - type: string - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - required: - - path type: object - type: array - type: object - emptyDir: - description: 'EmptyDir represents a temporary directory that shares - a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - properties: - medium: - description: 'What type of storage medium should back this - directory. The default is "" which means to use the node''s - default medium. Must be an empty string (default) or Memory. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - type: string - sizeLimit: - description: 'Total amount of local storage required for this - EmptyDir volume. The size limit is also applicable for memory - medium. The maximum usage on memory medium EmptyDir would - be the minimum value between the SizeLimit specified here - and the sum of memory limits of all containers in a pod. - The default is nil which means that the limit is undefined. 
- More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' - type: string - type: object - fc: - description: FC represents a Fibre Channel resource that is attached - to a kubelet's host machine and then exposed to the pod. - properties: - fsType: - description: 'Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - lun: - description: 'Optional: FC target lun number' - format: int32 - type: integer - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts.' - type: boolean - targetWWNs: - description: 'Optional: FC target worldwide names (WWNs)' - items: + sslEnabled: + description: Flag to enable/disable SSL communication with + Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume should + be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. type: string - type: array - wwids: - description: 'Optional: FC volume world wide identifiers (wwids) - Either wwids or combination of targetWWNs and lun must be - set, but not both simultaneously.' - items: + storagePool: + description: The ScaleIO Storage Pool associated with the + protection domain. type: string - type: array - type: object - flexVolume: - description: FlexVolume represents a generic volume resource that - is provisioned/attached using an exec based plugin. - properties: - driver: - description: Driver is the name of the driver to use for this - volume. - type: string - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". The default filesystem depends on FlexVolume - script. 
- type: string - options: - additionalProperties: + system: + description: The name of the storage system as configured + in ScaleIO. type: string - description: 'Optional: Extra command options if any.' - type: object - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts.' - type: boolean - secretRef: - description: 'Optional: SecretRef is reference to the secret - object containing sensitive information to pass to the plugin - scripts. This may be empty if no secret object is specified. - If the secret object contains more than one secret, all - secrets are passed to the plugin scripts.' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - driver - type: object - flocker: - description: Flocker represents a Flocker volume attached to a - kubelet's host machine. This depends on the Flocker control - service being running - properties: - datasetName: - description: Name of the dataset stored as metadata -> name - on the dataset for Flocker should be considered as deprecated - type: string - datasetUUID: - description: UUID of the dataset. This is unique identifier - of a Flocker dataset - type: string - type: object - gcePersistentDisk: - description: 'GCEPersistentDisk represents a GCE Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - properties: - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. 
More info: - https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - partition: - description: 'The partition in the volume that you want to - mount. If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition - as "1". Similarly, the volume partition for /dev/sda is - "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - format: int32 - type: integer - pdName: - description: 'Unique name of the PD resource in GCE. Used - to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: string - readOnly: - description: 'ReadOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: boolean - required: - - pdName - type: object - gitRepo: - description: 'GitRepo represents a git repository at a particular - revision. DEPRECATED: GitRepo is deprecated. To provision a - container with a git repo, mount an EmptyDir into an InitContainer - that clones the repo using git, then mount the EmptyDir into - the Pod''s container.' - properties: - directory: - description: Target directory name. Must not contain or start - with '..'. If '.' is supplied, the volume directory will - be the git repository. Otherwise, if specified, the volume - will contain the git repository in the subdirectory with - the given name. - type: string - repository: - description: Repository URL - type: string - revision: - description: Commit hash for the specified revision. - type: string - required: - - repository - type: object - glusterfs: - description: 'Glusterfs represents a Glusterfs mount on the host - that shares a pod''s lifetime. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md' - properties: - endpoints: - description: 'EndpointsName is the endpoint name that details - Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: string - path: - description: 'Path is the Glusterfs volume path. More info: - https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: string - readOnly: - description: 'ReadOnly here will force the Glusterfs volume - to be mounted with read-only permissions. Defaults to false. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: boolean - required: - - endpoints - - path - type: object - hostPath: - description: 'HostPath represents a pre-existing file or directory - on the host machine that is directly exposed to the container. - This is generally used for system agents or other privileged - things that are allowed to see the host machine. Most containers - will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - --- TODO(jonesdl) We need to restrict who can use host directory - mounts and who can/can not mount host directories as read/write.' - properties: - path: - description: 'Path of the directory on the host. If the path - is a symlink, it will follow the link to the real path. - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - type: string - type: - description: 'Type for HostPath Volume Defaults to "" More - info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - type: string - required: - - path - type: object - iscsi: - description: 'ISCSI represents an ISCSI Disk resource that is - attached to a kubelet''s host machine and then exposed to the - pod. 
More info: https://examples.k8s.io/volumes/iscsi/README.md' - properties: - chapAuthDiscovery: - description: whether support iSCSI Discovery CHAP authentication - type: boolean - chapAuthSession: - description: whether support iSCSI Session CHAP authentication - type: boolean - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#iscsi - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - initiatorName: - description: Custom iSCSI Initiator Name. If initiatorName - is specified with iscsiInterface simultaneously, new iSCSI - interface : will be created - for the connection. - type: string - iqn: - description: Target iSCSI Qualified Name. - type: string - iscsiInterface: - description: iSCSI Interface Name that uses an iSCSI transport. - Defaults to 'default' (tcp). - type: string - lun: - description: iSCSI Target Lun number. - format: int32 - type: integer - portals: - description: iSCSI Target Portal List. The portal is either - an IP or ip_addr:port if the port is other than default - (typically TCP ports 860 and 3260). - items: + volumeName: + description: The name of a volume already created in the ScaleIO + system that is associated with this volume source. type: string - type: array - readOnly: - description: ReadOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. - type: boolean - secretRef: - description: CHAP Secret for iSCSI target and initiator authentication - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - type: object - targetPortal: - description: iSCSI Target Portal. The Portal is either an - IP or ip_addr:port if the port is other than default (typically - TCP ports 860 and 3260). - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - description: 'Volume''s name. Must be a DNS_LABEL and unique within - the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - nfs: - description: 'NFS represents an NFS mount on the host that shares - a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - properties: - path: - description: 'Path that is exported by the NFS server. More - info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: string - readOnly: - description: 'ReadOnly here will force the NFS export to be - mounted with read-only permissions. Defaults to false. More - info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: boolean - server: - description: 'Server is the hostname or IP address of the - NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - description: 'PersistentVolumeClaimVolumeSource represents a reference - to a PersistentVolumeClaim in the same namespace. More info: - https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - claimName: - description: 'ClaimName is the name of a PersistentVolumeClaim - in the same namespace as the pod using this volume. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - type: string - readOnly: - description: Will force the ReadOnly setting in VolumeMounts. - Default false. 
- type: boolean - required: - - claimName - type: object - photonPersistentDisk: - description: PhotonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine - properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - pdID: - description: ID that identifies Photon Controller persistent - disk - type: string - required: - - pdID - type: object - portworxVolume: - description: PortworxVolume represents a portworx volume attached - and mounted on kubelets host machine - properties: - fsType: - description: FSType represents the filesystem type to mount - Must be a filesystem type supported by the host operating - system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" - if unspecified. - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - volumeID: - description: VolumeID uniquely identifies a Portworx volume - type: string - required: - - volumeID - type: object - projected: - description: Items for all in one resources secrets, configmaps, - and downward API - properties: - defaultMode: - description: Mode bits to use on created files by default. - Must be a value between 0 and 0777. Directories within the - path are not affected by this setting. This might be in - conflict with other options that affect the file mode, like - fsGroup, and the result can be other mode bits set. 
- format: int32 - type: integer - sources: - description: list of volume projections - items: - description: Projection that may be projected along with - other supported volume types - properties: - configMap: - description: information about the configMap data to - project - properties: - items: - description: If unspecified, each key-value pair - in the Data field of the referenced ConfigMap - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the ConfigMap, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits to use on - this file, must be a value between 0 and - 0777. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can be - other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file - to map the key to. May not be an absolute - path. May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the ConfigMap or its - keys must be defined - type: boolean - type: object - downwardAPI: - description: information about the downwardAPI data - to project - properties: - items: - description: Items is a list of DownwardAPIVolume - file - items: - description: DownwardAPIVolumeFile represents - information to create the file containing the - pod field - properties: - fieldRef: - description: 'Required: Selects a field of - the pod: only annotations, labels, name - and namespace are supported.' - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, defaults - to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - mode: - description: 'Optional: mode bits to use on - this file, must be a value between 0 and - 0777. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can be - other mode bits set.' - format: int32 - type: integer - path: - description: 'Required: Path is the relative - path name of the file to be created. Must - not be absolute or contain the ''..'' path. - Must be utf-8 encoded. The first item of - the relative path must not start with ''..''' - type: string - resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, requests.cpu and requests.memory) - are currently supported.' 
- properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - description: Specifies the output format - of the exposed resources, defaults to - "1" - type: string - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - required: - - path - type: object - type: array - type: object - secret: - description: information about the secret data to project - properties: - items: - description: If unspecified, each key-value pair - in the Data field of the referenced Secret will - be projected into the volume as a file whose name - is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the Secret, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits to use on - this file, must be a value between 0 and - 0777. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can be - other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file - to map the key to. May not be an absolute - path. May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - type: object - serviceAccountToken: - description: information about the serviceAccountToken - data to project - properties: - audience: - description: Audience is the intended audience of - the token. A recipient of a token must identify - itself with an identifier specified in the audience - of the token, and otherwise should reject the - token. The audience defaults to the identifier - of the apiserver. - type: string - expirationSeconds: - description: ExpirationSeconds is the requested - duration of validity of the service account token. - As the token approaches expiration, the kubelet - volume plugin will proactively rotate the service - account token. The kubelet will start trying to - rotate the token if the token is older than 80 - percent of its time to live or if the token is - older than 24 hours.Defaults to 1 hour and must - be at least 10 minutes. - format: int64 - type: integer - path: - description: Path is the path relative to the mount - point of the file to project the token into. - type: string - required: - - path - type: object - type: object - type: array - required: - - sources - type: object - quobyte: - description: Quobyte represents a Quobyte mount on the host that - shares a pod's lifetime - properties: - group: - description: Group to map volume access to Default is no group - type: string - readOnly: - description: ReadOnly here will force the Quobyte volume to - be mounted with read-only permissions. Defaults to false. 
- type: boolean - registry: - description: Registry represents a single or multiple Quobyte - Registry services specified as a string as host:port pair - (multiple entries are separated with commas) which acts - as the central registry for volumes - type: string - tenant: - description: Tenant owning the given Quobyte volume in the - Backend Used with dynamically provisioned Quobyte volumes, - value is set by the plugin - type: string - user: - description: User to map volume access to Defaults to serivceaccount - user - type: string - volume: - description: Volume is a string that references an already - created Quobyte volume by name. - type: string - required: - - registry - - volume - type: object - rbd: - description: 'RBD represents a Rados Block Device mount on the - host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' - properties: - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#rbd - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - image: - description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - keyring: - description: 'Keyring is the path to key ring for RBDUser. - Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - monitors: - description: 'A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer items: + description: If unspecified, each key-value pair in the Data + field of the referenced Secret will be projected into the + volume as a file whose name is the key and content is the + value. If specified, the listed keys will be projected into + the specified paths, and unlisted keys will not be present. + If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. + Paths must be relative and may not contain the '..' path + or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the + key to. May not be an absolute path. May not contain + the path element '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must be + defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace to + use. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' type: string - type: array - pool: - description: 'The rados pool name. Default is rbd. More info: - https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - readOnly: - description: 'ReadOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: boolean - secretRef: - description: 'SecretRef is name of the authentication secret - for RBDUser. If provided overrides keyring. Default is nil. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - user: - description: 'The rados user name. Default is admin. More - info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - required: - - image - - monitors - type: object - scaleIO: - description: ScaleIO represents a ScaleIO persistent volume attached - and mounted on Kubernetes nodes. - properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Default is "xfs". - type: string - gateway: - description: The host address of the ScaleIO API Gateway. - type: string - protectionDomain: - description: The name of the ScaleIO Protection Domain for - the configured storage. - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: SecretRef references to the secret for ScaleIO - user and other sensitive information. If this is not provided, - Login operation will fail. - properties: - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - sslEnabled: - description: Flag to enable/disable SSL communication with - Gateway, default false - type: boolean - storageMode: - description: Indicates whether the storage for a volume should - be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. - type: string - storagePool: - description: The ScaleIO Storage Pool associated with the - protection domain. - type: string - system: - description: The name of the storage system as configured - in ScaleIO. - type: string - volumeName: - description: The name of a volume already created in the ScaleIO - system that is associated with this volume source. - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - description: 'Secret represents a secret that should populate - this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a value between 0 and 0777. Defaults - to 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - items: - description: If unspecified, each key-value pair in the Data - field of the referenced Secret will be projected into the - volume as a file whose name is the key and content is the - value. If specified, the listed keys will be projected into - the specified paths, and unlisted keys will not be present. - If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. - Paths must be relative and may not contain the '..' path - or start with '..'. 
- items: - description: Maps a string key to a path within a volume. + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits to use on this file, - must be a value between 0 and 0777. If not specified, - the volume defaultMode will be used. This might be - in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode - bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to map the - key to. May not be an absolute path. May not contain - the path element '..'. May not start with the string - '..'. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - required: - - key - - path type: object - type: array - optional: - description: Specify whether the Secret or its keys must be - defined - type: boolean - secretName: - description: 'Name of the secret in the pod''s namespace to - use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' - type: string - type: object - storageos: - description: StorageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. 
- properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: SecretRef specifies the secret to use for obtaining - the StorageOS API credentials. If not specified, default - values will be attempted. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - volumeName: - description: VolumeName is the human-readable name of the - StorageOS volume. Volume names are only unique within a - namespace. - type: string - volumeNamespace: - description: VolumeNamespace specifies the scope of the volume - within StorageOS. If no namespace is specified then the - Pod's namespace will be used. This allows the Kubernetes - name scoping to be mirrored within StorageOS for tighter - integration. Set VolumeName to any name to override the - default behaviour. Set to "default" if you are not using - namespaces within StorageOS. Namespaces that do not pre-exist - within StorageOS will be created. - type: string - type: object - vsphereVolume: - description: VsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine - properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - storagePolicyID: - description: Storage Policy Based Management (SPBM) profile - ID associated with the StoragePolicyName. 
- type: string - storagePolicyName: - description: Storage Policy Based Management (SPBM) profile - name. - type: string - volumePath: - description: Path that identifies vSphere volume vmdk - type: string - required: - - volumePath - type: object - required: - - name - type: object - type: array - walCompression: - description: Enable compression of the write-ahead log using Snappy. - This flag is only available in versions of Prometheus >= 2.11.0. - type: boolean - type: object - status: - description: 'Most recent observed status of the Prometheus cluster. Read-only. - Not included when requesting from the apiserver, only from the Prometheus - Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - availableReplicas: - description: Total number of available pods (ready for at least minReadySeconds) - targeted by this Prometheus deployment. - format: int32 - type: integer - paused: - description: Represents whether any actions on the underlaying managed - objects are being performed. Only delete actions will be performed. - type: boolean - replicas: - description: Total number of non-terminated pods targeted by this Prometheus - deployment (their labels match the selector). - format: int32 - type: integer - unavailableReplicas: - description: Total number of unavailable pods targeted by this Prometheus - deployment. - format: int32 - type: integer - updatedReplicas: - description: Total number of non-terminated pods targeted by this Prometheus - deployment that have the desired version spec. - format: int32 - type: integer - required: - - availableReplicas - - paused - - replicas - - unavailableReplicas - - updatedReplicas - type: object - required: - - spec - type: object - version: v1 - versions: - - name: v1 + volumeName: + description: VolumeName is the human-readable name of the + StorageOS volume. 
Volume names are only unique within a + namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the volume + within StorageOS. If no namespace is specified then the + Pod's namespace will be used. This allows the Kubernetes + name scoping to be mirrored within StorageOS for tighter + integration. Set VolumeName to any name to override the + default behaviour. Set to "default" if you are not using + namespaces within StorageOS. Namespaces that do not pre-exist + within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + walCompression: + description: Enable compression of the write-ahead log using Snappy. + This flag is only available in versions of Prometheus >= 2.11.0. + type: boolean + type: object + status: + description: 'Most recent observed status of the Prometheus cluster. Read-only. + Not included when requesting from the apiserver, only from the Prometheus + Operator API itself. 
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + availableReplicas: + description: Total number of available pods (ready for at least minReadySeconds) + targeted by this Prometheus deployment. + format: int32 + type: integer + paused: + description: Represents whether any actions on the underlaying managed + objects are being performed. Only delete actions will be performed. + type: boolean + replicas: + description: Total number of non-terminated pods targeted by this Prometheus + deployment (their labels match the selector). + format: int32 + type: integer + unavailableReplicas: + description: Total number of unavailable pods targeted by this Prometheus + deployment. + format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated pods targeted by this Prometheus + deployment that have the desired version spec. + format: int32 + type: integer + required: + - availableReplicas + - paused + - replicas + - unavailableReplicas + - updatedReplicas + type: object + required: + - spec + type: object + additionalPrinterColumns: + - jsonPath: .spec.version + description: The version of Prometheus + name: Version + type: string + - jsonPath: .spec.replicas + description: The desired replicas number of Prometheuses + name: Replicas + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date served: true storage: true + subresources: {} diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-prometheusrules.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-prometheusrules.yaml index 3f5cb49239..5a059789a3 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-prometheusrules.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-prometheusrules.yaml @@ -1,10 +1,9 @@ # 
https://raw.githubusercontent.com/coreos/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.2.4 - helm.sh/hook: crd-install creationTimestamp: null name: prometheusrules.monitoring.coreos.com spec: @@ -16,77 +15,77 @@ spec: singular: prometheusrule preserveUnknownFields: false scope: Namespaced - validation: - openAPIV3Schema: - description: PrometheusRule defines alerting rules for a Prometheus instance - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Specification of desired alerting rule definitions for Prometheus. - properties: - groups: - description: Content of Prometheus rule file - items: - description: 'RuleGroup is a list of sequentially evaluated recording - and alerting rules. Note: PartialResponseStrategy is only used by - ThanosRuler and will be ignored by Prometheus instances. Valid - values for this field are ''warn'' or ''abort''. 
More info: https://github.com/thanos-io/thanos/blob/master/docs/components/rule.md#partial-response' - properties: - interval: - type: string - name: - type: string - partial_response_strategy: - type: string - rules: - items: - description: Rule describes an alerting or recording rule. - properties: - alert: - type: string - annotations: - additionalProperties: - type: string - type: object - expr: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - for: - type: string - labels: - additionalProperties: - type: string - type: object - record: - type: string - required: - - expr - type: object - type: array - required: - - name - - rules - type: object - type: array - type: object - required: - - spec - type: object - version: v1 versions: - name: v1 + schema: + openAPIV3Schema: + description: PrometheusRule defines alerting rules for a Prometheus instance + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired alerting rule definitions for Prometheus. + properties: + groups: + description: Content of Prometheus rule file + items: + description: 'RuleGroup is a list of sequentially evaluated recording + and alerting rules. Note: PartialResponseStrategy is only used by + ThanosRuler and will be ignored by Prometheus instances. 
Valid + values for this field are ''warn'' or ''abort''. More info: https://github.com/thanos-io/thanos/blob/master/docs/components/rule.md#partial-response' + properties: + interval: + type: string + name: + type: string + partial_response_strategy: + type: string + rules: + items: + description: Rule describes an alerting or recording rule. + properties: + alert: + type: string + annotations: + additionalProperties: + type: string + type: object + expr: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + for: + type: string + labels: + additionalProperties: + type: string + type: object + record: + type: string + required: + - expr + type: object + type: array + required: + - name + - rules + type: object + type: array + type: object + required: + - spec + type: object served: true storage: true + diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-servicemonitor.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-servicemonitor.yaml index e631c2c090..2177a97072 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-servicemonitor.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-servicemonitor.yaml @@ -1,10 +1,9 @@ # https://raw.githubusercontent.com/coreos/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.2.4 - helm.sh/hook: crd-install creationTimestamp: null name: servicemonitors.monitoring.coreos.com spec: @@ -16,445 +15,445 @@ spec: singular: servicemonitor preserveUnknownFields: false scope: Namespaced - validation: - openAPIV3Schema: - description: ServiceMonitor defines monitoring for a set of services. 
- properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Specification of desired Service selection for target discovery - by Prometheus. - properties: - endpoints: - description: A list of endpoints allowed as part of this ServiceMonitor. - items: - description: Endpoint defines a scrapeable endpoint serving Prometheus - metrics. - properties: - basicAuth: - description: 'BasicAuth allow an endpoint to authenticate over - basic authentication More info: https://prometheus.io/docs/operating/configuration/#endpoints' - properties: - password: - description: The secret in the service monitor namespace that - contains the password for authentication. + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ServiceMonitor defines monitoring for a set of services. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired Service selection for target discovery + by Prometheus. + properties: + endpoints: + description: A list of endpoints allowed as part of this ServiceMonitor. + items: + description: Endpoint defines a scrapeable endpoint serving Prometheus + metrics. + properties: + basicAuth: + description: 'BasicAuth allow an endpoint to authenticate over + basic authentication More info: https://prometheus.io/docs/operating/configuration/#endpoints' + properties: + password: + description: The secret in the service monitor namespace that + contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that + contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenFile: + description: File to read bearer token for scraping targets. 
+ type: string + bearerTokenSecret: + description: Secret to mount to read bearer token for scraping + targets. The secret needs to be in the same namespace as the + service monitor and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + honorLabels: + description: HonorLabels chooses the metric's labels on collisions + with target labels. + type: boolean + honorTimestamps: + description: HonorTimestamps controls whether Prometheus respects + the timestamps present in scraped data. + type: boolean + interval: + description: Interval at which metrics should be scraped + type: string + metricRelabelings: + description: MetricRelabelConfigs to apply to samples before ingestion. + items: + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It defines + ``-section of Prometheus configuration. + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. + action: + description: Action to perform based on regex matching. + Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source label + values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. 
Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular expression + for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key type: object - username: - description: The secret in the service monitor namespace that - contains the username for authentication. + type: array + params: + additionalProperties: + items: + type: string + type: array + description: Optional HTTP URL parameters + type: object + path: + description: HTTP path to scrape for metrics. + type: string + port: + description: Name of the service port this endpoint refers to. + Mutually exclusive with targetPort. + type: string + proxyUrl: + description: ProxyURL eg http://proxyserver:2195 Directs scrapes + to proxy through this endpoint. + type: string + relabelings: + description: 'RelabelConfigs to apply to samples before scraping. 
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + items: + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It defines + ``-section of Prometheus configuration. + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. + action: + description: Action to perform based on regex matching. + Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source label + values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. Default is '(.*)' type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular expression + for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. 
type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key type: object - type: object - bearerTokenFile: - description: File to read bearer token for scraping targets. - type: string - bearerTokenSecret: - description: Secret to mount to read bearer token for scraping - targets. The secret needs to be in the same namespace as the - service monitor and accessible by the Prometheus Operator. - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - honorLabels: - description: HonorLabels chooses the metric's labels on collisions - with target labels. - type: boolean - honorTimestamps: - description: HonorTimestamps controls whether Prometheus respects - the timestamps present in scraped data. - type: boolean - interval: - description: Interval at which metrics should be scraped - type: string - metricRelabelings: - description: MetricRelabelConfigs to apply to samples before ingestion. - items: - description: 'RelabelConfig allows dynamic rewriting of the - label set, being applied to samples before ingestion. It defines - ``-section of Prometheus configuration. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + type: array + scheme: + description: HTTP scheme to use for scraping. + type: string + scrapeTimeout: + description: Timeout after which the scrape is ended + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: Name or number of the pod port this endpoint refers + to. 
Mutually exclusive with port. + x-kubernetes-int-or-string: true + tlsConfig: + description: TLS configuration to use when scraping the endpoint properties: - action: - description: Action to perform based on regex matching. - Default is 'replace' + ca: + description: Stuct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. type: string - modulus: - description: Modulus to take of the hash of the source label - values. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' + cert: + description: Struct containing the client cert file for the + targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. type: string - replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex - capture groups are available. Default is '$1' + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus + container for the targets. type: string - separator: - description: Separator placed between concatenated source - label values. default is ';'. - type: string - sourceLabels: - description: The source labels select values from existing - labels. Their content is concatenated using the configured - separator and matched against the configured regular expression - for the replace, keep, and drop actions. - items: - type: string - type: array - targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. 
+ keySecret: + description: Secret containing the client key file for the + targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. type: string type: object + type: object + type: array + jobLabel: + description: The label to use to retrieve the job name from. + type: string + namespaceSelector: + description: Selector to select which namespaces the Endpoints objects + are discovered from. + properties: + any: + description: Boolean describing whether all namespaces are selected + in contrast to a list restricting them. + type: boolean + matchNames: + description: List of namespace names. + items: + type: string type: array - params: - additionalProperties: - items: - type: string - type: array - description: Optional HTTP URL parameters - type: object - path: - description: HTTP path to scrape for metrics. - type: string - port: - description: Name of the service port this endpoint refers to. - Mutually exclusive with targetPort. - type: string - proxyUrl: - description: ProxyURL eg http://proxyserver:2195 Directs scrapes - to proxy through this endpoint. - type: string - relabelings: - description: 'RelabelConfigs to apply to samples before scraping. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + type: object + podTargetLabels: + description: PodTargetLabels transfers labels on the Kubernetes Pod + onto the target. 
+ items: + type: string + type: array + sampleLimit: + description: SampleLimit defines per-scrape limit on number of scraped + samples that will be accepted. + format: int64 + type: integer + selector: + description: Selector to select Endpoints objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. items: - description: 'RelabelConfig allows dynamic rewriting of the - label set, being applied to samples before ingestion. It defines - ``-section of Prometheus configuration. - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. properties: - action: - description: Action to perform based on regex matching. - Default is 'replace' - type: string - modulus: - description: Modulus to take of the hash of the source label - values. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' + key: + description: key is the label key that the selector applies + to. type: string - replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex - capture groups are available. Default is '$1' + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. type: string - separator: - description: Separator placed between concatenated source - label values. default is ';'. - type: string - sourceLabels: - description: The source labels select values from existing - labels. Their content is concatenated using the configured - separator and matched against the configured regular expression - for the replace, keep, and drop actions. 
+ values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. items: type: string type: array - targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. - type: string + required: + - key + - operator type: object type: array - scheme: - description: HTTP scheme to use for scraping. - type: string - scrapeTimeout: - description: Timeout after which the scrape is ended - type: string - targetPort: - anyOf: - - type: integer - - type: string - description: Name or number of the pod port this endpoint refers - to. Mutually exclusive with port. - x-kubernetes-int-or-string: true - tlsConfig: - description: TLS configuration to use when scraping the endpoint - properties: - ca: - description: Stuct containing the CA cert to use for the targets. - properties: - configMap: - description: ConfigMap containing data to use for the - targets. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - required: - - key - type: object - secret: - description: Secret containing data to use for the targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - type: object - caFile: - description: Path to the CA cert in the Prometheus container - to use for the targets. - type: string - cert: - description: Struct containing the client cert file for the - targets. - properties: - configMap: - description: ConfigMap containing data to use for the - targets. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - required: - - key - type: object - secret: - description: Secret containing data to use for the targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - type: object - certFile: - description: Path to the client cert file in the Prometheus - container for the targets. - type: string - insecureSkipVerify: - description: Disable target certificate validation. - type: boolean - keyFile: - description: Path to the client key file in the Prometheus - container for the targets. - type: string - keySecret: - description: Secret containing the client key file for the - targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - serverName: - description: Used to verify the hostname for the targets. - type: string + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. type: object type: object - type: array - jobLabel: - description: The label to use to retrieve the job name from. - type: string - namespaceSelector: - description: Selector to select which namespaces the Endpoints objects - are discovered from. - properties: - any: - description: Boolean describing whether all namespaces are selected - in contrast to a list restricting them. - type: boolean - matchNames: - description: List of namespace names. - items: - type: string - type: array - type: object - podTargetLabels: - description: PodTargetLabels transfers labels on the Kubernetes Pod - onto the target. - items: - type: string - type: array - sampleLimit: - description: SampleLimit defines per-scrape limit on number of scraped - samples that will be accepted. - format: int64 - type: integer - selector: - description: Selector to select Endpoints objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies - to. 
- type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - targetLabels: - description: TargetLabels transfers labels on the Kubernetes Service - onto the target. - items: - type: string - type: array - required: - - endpoints - - selector - type: object - required: - - spec - type: object - version: v1 - versions: - - name: v1 + targetLabels: + description: TargetLabels transfers labels on the Kubernetes Service + onto the target. 
+ items: + type: string + type: array + required: + - endpoints + - selector + type: object + required: + - spec + type: object served: true storage: true + diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-thanosrulers.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-thanosrulers.yaml index e7b935a998..f43e18004a 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-thanosrulers.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/crds/crd-thanosrulers.yaml @@ -1,10 +1,9 @@ # https://raw.githubusercontent.com/coreos/prometheus-operator/release-0.38/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.2.4 - helm.sh/hook: crd-install creationTimestamp: null name: thanosrulers.monitoring.coreos.com spec: @@ -16,4711 +15,4711 @@ spec: singular: thanosruler preserveUnknownFields: false scope: Namespaced - validation: - openAPIV3Schema: - description: ThanosRuler defines a ThanosRuler deployment. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the ThanosRuler cluster. 
- More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - affinity: - description: If specified, the pod's scheduling constraints. - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes - that satisfy the affinity expressions specified by this field, - but it may choose a node that violates one or more of the - expressions. The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node that meets - all of the scheduling requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to the sum - if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all - objects with implicit weight 0 (i.e. it's a no-op). A null - preferred scheduling term matches no objects (i.e. is also - a no-op). + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ThanosRuler defines a ThanosRuler deployment. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the desired behavior of the ThanosRuler cluster. + More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + affinity: + description: If specified, the pod's scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the affinity expressions specified by this field, + but it may choose a node that violates one or more of the + expressions. The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node that meets + all of the scheduling requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the sum + if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all + objects with implicit weight 0 (i.e. it's a no-op). A null + preferred scheduling term matches no objects (i.e. is also + a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. 
+ type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. properties: - preference: - description: A node selector term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. - items: + nodeSelectorTerms: + description: Required. A list of node selector terms. The + terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. 
+ items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. - items: + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding - nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array required: - - preference - - weight + - nodeSelectorTerms type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to an update), the system may or may not try to - eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The - terms are ORed. - items: - description: A null or empty node selector term matches - no objects. The requirements of them are ANDed. The - TopologySelectorTerm type implements a subset of the - NodeSelectorTerm. 
- properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the affinity expressions specified by this field, + but it may choose a node that violates one or more of the + expressions. The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node that meets + all of the scheduling requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the sum + if the node has pods which matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. 
If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. This array is - replaced during a strategic merge patch. + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. items: - type: string + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the - operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be - empty. If the operator is Gt or Lt, the values - array must have a single element, which will - be interpreted as an integer. 
This array is - replaced during a strategic merge patch. - items: + matchLabels: + additionalProperties: type: string - type: array - required: - - key - - operator + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate - this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes - that satisfy the affinity expressions specified by this field, - but it may choose a node that violates one or more of the - expressions. The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node that meets - all of the scheduling requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to the sum - if the node has pods which matches the corresponding podAffinityTerm; - the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. 
- items: - description: A label selector requirement is - a selector that contains values, a key, and - an operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and DoesNotExist. + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may not + try to eventually evict the pod from its node. When there + are multiple elements, the lists of nodes corresponding to + each podAffinityTerm are intersected, i.e. all terms must + be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) that + this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label query over a set of resources, in + this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. This array - is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". - The requirements are ANDed. + type: array + required: + - key + - operator type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey matches - that of any node on which any of the selected pods - is running. Empty topologyKey is not allowed. + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. 
If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may or may not - try to eventually evict the pod from its node. When there - are multiple elements, the lists of nodes corresponding to - each podAffinityTerm are intersected, i.e. all terms must - be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) that - this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of pods - is running - properties: - labelSelector: - description: A label query over a set of resources, in - this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any + node on which any of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some other + pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the anti-affinity expressions specified by this + field, but it may choose a node that violates one or more + of the expressions. The node that is most preferred is the + one with the greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource request, + requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field + and adding "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; the node(s) with + the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values - array must be non-empty. If the operator is - Exists or DoesNotExist, the values array must - be empty. This array is replaced during a - strategic merge patch. + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. items: - type: string + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. 
+ properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object type: array - required: - - key - - operator + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object type: object - type: array - matchLabels: - additionalProperties: + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will not + be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms must + be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) that + this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label query over a set of resources, in + this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. 
+ type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any + node on which any of the selected pods is running. Empty + topologyKey is not allowed. type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of any - node on which any of the selected pods is running. Empty - topologyKey is not allowed. 
- type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. - avoid putting this pod in the same node, zone, etc. as some other - pod(s)). + required: + - topologyKey + type: object + type: array + type: object + type: object + alertDropLabels: + description: AlertDropLabels configure the label names which should + be dropped in ThanosRuler alerts. If `labels` field is not provided, + `thanos_ruler_replica` will be dropped in alerts by default. + items: + type: string + type: array + alertQueryUrl: + description: The external Query URL the Thanos Ruler will set in the + 'Source' field of all alerts. Maps to the '--alert.query-url' CLI + arg. + type: string + alertmanagersConfig: + description: Define configuration for connecting to alertmanager. Only + available with thanos v0.10.0 and higher. Maps to the `alertmanagers.config` + arg. + properties: + key: + description: The key of the secret to select from. Must be a valid + secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + alertmanagersUrl: + description: 'Define URLs to send alerts to Alertmanager. For Thanos + v0.10.0 and higher, AlertManagersConfig should be used instead. Note: + this field will be ignored if AlertManagersConfig is specified. Maps + to the `alertmanagers.url` arg.' + items: + type: string + type: array + containers: + description: 'Containers allows injecting additional containers or modifying + operator generated containers. This can be used to allow adding an + authentication proxy to a ThanosRuler pod or to change the behavior + of an operator generated container. 
Containers described here modify + an operator generated container if they share the same name and modifications + are done via a strategic merge patch. The current container names + are: `thanos-ruler` and `rules-configmap-reloader`. Overriding containers + is entirely outside the scope of what the maintainers will support + and by doing so, you accept that this behaviour may break at any time + without notice.' + items: + description: A single application container that you want to run within + a pod. properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes - that satisfy the anti-affinity expressions specified by this - field, but it may choose a node that violates one or more - of the expressions. The node that is most preferred is the - one with the greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource request, - requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field - and adding "weight" to the sum if the node has pods which - matches the corresponding podAffinityTerm; the node(s) with - the highest sum are the most preferred. + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will be + unchanged. The $(VAR_NAME) syntax can be escaped with a double + $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + regardless of whether the variable exists or not. Cannot be + updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. 
The + docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. The $(VAR_NAME) syntax + can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) + description: EnvVar represents an environment variable present + in a Container. properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a + variable cannot be resolved, the reference in the input + string will be unchanged. The $(VAR_NAME) syntax can be + escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable + exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. + configMapKeyRef: + description: Selects a key of a ConfigMap. 
properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement is - a selector that contains values, a key, and - an operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. This array - is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". - The requirements are ANDed. - type: object + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' 
+ properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey matches - that of any node on which any of the selected pods - is running. 
Empty topologyKey is not allowed. - type: string - required: - - topologyKey type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer required: - - podAffinityTerm - - weight + - name type: object type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by - this field are not met at scheduling time, the pod will not - be scheduled onto the node. If the anti-affinity requirements - specified by this field cease to be met at some point during - pod execution (e.g. due to a pod label update), the system - may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all terms must - be satisfied. + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be a + C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) that - this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of pods - is running + description: EnvFromSource represents the source of a set of + ConfigMaps properties: - labelSelector: - description: A label query over a set of resources, in - this case pods. 
+ configMapRef: + description: The ConfigMap to select from properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values - array must be non-empty. If the operator is - Exists or DoesNotExist, the values array must - be empty. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. - type: object + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of any - node on which any of the selected pods is running. Empty - topologyKey is not allowed. + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. type: string - required: - - topologyKey + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object type: object type: array - type: object - type: object - alertDropLabels: - description: AlertDropLabels configure the label names which should - be dropped in ThanosRuler alerts. If `labels` field is not provided, - `thanos_ruler_replica` will be dropped in alerts by default. - items: - type: string - type: array - alertQueryUrl: - description: The external Query URL the Thanos Ruler will set in the - 'Source' field of all alerts. Maps to the '--alert.query-url' CLI - arg. - type: string - alertmanagersConfig: - description: Define configuration for connecting to alertmanager. Only - available with thanos v0.10.0 and higher. Maps to the `alertmanagers.config` - arg. - properties: - key: - description: The key of the secret to select from. 
Must be a valid - secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - alertmanagersUrl: - description: 'Define URLs to send alerts to Alertmanager. For Thanos - v0.10.0 and higher, AlertManagersConfig should be used instead. Note: - this field will be ignored if AlertManagersConfig is specified. Maps - to the `alertmanagers.url` arg.' - items: - type: string - type: array - containers: - description: 'Containers allows injecting additional containers or modifying - operator generated containers. This can be used to allow adding an - authentication proxy to a ThanosRuler pod or to change the behavior - of an operator generated container. Containers described here modify - an operator generated container if they share the same name and modifications - are done via a strategic merge patch. The current container names - are: `thanos-ruler` and `rules-configmap-reloader`. Overriding containers - is entirely outside the scope of what the maintainers will support - and by doing so, you accept that this behaviour may break at any time - without notice.' - items: - description: A single application container that you want to run within - a pod. - properties: - args: - description: 'Arguments to the entrypoint. The docker image''s - CMD is used if this is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. If a variable - cannot be resolved, the reference in the input string will be - unchanged. The $(VAR_NAME) syntax can be escaped with a double - $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - regardless of whether the variable exists or not. Cannot be - updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' type: string - type: array - command: - description: 'Entrypoint array. Not executed within a shell. The - docker image''s ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, the reference - in the input string will be unchanged. The $(VAR_NAME) syntax - can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable exists - or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' type: string - type: array - env: - description: List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable present - in a Container. + lifecycle: + description: Actions that the management system should take in + response to container lifecycle events. Cannot be updated. properties: - name: - description: Name of the environment variable. Must be a - C_IDENTIFIER. 
- type: string - value: - description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. properties: - key: - description: The key to select. + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean required: - - key + - port type: object - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, metadata.labels, metadata.annotations, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. 
Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - fieldPath + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The reason for termination is passed to + the handler. The Pod''s termination grace period countdown + begins before the PreStop hooked is executed. Regardless + of the outcome of the handler, the container will eventually + terminate within the Pod''s termination grace period. Other + management of the container blocks until the hook completes + or until the termination grace period is reached. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array type: object - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' + httpGet: + description: HTTPGet specifies the http request to perform. 
properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - divisor: - description: Specifies the output format of the - exposed resources, defaults to "1" + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - resource: - description: 'Required: resource to select' + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - - resource + - port type: object - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - key + - port type: object type: object - required: - - name type: object - type: array - envFrom: - description: List of sources to populate environment variables - in the container. The keys defined within a source must be a - C_IDENTIFIER. All invalid keys will be reported as an event - when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take - precedence. Values defined by an Env with a duplicate key will - take precedence. Cannot be updated. - items: - description: EnvFromSource represents the source of a set of - ConfigMaps + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - configMapRef: - description: The ConfigMap to select from + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. 
Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - optional: - description: Specify whether the ConfigMap must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port type: object - prefix: - description: An optional identifier to prepend to each key - in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. 
+ Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - optional: - description: Specify whether the Secret must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer type: object - type: array - image: - description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management - to default or override container images in workload controllers - like Deployments and StatefulSets.' - type: string - imagePullPolicy: - description: 'Image pull policy. One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent - otherwise. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' - type: string - lifecycle: - description: Actions that the management system should take in - response to container lifecycle events. Cannot be updated. - properties: - postStart: - description: 'PostStart is called immediately after a container - is created. If the handler fails, the container is terminated - and restarted according to its restart policy. Other management - of the container blocks until the hook completes. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + name: + description: Name of the container specified as a DNS_LABEL. Each + container in a pod must have a unique name (DNS_LABEL). Cannot + be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing + a port here gives the system additional information about the + network connections a container uses, but is primarily informational. + Not specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Cannot be updated. + items: + description: ContainerPort represents a network port in a single + container. properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. 
- items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: 'PreStop is called immediately before a container - is terminated due to an API request or management event - such as liveness/startup probe failure, preemption, resource - contention, etc. The handler is not called if the container - crashes or exits. 
The reason for termination is passed to - the handler. The Pod''s termination grace period countdown - begins before the PreStop hooked is executed. Regardless - of the outcome of the handler, the container will eventually - terminate within the Pod''s termination grace period. Other - management of the container blocks until the hook completes - or until the termination grace period is reached. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. 
Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: 'Periodic probe of container liveness. Container - will be restarted if the probe fails. Cannot be updated. More - info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. 
- properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + hostPort: + description: Number of port to expose on the host. If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod must + have a unique name. Name for the port that can be referred + to by services. type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + protocol: + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true required: - - port + - containerPort type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - name: - description: Name of the container specified as a DNS_LABEL. Each - container in a pod must have a unique name (DNS_LABEL). Cannot - be updated. - type: string - ports: - description: List of ports to expose from the container. Exposing - a port here gives the system additional information about the - network connections a container uses, but is primarily informational. - Not specifying a port here DOES NOT prevent that port from being - exposed. 
Any port which is listening on the default "0.0.0.0" - address inside a container will be accessible from the network. - Cannot be updated. - items: - description: ContainerPort represents a network port in a single - container. + type: array + readinessProbe: + description: 'Periodic probe of container service readiness. Container + will be removed from service endpoints if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - containerPort: - description: Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. format: int32 type: integer - hostIP: - description: What host IP to bind the external port to. - type: string - hostPort: - description: Number of port to expose on the host. If specified, - this must be a valid port number, 0 < x < 65536. If HostNetwork - is specified, this must match ContainerPort. Most containers - do not need this. + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer - name: - description: If specified, this must be an IANA_SVC_NAME - and unique within the pod. Each named port in a pod must - have a unique name. Name for the port that can be referred - to by services. - type: string - protocol: - description: Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. 
TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. More info: + https://kubernetes.io/docs/concepts/policy/security-context/ + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. 
+ This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. type: string - required: - - containerPort + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. 
If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. This field is beta-level and may be + disabled with the WindowsRunAsUserName feature flag. + type: string + type: object type: object - type: array - readinessProbe: - description: 'Periodic probe of container service readiness. Container - will be removed from service endpoints if the probe fails. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod will + be restarted, just as if the livenessProbe failed. 
This can + be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. This + cannot be updated. This is an alpha feature enabled by the StartupProbe + feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for + stdin in the container runtime. If this is not set, reads from + stdin in the container will always result in EOF. Default is + false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin + channel after it has been opened by a single attach. When stdin + is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container + start, is empty until the first client attaches to stdin, and + then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container + is restarted. If this flag is false, a container processes that + reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated by the + node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. + File will use the contents of terminationMessagePath to populate + the container status message on both success and failure. 
FallbackToLogsOnError + will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. This is a beta feature. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + name: + description: name must match the name of a persistentVolumeClaim + in the pod type: string required: - - port + - devicePath + - name type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within + a container. properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). 
SubPathExpr and SubPath are mutually + exclusive. type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - resources: - description: 'Compute Resources required by this container. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + - mountPath + - name type: object - type: object - securityContext: - description: 'Security options the pod should run with. More info: - https://kubernetes.io/docs/concepts/policy/security-context/ - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might be + configured in the container image. Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + enforcedNamespaceLabel: + description: EnforcedNamespaceLabel enforces adding a namespace label + of origin for each alert and metric that is user created. The label + value will always be the namespace of the object that is being created. + type: string + evaluationInterval: + description: Interval between consecutive evaluations. + type: string + externalPrefix: + description: The external URL the Thanos Ruler instances will be available + under. This is necessary to generate correct URLs. This is necessary + if Thanos Ruler is not served from root of a DNS name. + type: string + grpcServerTlsConfig: + description: 'GRPCServerTLSConfig configures the gRPC server from which + Thanos Querier reads recorded rule data. Note: Currently only the + CAFile, CertFile, and KeyFile fields are supported. Maps to the ''--grpc-server-tls-*'' + CLI args.' + properties: + ca: + description: Stuct containing the CA cert to use for the targets. properties: - allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls whether a - process can gain more privileges than its parent process. - This bool directly controls if the no_new_privs flag will - be set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run as Privileged - 2) has CAP_SYS_ADMIN' - type: boolean - capabilities: - description: The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the - container runtime. - properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - type: object - privileged: - description: Run container in privileged mode. 
Processes in - privileged containers are essentially equivalent to root - on the host. Defaults to false. - type: boolean - procMount: - description: procMount denotes the type of proc mount to use - for the containers. The default is DefaultProcMount which - uses the container runtime defaults for readonly paths and - masked paths. This requires the ProcMountType feature flag - to be enabled. - type: string - readOnlyRootFilesystem: - description: Whether this container has a read-only root filesystem. - Default is false. - type: boolean - runAsGroup: - description: The GID to run the entrypoint of the container - process. Uses runtime default if unset. May also be set - in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. - format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container must run as a non-root - user. If true, the Kubelet will validate the image at runtime - to ensure that it does not run as UID 0 (root) and fail - to start the container if it does. If unset or false, no - such validation will be performed. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint of the container - process. Defaults to user specified in image metadata if - unspecified. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random - SELinux context for each container. May also be set in - PodSecurityContext. 
If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. + configMap: + description: ConfigMap containing data to use for the targets. properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. + key: + description: The key to select. type: string - type: - description: Type is a SELinux type label that applies - to the container. - type: string - user: - description: User is a SELinux user label that applies - to the container. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string + optional: + description: Specify whether the ConfigMap or its key must + be defined + type: boolean + required: + - key type: object - windowsOptions: - description: The Windows specific settings applied to all - containers. If unspecified, the options from the PodSecurityContext - will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. + secret: + description: Secret containing data to use for the targets. properties: - gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named - by the GMSACredentialSpecName field. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. + key: + description: The key of the secret to select from. Must + be a valid secret key. 
type: string - runAsUserName: - description: The UserName in Windows to run the entrypoint - of the container process. Defaults to the user specified - in image metadata if unspecified. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. This field is beta-level and may be - disabled with the WindowsRunAsUserName feature flag. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key type: object type: object - startupProbe: - description: 'StartupProbe indicates that the Pod has successfully - initialized. If specified, no other probes are executed until - this completes successfully. If this probe fails, the Pod will - be restarted, just as if the livenessProbe failed. This can - be used to provide different probe parameters at the beginning - of a Pod''s lifecycle, when it might take a long time to load - data or warm a cache, than during steady-state operation. This - cannot be updated. This is an alpha feature enabled by the StartupProbe - feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + caFile: + description: Path to the CA cert in the Prometheus container to + use for the targets. + type: string + cert: + description: Struct containing the client cert file for the targets. properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. 
The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. + configMap: + description: ConfigMap containing data to use for the targets. properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + key: + description: The key to select. type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string + optional: + description: Specify whether the ConfigMap or its key must + be defined + type: boolean required: - - port + - key type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' + secret: + description: Secret containing data to use for the targets. properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + key: + description: The key of the secret to select from. Must + be a valid secret key. type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - stdin: - description: Whether this container should allocate a buffer for - stdin in the container runtime. If this is not set, reads from - stdin in the container will always result in EOF. Default is - false. 
- type: boolean - stdinOnce: - description: Whether the container runtime should close the stdin - channel after it has been opened by a single attach. When stdin - is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container - start, is empty until the first client attaches to stdin, and - then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container - is restarted. If this flag is false, a container processes that - reads from stdin will never receive an EOF. Default is false - type: boolean - terminationMessagePath: - description: 'Optional: Path at which the file to which the container''s - termination message will be written is mounted into the container''s - filesystem. Message written is intended to be brief final status, - such as an assertion failure message. Will be truncated by the - node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. Defaults to /dev/termination-log. - Cannot be updated.' - type: string - terminationMessagePolicy: - description: Indicate how the termination message should be populated. - File will use the contents of terminationMessagePath to populate - the container status message on both success and failure. FallbackToLogsOnError - will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever - is smaller. Defaults to File. Cannot be updated. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus container + for the targets. type: string - tty: - description: Whether this container should allocate a TTY for - itself, also requires 'stdin' to be true. Default is false. + insecureSkipVerify: + description: Disable target certificate validation. type: boolean - volumeDevices: - description: volumeDevices is the list of block devices to be - used by the container. This is a beta feature. - items: - description: volumeDevice describes a mapping of a raw block - device within a container. - properties: - devicePath: - description: devicePath is the path inside of the container - that the device will be mapped to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - description: Pod volumes to mount into the container's filesystem. - Cannot be updated. - items: - description: VolumeMount describes a mounting of a Volume within - a container. - properties: - mountPath: - description: Path within the container at which the volume - should be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are - propagated from the host to container and the other way - around. When not set, MountPropagationNone is used. This - field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise - (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). 
- type: string - subPathExpr: - description: Expanded path within the volume from which - the container's volume should be mounted. Behaves similarly - to SubPath but environment variable references $(VAR_NAME) - are expanded using the container's environment. Defaults - to "" (volume's root). SubPathExpr and SubPath are mutually - exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - description: Container's working directory. If not specified, - the container runtime's default will be used, which might be - configured in the container image. Cannot be updated. + keyFile: + description: Path to the client key file in the Prometheus container + for the targets. + type: string + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. type: string - required: - - name type: object - type: array - enforcedNamespaceLabel: - description: EnforcedNamespaceLabel enforces adding a namespace label - of origin for each alert and metric that is user created. The label - value will always be the namespace of the object that is being created. - type: string - evaluationInterval: - description: Interval between consecutive evaluations. - type: string - externalPrefix: - description: The external URL the Thanos Ruler instances will be available - under. This is necessary to generate correct URLs. This is necessary - if Thanos Ruler is not served from root of a DNS name. 
- type: string - grpcServerTlsConfig: - description: 'GRPCServerTLSConfig configures the gRPC server from which - Thanos Querier reads recorded rule data. Note: Currently only the - CAFile, CertFile, and KeyFile fields are supported. Maps to the ''--grpc-server-tls-*'' - CLI args.' - properties: - ca: - description: Stuct containing the CA cert to use for the targets. - properties: - configMap: - description: ConfigMap containing data to use for the targets. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the ConfigMap or its key must - be defined - type: boolean - required: - - key - type: object - secret: - description: Secret containing data to use for the targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - type: object - caFile: - description: Path to the CA cert in the Prometheus container to - use for the targets. - type: string - cert: - description: Struct containing the client cert file for the targets. - properties: - configMap: - description: ConfigMap containing data to use for the targets. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the ConfigMap or its key must - be defined - type: boolean - required: - - key - type: object - secret: - description: Secret containing data to use for the targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - type: object - certFile: - description: Path to the client cert file in the Prometheus container - for the targets. - type: string - insecureSkipVerify: - description: Disable target certificate validation. - type: boolean - keyFile: - description: Path to the client key file in the Prometheus container - for the targets. - type: string - keySecret: - description: Secret containing the client key file for the targets. + image: + description: Thanos container image URL. + type: string + imagePullSecrets: + description: An optional list of references to secrets in the same namespace + to use for pulling thanos images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + items: + description: LocalObjectReference contains enough information to let + you locate the referenced object inside the same namespace. properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key type: object - serverName: - description: Used to verify the hostname for the targets. - type: string - type: object - image: - description: Thanos container image URL. - type: string - imagePullSecrets: - description: An optional list of references to secrets in the same namespace - to use for pulling thanos images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod - items: - description: LocalObjectReference contains enough information to let - you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - initContainers: - description: 'InitContainers allows adding initContainers to the pod - definition. Those can be used to e.g. fetch secrets for injection - into the ThanosRuler configuration from external sources. Any errors - during the execution of an initContainer will lead to a restart of - the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - Using initContainers for any use case other then secret fetching is - entirely outside the scope of what the maintainers will support and - by doing so, you accept that this behaviour may break at any time - without notice.' - items: - description: A single application container that you want to run within - a pod. - properties: - args: - description: 'Arguments to the entrypoint. The docker image''s - CMD is used if this is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. If a variable - cannot be resolved, the reference in the input string will be - unchanged. 
The $(VAR_NAME) syntax can be escaped with a double - $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - regardless of whether the variable exists or not. Cannot be - updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: + type: array + initContainers: + description: 'InitContainers allows adding initContainers to the pod + definition. Those can be used to e.g. fetch secrets for injection + into the ThanosRuler configuration from external sources. Any errors + during the execution of an initContainer will lead to a restart of + the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + Using initContainers for any use case other then secret fetching is + entirely outside the scope of what the maintainers will support and + by doing so, you accept that this behaviour may break at any time + without notice.' + items: + description: A single application container that you want to run within + a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will be + unchanged. The $(VAR_NAME) syntax can be escaped with a double + $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + regardless of whether the variable exists or not. Cannot be + updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The + docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. 
If a variable cannot be resolved, the reference + in the input string will be unchanged. The $(VAR_NAME) syntax + can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a + variable cannot be resolved, the reference in the input + string will be unchanged. The $(VAR_NAME) syntax can be + escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable + exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. 
The keys defined within a source must be a + C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' type: string - type: array - command: - description: 'Entrypoint array. Not executed within a shell. The - docker image''s ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, the reference - in the input string will be unchanged. 
The $(VAR_NAME) syntax - can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable exists - or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' type: string - type: array - env: - description: List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable present - in a Container. + lifecycle: + description: Actions that the management system should take in + response to container lifecycle events. Cannot be updated. properties: - name: - description: Name of the environment variable. Must be a - C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded - using the previous defined environment variables in the - container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped references - will never be expanded, regardless of whether the variable - exists or not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. 
More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. properties: - key: - description: The key to select. + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean required: - - key + - port type: object - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, metadata.labels, metadata.annotations, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - fieldPath + - port type: object - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The reason for termination is passed to + the handler. The Pod''s termination grace period countdown + begins before the PreStop hooked is executed. 
Regardless + of the outcome of the handler, the container will eventually + terminate within the Pod''s termination grace period. Other + management of the container blocks until the hook completes + or until the termination grace period is reached. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. type: string - divisor: - description: Specifies the output format of the - exposed resources, defaults to "1" + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
type: string - resource: - description: 'Required: resource to select' + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - - resource + - port type: object - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - key + - port type: object type: object - required: - - name type: object - type: array - envFrom: - description: List of sources to populate environment variables - in the container. The keys defined within a source must be a - C_IDENTIFIER. All invalid keys will be reported as an event - when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take - precedence. Values defined by an Env with a duplicate key will - take precedence. Cannot be updated. 
- items: - description: EnvFromSource represents the source of a set of - ConfigMaps + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - configMapRef: - description: The ConfigMap to select from + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. type: string - optional: - description: Specify whether the ConfigMap must be defined - type: boolean + required: + - port type: object - prefix: - description: An optional identifier to prepend to each key - in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. 
apiVersion, kind, uid?' + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - optional: - description: Specify whether the Secret must be defined - type: boolean + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer type: object - type: array - image: - description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management - to default or override container images in workload controllers - like Deployments and StatefulSets.' - type: string - imagePullPolicy: - description: 'Image pull policy. One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent - otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' - type: string - lifecycle: - description: Actions that the management system should take in - response to container lifecycle events. Cannot be updated. - properties: - postStart: - description: 'PostStart is called immediately after a container - is created. If the handler fails, the container is terminated - and restarted according to its restart policy. Other management - of the container blocks until the hook completes. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. 
- properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. 
Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: 'PreStop is called immediately before a container - is terminated due to an API request or management event - such as liveness/startup probe failure, preemption, resource - contention, etc. The handler is not called if the container - crashes or exits. The reason for termination is passed to - the handler. The Pod''s termination grace period countdown - begins before the PreStop hooked is executed. Regardless - of the outcome of the handler, the container will eventually - terminate within the Pod''s termination grace period. Other - management of the container blocks until the hook completes - or until the termination grace period is reached. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: One and only one of the following should - be specified. Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute - inside the container, the working directory for - the command is root ('/') in the container's filesystem. - The command is simply exec'd, it is not run inside - a shell, so traditional shell instructions ('|', - etc) won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is treated - as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to - the pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. 
- items: - description: HTTPHeader describes a custom header - to be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies an action involving - a TCP port. TCP hooks not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access - on the container. Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: 'Periodic probe of container liveness. Container - will be restarted if the probe fails. Cannot be updated. More - info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. 
To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. + name: + description: Name of the container specified as a DNS_LABEL. Each + container in a pod must have a unique name (DNS_LABEL). Cannot + be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing + a port here gives the system additional information about the + network connections a container uses, but is primarily informational. + Not specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Cannot be updated. + items: + description: ContainerPort represents a network port in a single + container. properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. 
type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + hostPort: + description: Number of port to expose on the host. If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod must + have a unique name. Name for the port that can be referred + to by services. type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + protocol: + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. 
Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true required: - - port + - containerPort type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - name: - description: Name of the container specified as a DNS_LABEL. Each - container in a pod must have a unique name (DNS_LABEL). Cannot - be updated. - type: string - ports: - description: List of ports to expose from the container. Exposing - a port here gives the system additional information about the - network connections a container uses, but is primarily informational. - Not specifying a port here DOES NOT prevent that port from being - exposed. Any port which is listening on the default "0.0.0.0" - address inside a container will be accessible from the network. - Cannot be updated. - items: - description: ContainerPort represents a network port in a single - container. + type: array + readinessProbe: + description: 'Periodic probe of container service readiness. Container + will be removed from service endpoints if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: - containerPort: - description: Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. 
To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. format: int32 type: integer - hostIP: - description: What host IP to bind the external port to. - type: string - hostPort: - description: Number of port to expose on the host. If specified, - this must be a valid port number, 0 < x < 65536. If HostNetwork - is specified, this must match ContainerPort. Most containers - do not need this. + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' format: int32 type: integer - name: - description: If specified, this must be an IANA_SVC_NAME - and unique within the pod. Each named port in a pod must - have a unique name. Name for the port that can be referred - to by services. - type: string - protocol: - description: Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - readinessProbe: - description: 'Periodic probe of container service readiness. Container - will be removed from service endpoints if the probe fails. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. 
TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. - type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. 
More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. 
More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - resources: - description: 'Compute Resources required by this container. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. More info: + https://kubernetes.io/docs/concepts/policy/security-context/ + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. type: string - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - securityContext: - description: 'Security options the pod should run with. More info: - https://kubernetes.io/docs/concepts/policy/security-context/ - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' - properties: - allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls whether a - process can gain more privileges than its parent process. - This bool directly controls if the no_new_privs flag will - be set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run as Privileged - 2) has CAP_SYS_ADMIN' - type: boolean - capabilities: - description: The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the - container runtime. 
- properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. 
type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type + role: + description: Role is a SELinux role label that applies + to the container. type: string - type: array - type: object - privileged: - description: Run container in privileged mode. Processes in - privileged containers are essentially equivalent to root - on the host. Defaults to false. - type: boolean - procMount: - description: procMount denotes the type of proc mount to use - for the containers. The default is DefaultProcMount which - uses the container runtime defaults for readonly paths and - masked paths. This requires the ProcMountType feature flag - to be enabled. - type: string - readOnlyRootFilesystem: - description: Whether this container has a read-only root filesystem. - Default is false. - type: boolean - runAsGroup: - description: The GID to run the entrypoint of the container - process. Uses runtime default if unset. May also be set - in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. - format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container must run as a non-root - user. If true, the Kubelet will validate the image at runtime - to ensure that it does not run as UID 0 (root) and fail - to start the container if it does. If unset or false, no - such validation will be performed. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint of the container - process. Defaults to user specified in image metadata if - unspecified. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. 
- format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random - SELinux context for each container. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. This field is beta-level and may be + disabled with the WindowsRunAsUserName feature flag. 
+ type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod will + be restarted, just as if the livenessProbe failed. This can + be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. This + cannot be updated. This is an alpha feature enabled by the StartupProbe + feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for + stdin in the container runtime. If this is not set, reads from + stdin in the container will always result in EOF. Default is + false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin + channel after it has been opened by a single attach. When stdin + is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container + start, is empty until the first client attaches to stdin, and + then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container + is restarted. If this flag is false, a container processes that + reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated by the + node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. + File will use the contents of terminationMessagePath to populate + the container status message on both success and failure. 
FallbackToLogsOnError + will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. This is a beta feature. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. - type: string - type: - description: Type is a SELinux type label that applies - to the container. + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. type: string - user: - description: User is a SELinux user label that applies - to the container. + name: + description: name must match the name of a persistentVolumeClaim + in the pod type: string + required: + - devicePath + - name type: object - windowsOptions: - description: The Windows specific settings applied to all - containers. If unspecified, the options from the PodSecurityContext - will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within + a container. 
properties: - gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named - by the GMSACredentialSpecName field. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. type: string - runAsUserName: - description: The UserName in Windows to run the entrypoint - of the container process. Defaults to the user specified - in image metadata if unspecified. May also be set in - PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext - takes precedence. This field is beta-level and may be - disabled with the WindowsRunAsUserName feature flag. - type: string - type: object - type: object - startupProbe: - description: 'StartupProbe indicates that the Pod has successfully - initialized. If specified, no other probes are executed until - this completes successfully. If this probe fails, the Pod will - be restarted, just as if the livenessProbe failed. This can - be used to provide different probe parameters at the beginning - of a Pod''s lifecycle, when it might take a long time to load - data or warm a cache, than during steady-state operation. This - cannot be updated. This is an alpha feature enabled by the StartupProbe - feature flag. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the command line to execute inside - the container, the working directory for the command is - root ('/') in the container's filesystem. The command - is simply exec'd, it is not run inside a shell, so traditional - shell instructions ('|', etc) won't work. To use a shell, - you need to explicitly call out to that shell. Exit - status of 0 is treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures for the probe to - be considered failed after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http request to perform. - properties: - host: - description: Host name to connect to, defaults to the - pod IP. You probably want to set "Host" in httpHeaders - instead. + name: + description: This must match the Name of a Volume. type: string - httpHeaders: - description: Custom headers to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes a custom header to - be used in HTTP probes - properties: - name: - description: The header field name - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the HTTP server. + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). 
type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting to the host. - Defaults to HTTP. + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. type: string required: - - port + - mountPath + - name type: object - initialDelaySeconds: - description: 'Number of seconds after the container has started - before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes for the probe to - be considered successful after having failed. Defaults to - 1. Must be 1 for liveness and startup. Minimum value is - 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an action involving a TCP - port. TCP hooks not yet supported TODO: implement a realistic - TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the port to access on the - container. Number must be in the range 1 to 65535. Name - must be an IANA_SVC_NAME. 
- x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - description: 'Number of seconds after which the probe times - out. Defaults to 1 second. Minimum value is 1. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - stdin: - description: Whether this container should allocate a buffer for - stdin in the container runtime. If this is not set, reads from - stdin in the container will always result in EOF. Default is - false. - type: boolean - stdinOnce: - description: Whether the container runtime should close the stdin - channel after it has been opened by a single attach. When stdin - is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container - start, is empty until the first client attaches to stdin, and - then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container - is restarted. If this flag is false, a container processes that - reads from stdin will never receive an EOF. Default is false - type: boolean - terminationMessagePath: - description: 'Optional: Path at which the file to which the container''s - termination message will be written is mounted into the container''s - filesystem. Message written is intended to be brief final status, - such as an assertion failure message. Will be truncated by the - node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. Defaults to /dev/termination-log. - Cannot be updated.' + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might be + configured in the container image. Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + labels: + additionalProperties: + type: string + description: Labels configure the external label pairs to ThanosRuler. + If not provided, default replica label `thanos_ruler_replica` will + be added as a label and be dropped in alerts. + type: object + listenLocal: + description: ListenLocal makes the Thanos ruler listen on loopback, + so that it does not bind against the Pod IP. + type: boolean + logFormat: + description: Log format for ThanosRuler to be configured with. + type: string + logLevel: + description: Log level for ThanosRuler to be configured with. + type: string + nodeSelector: + additionalProperties: + type: string + description: Define which Nodes the Pods are scheduled on. + type: object + objectStorageConfig: + description: ObjectStorageConfig configures object storage in Thanos. + properties: + key: + description: The key of the secret to select from. Must be a valid + secret key. type: string - terminationMessagePolicy: - description: Indicate how the termination message should be populated. - File will use the contents of terminationMessagePath to populate - the container status message on both success and failure. FallbackToLogsOnError - will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever - is smaller. Defaults to File. Cannot be updated. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - tty: - description: Whether this container should allocate a TTY for - itself, also requires 'stdin' to be true. Default is false. 
+ optional: + description: Specify whether the Secret or its key must be defined type: boolean - volumeDevices: - description: volumeDevices is the list of block devices to be - used by the container. This is a beta feature. - items: - description: volumeDevice describes a mapping of a raw block - device within a container. - properties: - devicePath: - description: devicePath is the path inside of the container - that the device will be mapped to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - description: Pod volumes to mount into the container's filesystem. - Cannot be updated. - items: - description: VolumeMount describes a mounting of a Volume within - a container. - properties: - mountPath: - description: Path within the container at which the volume - should be mounted. Must not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines how mounts are - propagated from the host to container and the other way - around. When not set, MountPropagationNone is used. This - field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: Mounted read-only if true, read-write otherwise - (false or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume from which the container's - volume should be mounted. Defaults to "" (volume's root). - type: string - subPathExpr: - description: Expanded path within the volume from which - the container's volume should be mounted. Behaves similarly - to SubPath but environment variable references $(VAR_NAME) - are expanded using the container's environment. Defaults - to "" (volume's root). SubPathExpr and SubPath are mutually - exclusive. 
- type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - description: Container's working directory. If not specified, - the container runtime's default will be used, which might be - configured in the container image. Cannot be updated. - type: string required: - - name + - key + type: object + paused: + description: When a ThanosRuler deployment is paused, no actions except + for deletion will be performed on the underlying objects. + type: boolean + podMetadata: + description: PodMetadata contains Labels and Annotations gets propagated + to the thanos ruler pods. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored + with a resource that may be set by external tools to store and + retrieve arbitrary metadata. They are not queryable and should + be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to + organize and categorize (scope and select) objects. May match + selectors of replication controllers and services. More info: + http://kubernetes.io/docs/user-guide/labels' + type: object type: object - type: array - labels: - additionalProperties: + portName: + description: Port name used for the pods and governing service. This + defaults to web type: string - description: Labels configure the external label pairs to ThanosRuler. - If not provided, default replica label `thanos_ruler_replica` will - be added as a label and be dropped in alerts. - type: object - listenLocal: - description: ListenLocal makes the Thanos ruler listen on loopback, - so that it does not bind against the Pod IP. - type: boolean - logFormat: - description: Log format for ThanosRuler to be configured with. - type: string - logLevel: - description: Log level for ThanosRuler to be configured with. 
- type: string - nodeSelector: - additionalProperties: + priorityClassName: + description: Priority class assigned to the Pods type: string - description: Define which Nodes the Pods are scheduled on. - type: object - objectStorageConfig: - description: ObjectStorageConfig configures object storage in Thanos. - properties: - key: - description: The key of the secret to select from. Must be a valid - secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - paused: - description: When a ThanosRuler deployment is paused, no actions except - for deletion will be performed on the underlying objects. - type: boolean - podMetadata: - description: PodMetadata contains Labels and Annotations gets propagated - to the thanos ruler pods. - properties: - annotations: - additionalProperties: + queryConfig: + description: Define configuration for connecting to thanos query instances. + If this is defined, the QueryEndpoints field will be ignored. Maps + to the `query.config` CLI argument. Only available with thanos v0.11.0 + and higher. + properties: + key: + description: The key of the secret to select from. Must be a valid + secret key. type: string - description: 'Annotations is an unstructured key value map stored - with a resource that may be set by external tools to store and - retrieve arbitrary metadata. They are not queryable and should - be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' - type: object - labels: - additionalProperties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, kind, uid?' type: string - description: 'Map of string keys and values that can be used to - organize and categorize (scope and select) objects. May match - selectors of replication controllers and services. More info: - http://kubernetes.io/docs/user-guide/labels' - type: object - type: object - portName: - description: Port name used for the pods and governing service. This - defaults to web - type: string - priorityClassName: - description: Priority class assigned to the Pods - type: string - queryConfig: - description: Define configuration for connecting to thanos query instances. - If this is defined, the QueryEndpoints field will be ignored. Maps - to the `query.config` CLI argument. Only available with thanos v0.11.0 - and higher. - properties: - key: - description: The key of the secret to select from. Must be a valid - secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + queryEndpoints: + description: QueryEndpoints defines Thanos querier endpoints from which + to query metrics. Maps to the --query flag of thanos ruler. + items: type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - queryEndpoints: - description: QueryEndpoints defines Thanos querier endpoints from which - to query metrics. Maps to the --query flag of thanos ruler. - items: - type: string - type: array - replicas: - description: Number of thanos ruler instances to deploy. - format: int32 - type: integer - resources: - description: Resources defines the resource requirements for single - Pods. 
If not provided, no requests/limits will be set - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount of compute resources - required. If Requests is omitted for a container, it defaults - to Limits if that is explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - retention: - description: Time duration ThanosRuler shall retain data for. Default - is '24h', and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` - (milliseconds seconds minutes hours days weeks years). - type: string - routePrefix: - description: The route prefix ThanosRuler registers HTTP handlers for. - This allows thanos UI to be served on a sub-path. - type: string - ruleNamespaceSelector: - description: Namespaces to be selected for Rules discovery. If unspecified, - only the same namespace as the ThanosRuler object is in is used. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. 
If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - ruleSelector: - description: A label selector to select which PrometheusRules to mount - for alerting and recording. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains - values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to a - set of values. Valid operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator - is In or NotIn, the values array must be non-empty. If the - operator is Exists or DoesNotExist, the values array must - be empty. This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator is - "In", and the values array contains only "value". The requirements - are ANDed. 
- type: object - type: object - securityContext: - description: SecurityContext holds pod-level security attributes and - common container settings. This defaults to the default PodSecurityContext. - properties: - fsGroup: - description: "A special supplemental group that applies to all containers - in a pod. Some volume types allow the Kubelet to change the ownership - of that volume to be owned by the pod: \n 1. The owning GID will - be the FSGroup 2. The setgid bit is set (new files created in - the volume will be owned by FSGroup) 3. The permission bits are - OR'd with rw-rw---- \n If unset, the Kubelet will not modify the - ownership and permissions of any volume." - format: int64 - type: integer - runAsGroup: - description: The GID to run the entrypoint of the container process. - Uses runtime default if unset. May also be set in SecurityContext. If - set in both SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence for that container. - format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container must run as a non-root - user. If true, the Kubelet will validate the image at runtime - to ensure that it does not run as UID 0 (root) and fail to start - the container if it does. If unset or false, no such validation - will be performed. May also be set in SecurityContext. If set - in both SecurityContext and PodSecurityContext, the value specified - in SecurityContext takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. May - also be set in SecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. - format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to be applied to all containers. 
- If unspecified, the container runtime will allocate a random SELinux - context for each container. May also be set in SecurityContext. If - set in both SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence for that container. - properties: - level: - description: Level is SELinux level label that applies to the - container. - type: string - role: - description: Role is a SELinux role label that applies to the - container. - type: string - type: - description: Type is a SELinux type label that applies to the - container. - type: string - user: - description: User is a SELinux user label that applies to the - container. + type: array + replicas: + description: Number of thanos ruler instances to deploy. + format: int32 + type: integer + resources: + description: Resources defines the resource requirements for single + Pods. If not provided, no requests/limits will be set + properties: + limits: + additionalProperties: type: string - type: object - supplementalGroups: - description: A list of groups applied to the first process run in - each container, in addition to the container's primary GID. If - unspecified, no groups will be added to any container. - items: - format: int64 - type: integer - type: array - sysctls: - description: Sysctls hold a list of namespaced sysctls used for - the pod. Pods with unsupported sysctls (by the container runtime) - might fail to launch. - items: - description: Sysctl defines a kernel parameter to be set - properties: - name: - description: Name of a property to set - type: string - value: - description: Value of a property to set - type: string - required: - - name - - value + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' type: object - type: array - windowsOptions: - description: The Windows specific settings applied to all containers. 
- If unspecified, the options within a container's SecurityContext - will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - properties: - gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA admission - webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential spec named by - the GMSACredentialSpecName field. This field is alpha-level - and is only honored by servers that enable the WindowsGMSA - feature flag. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the GMSA - credential spec to use. This field is alpha-level and is only - honored by servers that enable the WindowsGMSA feature flag. - type: string - runAsUserName: - description: The UserName in Windows to run the entrypoint of - the container process. Defaults to the user specified in image - metadata if unspecified. May also be set in PodSecurityContext. - If set in both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. This - field is beta-level and may be disabled with the WindowsRunAsUserName - feature flag. - type: string - type: object - type: object - serviceAccountName: - description: ServiceAccountName is the name of the ServiceAccount to - use to run the Thanos Ruler Pods. - type: string - storage: - description: Storage spec to specify how storage shall be used. - properties: - emptyDir: - description: 'EmptyDirVolumeSource to be used by the Prometheus - StatefulSets. If specified, used in place of any volumeClaimTemplate. - More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' - properties: - medium: - description: 'What type of storage medium should back this directory. - The default is "" which means to use the node''s default medium. - Must be an empty string (default) or Memory. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - type: string - sizeLimit: - description: 'Total amount of local storage required for this - EmptyDir volume. The size limit is also applicable for memory - medium. The maximum usage on memory medium EmptyDir would - be the minimum value between the SizeLimit specified here - and the sum of memory limits of all containers in a pod. The - default is nil which means that the limit is undefined. More - info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + requests: + additionalProperties: type: string - type: object - volumeClaimTemplate: - description: A PVC spec to be used by the Prometheus StatefulSets. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' - type: object - spec: - description: 'Spec defines the desired characteristics of a - volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + description: 'Requests describes the minimum amount of compute resources + required. If Requests is omitted for a container, it defaults + to Limits if that is explicitly specified, otherwise to an implementation-defined + value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + retention: + description: Time duration ThanosRuler shall retain data for. Default + is '24h', and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` + (milliseconds seconds minutes hours days weeks years). + type: string + routePrefix: + description: The route prefix ThanosRuler registers HTTP handlers for. + This allows thanos UI to be served on a sub-path. + type: string + ruleNamespaceSelector: + description: Namespaces to be selected for Rules discovery. If unspecified, + only the same namespace as the ThanosRuler object is in is used. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. items: type: string type: array - dataSource: - description: This field requires the VolumeSnapshotDataSource - alpha feature gate to be enabled and currently VolumeSnapshot - is the only supported data source. 
If the provisioner - can support VolumeSnapshot data source, it will create - a new volume and data will be restored to the volume at - the same time. If the provisioner does not support VolumeSnapshot - data source, volume will not be created and the failure - will be reported as an event. In the future, we plan to - support more data source types and the behavior of the - provisioner may change. - properties: - apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, the - specified Kind must be in the core API group. For - any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - type: string - description: 'Limits describes the maximum amount of - compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - type: string - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for - binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. 
- items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not - included in claim spec. This is a beta feature. - type: string - volumeName: - description: VolumeName is the binding reference to the - PersistentVolume backing this claim. - type: string + required: + - key + - operator type: object - status: - description: 'Status represents the current information/status - of a persistent volume claim. Read-only. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + ruleSelector: + description: A label selector to select which PrometheusRules to mount + for alerting and recording. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. properties: - accessModes: - description: 'AccessModes contains the actual access modes - the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. items: type: string type: array - capacity: - additionalProperties: - type: string - description: Represents the actual resources of the underlying - volume. - type: object - conditions: - description: Current Condition of persistent volume claim. - If underlying persistent volume is being resized then - the Condition will be set to 'ResizeStarted'. 
- items: - description: PersistentVolumeClaimCondition contails details - about state of pvc - properties: - lastProbeTime: - description: Last time we probed the condition. - format: date-time - type: string - lastTransitionTime: - description: Last time the condition transitioned - from one status to another. - format: date-time - type: string - message: - description: Human-readable message indicating details - about last transition. - type: string - reason: - description: Unique, this should be a short, machine - understandable string that gives the reason for - condition's last transition. If it reports "ResizeStarted" - that means the underlying persistent volume is being - resized. - type: string - status: - type: string - type: - description: PersistentVolumeClaimConditionType is - a valid value of PersistentVolumeClaimCondition.Type - type: string - required: - - status - - type - type: object - type: array - phase: - description: Phase represents the current phase of PersistentVolumeClaim. - type: string + required: + - key + - operator type: object - type: object - type: object - tolerations: - description: If specified, the pod's tolerations. - items: - description: The pod this Toleration is attached to tolerates any - taint that matches the triple using the matching - operator . + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + securityContext: + description: SecurityContext holds pod-level security attributes and + common container settings. This defaults to the default PodSecurityContext. properties: - effect: - description: Effect indicates the taint effect to match. Empty - means match all taint effects. 
When specified, allowed values - are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, operator - must be Exists; this combination means to match all values and - all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. Exists - is equivalent to wildcard for value, so that a pod can tolerate - all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the - toleration (which must be of effect NoExecute, otherwise this - field is ignored) tolerates the taint. By default, it is not - set, which means tolerate the taint forever (do not evict). - Zero and negative values will be treated as 0 (evict immediately) - by the system. + fsGroup: + description: "A special supplemental group that applies to all containers + in a pod. Some volume types allow the Kubelet to change the ownership + of that volume to be owned by the pod: \n 1. The owning GID will + be the FSGroup 2. The setgid bit is set (new files created in + the volume will be owned by FSGroup) 3. The permission bits are + OR'd with rw-rw---- \n If unset, the Kubelet will not modify the + ownership and permissions of any volume." format: int64 type: integer - value: - description: Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise - just a regular string. - type: string - type: object - type: array - tracingConfig: - description: TracingConfig configures tracing in Thanos. This is an - experimental feature, it may change in any upcoming release in a breaking - way. - properties: - key: - description: The key of the secret to select from. Must be a valid - secret key. - type: string - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - volumes: - description: Volumes allows configuration of additional volumes on the - output StatefulSet definition. Volumes specified will be appended - to other volumes that are generated as a result of StorageSpec objects. - items: - description: Volume represents a named volume in a pod that may be - accessed by any container in the pod. - properties: - awsElasticBlockStore: - description: 'AWSElasticBlockStore represents an AWS Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. May + also be set in SecurityContext. 
If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux + context for each container. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. properties: - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - TODO: how do we prevent errors in the filesystem from compromising - the machine' + level: + description: Level is SELinux level label that applies to the + container. type: string - partition: - description: 'The partition in the volume that you want to - mount. If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition - as "1". Similarly, the volume partition for /dev/sda is - "0" (or you can leave the property empty).' - format: int32 - type: integer - readOnly: - description: 'Specify "true" to force and set the ReadOnly - property in VolumeMounts to "true". If omitted, the default - is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' - type: boolean - volumeID: - description: 'Unique ID of the persistent disk resource in - AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + role: + description: Role is a SELinux role label that applies to the + container. 
type: string - required: - - volumeID - type: object - azureDisk: - description: AzureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. - properties: - cachingMode: - description: 'Host Caching mode: None, Read Only, Read Write.' + type: + description: Type is a SELinux type label that applies to the + container. type: string - diskName: - description: The Name of the data disk in the blob storage + user: + description: User is a SELinux user label that applies to the + container. type: string - diskURI: - description: The URI the data disk in the blob storage + type: object + supplementalGroups: + description: A list of groups applied to the first process run in + each container, in addition to the container's primary GID. If + unspecified, no groups will be added to any container. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for + the pod. Pods with unsupported sysctls (by the container runtime) + might fail to launch. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. This field is alpha-level + and is only honored by servers that enable the WindowsGMSA + feature flag. 
type: string - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. This field is alpha-level and is only + honored by servers that enable the WindowsGMSA feature flag. type: string - kind: - description: 'Expected values Shared: multiple blob disks - per storage account Dedicated: single blob disk per storage - account Managed: azure managed data disk (only in managed - availability set). defaults to shared' + runAsUserName: + description: The UserName in Windows to run the entrypoint of + the container process. Defaults to the user specified in image + metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. This + field is beta-level and may be disabled with the WindowsRunAsUserName + feature flag. type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - required: - - diskName - - diskURI type: object - azureFile: - description: AzureFile represents an Azure File Service mount - on the host and bind mount to the pod. + type: object + serviceAccountName: + description: ServiceAccountName is the name of the ServiceAccount to + use to run the Thanos Ruler Pods. + type: string + storage: + description: Storage spec to specify how storage shall be used. + properties: + emptyDir: + description: 'EmptyDirVolumeSource to be used by the Prometheus + StatefulSets. If specified, used in place of any volumeClaimTemplate. + More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' properties: - readOnly: - description: Defaults to false (read/write). 
ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - secretName: - description: the name of secret that contains Azure Storage - Account Name and Key + medium: + description: 'What type of storage medium should back this directory. + The default is "" which means to use the node''s default medium. + Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' type: string - shareName: - description: Share Name + sizeLimit: + description: 'Total amount of local storage required for this + EmptyDir volume. The size limit is also applicable for memory + medium. The maximum usage on memory medium EmptyDir would + be the minimum value between the SizeLimit specified here + and the sum of memory limits of all containers in a pod. The + default is nil which means that the limit is undefined. More + info: http://kubernetes.io/docs/user-guide/volumes#emptydir' type: string - required: - - secretName - - shareName type: object - cephfs: - description: CephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + volumeClaimTemplate: + description: A PVC spec to be used by the Prometheus StatefulSets. properties: - monitors: - description: 'Required: Monitors is a collection of Ceph monitors - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - items: - type: string - type: array - path: - description: 'Optional: Used as the mounted root, rather than - the full Ceph tree, default is /' + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string - readOnly: - description: 'Optional: Defaults to false (read/write). 
ReadOnly - here will force the ReadOnly setting in VolumeMounts. More - info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: boolean - secretFile: - description: 'Optional: SecretFile is the path to key ring - for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string - secretRef: - description: 'Optional: SecretRef is reference to the authentication - secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' type: object - user: - description: 'Optional: User is the rados user name, default - is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' - type: string - required: - - monitors - type: object - cinder: - description: 'Cinder represents a cinder volume attached and mounted - on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - properties: - fsType: - description: 'Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Examples: "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: string - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts. More - info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: boolean - secretRef: - description: 'Optional: points to a secret object containing - parameters used to connect to OpenStack.' + spec: + description: 'Spec defines the desired characteristics of a + volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + accessModes: + description: 'AccessModes contains the desired access modes + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: This field requires the VolumeSnapshotDataSource + alpha feature gate to be enabled and currently VolumeSnapshot + is the only supported data source. If the provisioner + can support VolumeSnapshot data source, it will create + a new volume and data will be restored to the volume at + the same time. If the provisioner does not support VolumeSnapshot + data source, volume will not be created and the failure + will be reported as an event. In the future, we plan to + support more data source types and the behavior of the + provisioner may change. + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, the + specified Kind must be in the core API group. For + any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of + compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for + binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required + by the claim. Value of Filesystem is implied when not + included in claim spec. This is a beta feature. + type: string + volumeName: + description: VolumeName is the binding reference to the + PersistentVolume backing this claim. type: string type: object - volumeID: - description: 'volume id used to identify the volume in cinder. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md' - type: string - required: - - volumeID - type: object - configMap: - description: ConfigMap represents a configMap that should populate - this volume - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a value between 0 and 0777. Defaults - to 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - items: - description: If unspecified, each key-value pair in the Data - field of the referenced ConfigMap will be projected into - the volume as a file whose name is the key and content is - the value. If specified, the listed keys will be projected - into the specified paths, and unlisted keys will not be - present. 
If a key is specified which is not present in the - ConfigMap, the volume setup will error unless it is marked - optional. Paths must be relative and may not contain the - '..' path or start with '..'. - items: - description: Maps a string key to a path within a volume. - properties: - key: - description: The key to project. + status: + description: 'Status represents the current information/status + of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the actual access modes + the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: type: string - mode: - description: 'Optional: mode bits to use on this file, - must be a value between 0 and 0777. If not specified, - the volume defaultMode will be used. This might be - in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode - bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to map the - key to. May not be an absolute path. May not contain - the path element '..'. May not start with the string - '..'. + type: array + capacity: + additionalProperties: type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the ConfigMap or its keys must - be defined - type: boolean - type: object - csi: - description: CSI (Container Storage Interface) represents storage - that is handled by an external CSI driver (Alpha feature). - properties: - driver: - description: Driver is the name of the CSI driver that handles - this volume. 
Consult with your admin for the correct name - as registered in the cluster. - type: string - fsType: - description: Filesystem type to mount. Ex. "ext4", "xfs", - "ntfs". If not provided, the empty value is passed to the - associated CSI driver which will determine the default filesystem - to apply. - type: string - nodePublishSecretRef: - description: NodePublishSecretRef is a reference to the secret - object containing sensitive information to pass to the CSI - driver to complete the CSI NodePublishVolume and NodeUnpublishVolume - calls. This field is optional, and may be empty if no secret - is required. If the secret object contains more than one - secret, all secret references are passed. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - readOnly: - description: Specifies a read-only configuration for the volume. - Defaults to false (read/write). - type: boolean - volumeAttributes: - additionalProperties: - type: string - description: VolumeAttributes stores driver-specific properties - that are passed to the CSI driver. Consult your driver's - documentation for supported values. - type: object - required: - - driver - type: object - downwardAPI: - description: DownwardAPI represents downward API about the pod - that should populate this volume - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a value between 0 and 0777. Defaults - to 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' 
- format: int32 - type: integer - items: - description: Items is a list of downward API volume file - items: - description: DownwardAPIVolumeFile represents information - to create the file containing the pod field - properties: - fieldRef: - description: 'Required: Selects a field of the pod: - only annotations, labels, name and namespace are supported.' + description: Represents the actual resources of the underlying + volume. + type: object + conditions: + description: Current Condition of persistent volume claim. + If underlying persistent volume is being resized then + the Condition will be set to 'ResizeStarted'. + items: + description: PersistentVolumeClaimCondition contails details + about state of pvc properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". + lastProbeTime: + description: Last time we probed the condition. + format: date-time type: string - fieldPath: - description: Path of the field to select in the - specified API version. + lastTransitionTime: + description: Last time the condition transitioned + from one status to another. + format: date-time type: string - required: - - fieldPath - type: object - mode: - description: 'Optional: mode bits to use on this file, - must be a value between 0 and 0777. If not specified, - the volume defaultMode will be used. This might be - in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode - bits set.' - format: int32 - type: integer - path: - description: 'Required: Path is the relative path name - of the file to be created. Must not be absolute or - contain the ''..'' path. Must be utf-8 encoded. The - first item of the relative path must not start with - ''..''' - type: string - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - requests.cpu and requests.memory) are currently supported.' 
- properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' + message: + description: Human-readable message indicating details + about last transition. type: string - divisor: - description: Specifies the output format of the - exposed resources, defaults to "1" + reason: + description: Unique, this should be a short, machine + understandable string that gives the reason for + condition's last transition. If it reports "ResizeStarted" + that means the underlying persistent volume is being + resized. type: string - resource: - description: 'Required: resource to select' + status: + type: string + type: + description: PersistentVolumeClaimConditionType is + a valid value of PersistentVolumeClaimCondition.Type type: string required: - - resource + - status + - type type: object - required: - - path - type: object - type: array - type: object - emptyDir: - description: 'EmptyDir represents a temporary directory that shares - a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - properties: - medium: - description: 'What type of storage medium should back this - directory. The default is "" which means to use the node''s - default medium. Must be an empty string (default) or Memory. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' - type: string - sizeLimit: - description: 'Total amount of local storage required for this - EmptyDir volume. The size limit is also applicable for memory - medium. The maximum usage on memory medium EmptyDir would - be the minimum value between the SizeLimit specified here - and the sum of memory limits of all containers in a pod. - The default is nil which means that the limit is undefined. - More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' - type: string + type: array + phase: + description: Phase represents the current phase of PersistentVolumeClaim. 
+ type: string + type: object type: object - fc: - description: FC represents a Fibre Channel resource that is attached - to a kubelet's host machine and then exposed to the pod. - properties: - fsType: - description: 'Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - lun: - description: 'Optional: FC target lun number' - format: int32 - type: integer - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts.' - type: boolean - targetWWNs: - description: 'Optional: FC target worldwide names (WWNs)' - items: + type: object + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, operator + must be Exists; this combination means to match all values and + all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. Exists + is equivalent to wildcard for value, so that a pod can tolerate + all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the + toleration (which must be of effect NoExecute, otherwise this + field is ignored) tolerates the taint. 
By default, it is not + set, which means tolerate the taint forever (do not evict). + Zero and negative values will be treated as 0 (evict immediately) + by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise + just a regular string. + type: string + type: object + type: array + tracingConfig: + description: TracingConfig configures tracing in Thanos. This is an + experimental feature, it may change in any upcoming release in a breaking + way. + properties: + key: + description: The key of the secret to select from. Must be a valid + secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + volumes: + description: Volumes allows configuration of additional volumes on the + output StatefulSet definition. Volumes specified will be appended + to other volumes that are generated as a result of StorageSpec objects. + items: + description: Volume represents a named volume in a pod that may be + accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. 
More info: + https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising + the machine' type: string - type: array - wwids: - description: 'Optional: FC volume world wide identifiers (wwids) - Either wwids or combination of targetWWNs and lun must be - set, but not both simultaneously.' - items: + partition: + description: 'The partition in the volume that you want to + mount. If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for /dev/sda is + "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the default + is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource in + AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' type: string - type: array - type: object - flexVolume: - description: FlexVolume represents a generic volume resource that - is provisioned/attached using an exec based plugin. - properties: - driver: - description: Driver is the name of the driver to use for this - volume. - type: string - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". The default filesystem depends on FlexVolume - script. - type: string - options: - additionalProperties: + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' 
type: string - description: 'Optional: Extra command options if any.' - type: object - readOnly: - description: 'Optional: Defaults to false (read/write). ReadOnly - here will force the ReadOnly setting in VolumeMounts.' - type: boolean - secretRef: - description: 'Optional: SecretRef is reference to the secret - object containing sensitive information to pass to the plugin - scripts. This may be empty if no secret object is specified. - If the secret object contains more than one secret, all - secrets are passed to the plugin scripts.' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - driver - type: object - flocker: - description: Flocker represents a Flocker volume attached to a - kubelet's host machine. This depends on the Flocker control - service being running - properties: - datasetName: - description: Name of the dataset stored as metadata -> name - on the dataset for Flocker should be considered as deprecated - type: string - datasetUUID: - description: UUID of the dataset. This is unique identifier - of a Flocker dataset - type: string - type: object - gcePersistentDisk: - description: 'GCEPersistentDisk represents a GCE Disk resource - that is attached to a kubelet''s host machine and then exposed - to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - properties: - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. 
More info: - https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - partition: - description: 'The partition in the volume that you want to - mount. If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition - as "1". Similarly, the volume partition for /dev/sda is - "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - format: int32 - type: integer - pdName: - description: 'Unique name of the PD resource in GCE. Used - to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: string - readOnly: - description: 'ReadOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' - type: boolean - required: - - pdName - type: object - gitRepo: - description: 'GitRepo represents a git repository at a particular - revision. DEPRECATED: GitRepo is deprecated. To provision a - container with a git repo, mount an EmptyDir into an InitContainer - that clones the repo using git, then mount the EmptyDir into - the Pod''s container.' - properties: - directory: - description: Target directory name. Must not contain or start - with '..'. If '.' is supplied, the volume directory will - be the git repository. Otherwise, if specified, the volume - will contain the git repository in the subdirectory with - the given name. - type: string - repository: - description: Repository URL - type: string - revision: - description: Commit hash for the specified revision. - type: string - required: - - repository - type: object - glusterfs: - description: 'Glusterfs represents a Glusterfs mount on the host - that shares a pod''s lifetime. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md' - properties: - endpoints: - description: 'EndpointsName is the endpoint name that details - Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: string - path: - description: 'Path is the Glusterfs volume path. More info: - https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: string - readOnly: - description: 'ReadOnly here will force the Glusterfs volume - to be mounted with read-only permissions. Defaults to false. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' - type: boolean - required: - - endpoints - - path - type: object - hostPath: - description: 'HostPath represents a pre-existing file or directory - on the host machine that is directly exposed to the container. - This is generally used for system agents or other privileged - things that are allowed to see the host machine. Most containers - will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - --- TODO(jonesdl) We need to restrict who can use host directory - mounts and who can/can not mount host directories as read/write.' - properties: - path: - description: 'Path of the directory on the host. If the path - is a symlink, it will follow the link to the real path. - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - type: string - type: - description: 'Type for HostPath Volume Defaults to "" More - info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' - type: string - required: - - path - type: object - iscsi: - description: 'ISCSI represents an ISCSI Disk resource that is - attached to a kubelet''s host machine and then exposed to the - pod. 
More info: https://examples.k8s.io/volumes/iscsi/README.md' - properties: - chapAuthDiscovery: - description: whether support iSCSI Discovery CHAP authentication - type: boolean - chapAuthSession: - description: whether support iSCSI Session CHAP authentication - type: boolean - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#iscsi - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - initiatorName: - description: Custom iSCSI Initiator Name. If initiatorName - is specified with iscsiInterface simultaneously, new iSCSI - interface : will be created - for the connection. - type: string - iqn: - description: Target iSCSI Qualified Name. - type: string - iscsiInterface: - description: iSCSI Interface Name that uses an iSCSI transport. - Defaults to 'default' (tcp). - type: string - lun: - description: iSCSI Target Lun number. - format: int32 - type: integer - portals: - description: iSCSI Target Portal List. The portal is either - an IP or ip_addr:port if the port is other than default - (typically TCP ports 860 and 3260). - items: + diskName: + description: The Name of the data disk in the blob storage type: string - type: array - readOnly: - description: ReadOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. - type: boolean - secretRef: - description: CHAP Secret for iSCSI target and initiator authentication - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
+ diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks + per storage account Dedicated: single blob disk per storage + account Managed: azure managed data disk (only in managed + availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: the name of secret that contains Azure Storage + Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: CephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: type: string - type: object - targetPortal: - description: iSCSI Target Portal. The Portal is either an - IP or ip_addr:port if the port is other than default (typically - TCP ports 860 and 3260). - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - description: 'Volume''s name. Must be a DNS_LABEL and unique within - the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - nfs: - description: 'NFS represents an NFS mount on the host that shares - a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - properties: - path: - description: 'Path that is exported by the NFS server. More - info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: string - readOnly: - description: 'ReadOnly here will force the NFS export to be - mounted with read-only permissions. Defaults to false. More - info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: boolean - server: - description: 'Server is the hostname or IP address of the - NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - description: 'PersistentVolumeClaimVolumeSource represents a reference - to a PersistentVolumeClaim in the same namespace. More info: - https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - properties: - claimName: - description: 'ClaimName is the name of a PersistentVolumeClaim - in the same namespace as the pod using this volume. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' - type: string - readOnly: - description: Will force the ReadOnly setting in VolumeMounts. - Default false. - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - description: PhotonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine - properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- type: string - pdID: - description: ID that identifies Photon Controller persistent - disk - type: string - required: - - pdID - type: object - portworxVolume: - description: PortworxVolume represents a portworx volume attached - and mounted on kubelets host machine - properties: - fsType: - description: FSType represents the filesystem type to mount - Must be a filesystem type supported by the host operating - system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" - if unspecified. - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - volumeID: - description: VolumeID uniquely identifies a Portworx volume - type: string - required: - - volumeID - type: object - projected: - description: Items for all in one resources secrets, configmaps, - and downward API - properties: - defaultMode: - description: Mode bits to use on created files by default. - Must be a value between 0 and 0777. Directories within the - path are not affected by this setting. This might be in - conflict with other options that affect the file mode, like - fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - sources: - description: list of volume projections + type: array + path: + description: 'Optional: Used as the mounted root, rather than + the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. More + info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring + for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication + secret for User, default is empty. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'Optional: User is the rados user name, default + is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached and mounted + on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. More + info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volume id used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. 
Defaults + to 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer items: - description: Projection that may be projected along with - other supported volume types + description: If unspecified, each key-value pair in the Data + field of the referenced ConfigMap will be projected into + the volume as a file whose name is the key and content is + the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the + ConfigMap, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the + key to. May not be an absolute path. May not contain + the path element '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its keys must + be defined + type: boolean + type: object + csi: + description: CSI (Container Storage Interface) represents storage + that is handled by an external CSI driver (Alpha feature). + properties: + driver: + description: Driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed to the + associated CSI driver which will determine the default filesystem + to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the secret + object containing sensitive information to pass to the CSI + driver to complete the CSI NodePublishVolume and NodeUnpublishVolume + calls. This field is optional, and may be empty if no secret + is required. If the secret object contains more than one + secret, all secret references are passed. properties: - configMap: - description: information about the configMap data to - project - properties: - items: - description: If unspecified, each key-value pair - in the Data field of the referenced ConfigMap - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the ConfigMap, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits to use on - this file, must be a value between 0 and - 0777. 
If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can be - other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file - to map the key to. May not be an absolute - path. May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - keys must be defined - type: boolean - type: object - downwardAPI: - description: information about the downwardAPI data - to project - properties: - items: - description: Items is a list of DownwardAPIVolume - file - items: - description: DownwardAPIVolumeFile represents - information to create the file containing the - pod field - properties: - fieldRef: - description: 'Required: Selects a field of - the pod: only annotations, labels, name - and namespace are supported.' - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, defaults - to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - mode: - description: 'Optional: mode bits to use on - this file, must be a value between 0 and - 0777. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can be - other mode bits set.' - format: int32 - type: integer - path: - description: 'Required: Path is the relative - path name of the file to be created. Must - not be absolute or contain the ''..'' path. 
- Must be utf-8 encoded. The first item of - the relative path must not start with ''..''' - type: string - resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, requests.cpu and requests.memory) - are currently supported.' - properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - description: Specifies the output format - of the exposed resources, defaults to - "1" - type: string - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - required: - - path - type: object - type: array - type: object - secret: - description: information about the secret data to project - properties: - items: - description: If unspecified, each key-value pair - in the Data field of the referenced Secret will - be projected into the volume as a file whose name - is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the Secret, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits to use on - this file, must be a value between 0 and - 0777. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can be - other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file - to map the key to. May not be an absolute - path. May not contain the path element '..'. - May not start with the string '..'. 
- type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - type: object - serviceAccountToken: - description: information about the serviceAccountToken - data to project - properties: - audience: - description: Audience is the intended audience of - the token. A recipient of a token must identify - itself with an identifier specified in the audience - of the token, and otherwise should reject the - token. The audience defaults to the identifier - of the apiserver. - type: string - expirationSeconds: - description: ExpirationSeconds is the requested - duration of validity of the service account token. - As the token approaches expiration, the kubelet - volume plugin will proactively rotate the service - account token. The kubelet will start trying to - rotate the token if the token is older than 80 - percent of its time to live or if the token is - older than 24 hours.Defaults to 1 hour and must - be at least 10 minutes. - format: int64 - type: integer - path: - description: Path is the path relative to the mount - point of the file to project the token into. - type: string - required: - - path - type: object + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string type: object - type: array - required: - - sources - type: object - quobyte: - description: Quobyte represents a Quobyte mount on the host that - shares a pod's lifetime - properties: - group: - description: Group to map volume access to Default is no group - type: string - readOnly: - description: ReadOnly here will force the Quobyte volume to - be mounted with read-only permissions. Defaults to false. - type: boolean - registry: - description: Registry represents a single or multiple Quobyte - Registry services specified as a string as host:port pair - (multiple entries are separated with commas) which acts - as the central registry for volumes - type: string - tenant: - description: Tenant owning the given Quobyte volume in the - Backend Used with dynamically provisioned Quobyte volumes, - value is set by the plugin - type: string - user: - description: User to map volume access to Defaults to serivceaccount - user - type: string - volume: - description: Volume is a string that references an already - created Quobyte volume by name. - type: string - required: - - registry - - volume - type: object - rbd: - description: 'RBD represents a Rados Block Device mount on the - host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' - properties: - fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", "ntfs". - Implicitly inferred to be "ext4" if unspecified. More info: - https://kubernetes.io/docs/concepts/storage/volumes#rbd - TODO: how do we prevent errors in the filesystem from compromising - the machine' - type: string - image: - description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - keyring: - description: 'Keyring is the path to key ring for RBDUser. - Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - monitors: - description: 'A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + readOnly: + description: Specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. 
This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name + of the file to be created. Must not be absolute or + contain the ''..'' path. Must be utf-8 encoded. The + first item of the relative path must not start with + ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + requests.cpu and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that shares + a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' type: string - type: array - pool: - description: 'The rados pool name. Default is rbd. More info: - https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - readOnly: - description: 'ReadOnly here will force the ReadOnly setting - in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: boolean - secretRef: - description: 'SecretRef is name of the authentication secret - for RBDUser. If provided overrides keyring. 
Default is nil. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + sizeLimit: + description: 'Total amount of local storage required for this + EmptyDir volume. The size limit is also applicable for memory + medium. The maximum usage on memory medium EmptyDir would + be the minimum value between the SizeLimit specified here + and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + type: string + type: object + fc: + description: FC represents a Fibre Channel resource that is attached + to a kubelet's host machine and then exposed to the pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: type: string - type: object - user: - description: 'The rados user name. Default is admin. More - info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' - type: string - required: - - image - - monitors - type: object - scaleIO: - description: ScaleIO represents a ScaleIO persistent volume attached - and mounted on Kubernetes nodes. - properties: - fsType: - description: Filesystem type to mount. 
Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Default is "xfs". - type: string - gateway: - description: The host address of the ScaleIO API Gateway. - type: string - protectionDomain: - description: The name of the ScaleIO Protection Domain for - the configured storage. - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: SecretRef references to the secret for ScaleIO - user and other sensitive information. If this is not provided, - Login operation will fail. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + type: array + wwids: + description: 'Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be + set, but not both simultaneously.' + items: type: string - type: object - sslEnabled: - description: Flag to enable/disable SSL communication with - Gateway, default false - type: boolean - storageMode: - description: Indicates whether the storage for a volume should - be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. - type: string - storagePool: - description: The ScaleIO Storage Pool associated with the - protection domain. - type: string - system: - description: The name of the storage system as configured - in ScaleIO. - type: string - volumeName: - description: The name of a volume already created in the ScaleIO - system that is associated with this volume source. - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - description: 'Secret represents a secret that should populate - this volume. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' - properties: - defaultMode: - description: 'Optional: mode bits to use on created files - by default. Must be a value between 0 and 0777. Defaults - to 0644. Directories within the path are not affected by - this setting. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - items: - description: If unspecified, each key-value pair in the Data - field of the referenced Secret will be projected into the - volume as a file whose name is the key and content is the - value. If specified, the listed keys will be projected into - the specified paths, and unlisted keys will not be present. - If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. - Paths must be relative and may not contain the '..' path - or start with '..'. - items: - description: Maps a string key to a path within a volume. + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource that + is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use for this + volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret + object containing sensitive information to pass to the plugin + scripts. 
This may be empty if no secret object is specified. + If the secret object contains more than one secret, all + secrets are passed to the plugin scripts.' properties: - key: - description: The key to project. + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' type: string - mode: - description: 'Optional: mode bits to use on this file, - must be a value between 0 and 0777. If not specified, - the volume defaultMode will be used. This might be - in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode - bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to map the - key to. May not be an absolute path. May not contain - the path element '..'. May not start with the string - '..'. + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to a + kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name + on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. 
More info: + https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + partition: + description: 'The partition in the volume that you want to + mount. If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for /dev/sda is + "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision a + container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or start + with '..'. If '.' is supplied, the volume directory will + be the git repository. Otherwise, if specified, the volume + will contain the git repository in the subdirectory with + the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the host + that shares a pod''s lifetime. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the path + is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to the + pod. 
More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName + is specified with iscsiInterface simultaneously, new iSCSI + interface : will be created + for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
type: string - required: - - key - - path type: object - type: array - optional: - description: Specify whether the Secret or its keys must be - defined - type: boolean - secretName: - description: 'Name of the secret in the pod''s namespace to - use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' - type: string - type: object - storageos: - description: StorageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. - properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: SecretRef specifies the secret to use for obtaining - the StorageOS API credentials. If not specified, default - values will be attempted. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + targetPortal: + description: iSCSI Target Portal. The Portal is either an + IP or ip_addr:port if the port is other than default (typically + TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique within + the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. 
More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to be + mounted with read-only permissions. Defaults to false. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a reference + to a PersistentVolumeClaim in the same namespace. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". 
Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits to use on created files by default. + Must be a value between 0 and 0777. Directories within the + path are not affected by this setting. This might be in + conflict with other options that affect the file mode, like + fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: information about the configMap data to + project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced ConfigMap + will be projected into the volume as a file whose + name is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. + If a key is specified which is not present in + the ConfigMap, the volume setup will error unless + it is marked optional. Paths must be relative + and may not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. 
This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data + to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name + and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. 
The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + description: Specifies the output format + of the exposed resources, defaults to + "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced Secret will + be projected into the volume as a file whose name + is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. + If a key is specified which is not present in + the Secret, the volume setup will error unless + it is marked optional. Paths must be relative + and may not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on + this file, must be a value between 0 and + 0777. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element '..'. + May not start with the string '..'. 
+ type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience of + the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account token. + As the token approaches expiration, the kubelet + volume plugin will proactively rotate the service + account token. The kubelet will start trying to + rotate the token if the token is older than 80 + percent of its time to live or if the token is + older than 24 hours.Defaults to 1 hour and must + be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the mount + point of the file to project the token into. + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host that + shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is no group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume to + be mounted with read-only permissions. Defaults to false. 
+ type: boolean + registry: + description: Registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from compromising + the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: type: string - type: object - volumeName: - description: VolumeName is the human-readable name of the - StorageOS volume. Volume names are only unique within a - namespace. 
- type: string - volumeNamespace: - description: VolumeNamespace specifies the scope of the volume - within StorageOS. If no namespace is specified then the - Pod's namespace will be used. This allows the Kubernetes - name scoping to be mirrored within StorageOS for tighter - integration. Set VolumeName to any name to override the - default behaviour. Set to "default" if you are not using - namespaces within StorageOS. Namespaces that do not pre-exist - within StorageOS will be created. - type: string - type: object - vsphereVolume: - description: VsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine - properties: - fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - storagePolicyID: - description: Storage Policy Based Management (SPBM) profile - ID associated with the StoragePolicyName. - type: string - storagePolicyName: - description: Storage Policy Based Management (SPBM) profile - name. - type: string - volumePath: - description: Path that identifies vSphere volume vmdk - type: string - required: - - volumePath - type: object - required: - - name - type: object - type: array - type: object - status: - description: 'Most recent observed status of the ThanosRuler cluster. Read-only. - Not included when requesting from the apiserver, only from the ThanosRuler - Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - availableReplicas: - description: Total number of available pods (ready for at least minReadySeconds) - targeted by this ThanosRuler deployment. - format: int32 - type: integer - paused: - description: Represents whether any actions on the underlying managed - objects are being performed. Only delete actions will be performed. 
- type: boolean - replicas: - description: Total number of non-terminated pods targeted by this ThanosRuler - deployment (their labels match the selector). - format: int32 - type: integer - unavailableReplicas: - description: Total number of unavailable pods targeted by this ThanosRuler - deployment. - format: int32 - type: integer - updatedReplicas: - description: Total number of non-terminated pods targeted by this ThanosRuler - deployment that have the desired version spec. - format: int32 - type: integer - required: - - availableReplicas - - paused - - replicas - - unavailableReplicas - - updatedReplicas - type: object - required: - - spec - type: object - version: v1 - versions: - - name: v1 + type: array + pool: + description: 'The rados pool name. Default is rbd. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". 
Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication with + Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume should + be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the + protection domain. + type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the ScaleIO + system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected by + this setting. 
This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the Data + field of the referenced Secret will be projected into the + volume as a file whose name is the key and content is the + value. If specified, the listed keys will be projected into + the specified paths, and unlisted keys will not be present. + If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. + Paths must be relative and may not contain the '..' path + or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might be + in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the + key to. May not be an absolute path. May not contain + the path element '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must be + defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace to + use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within a + namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the volume + within StorageOS. If no namespace is specified then the + Pod's namespace will be used. This allows the Kubernetes + name scoping to be mirrored within StorageOS for tighter + integration. Set VolumeName to any name to override the + default behaviour. Set to "default" if you are not using + namespaces within StorageOS. Namespaces that do not pre-exist + within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. 
+ type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + description: 'Most recent observed status of the ThanosRuler cluster. Read-only. + Not included when requesting from the apiserver, only from the ThanosRuler + Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + availableReplicas: + description: Total number of available pods (ready for at least minReadySeconds) + targeted by this ThanosRuler deployment. + format: int32 + type: integer + paused: + description: Represents whether any actions on the underlying managed + objects are being performed. Only delete actions will be performed. + type: boolean + replicas: + description: Total number of non-terminated pods targeted by this ThanosRuler + deployment (their labels match the selector). + format: int32 + type: integer + unavailableReplicas: + description: Total number of unavailable pods targeted by this ThanosRuler + deployment. + format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated pods targeted by this ThanosRuler + deployment that have the desired version spec. 
+ format: int32 + type: integer + required: + - availableReplicas + - paused + - replicas + - unavailableReplicas + - updatedReplicas + type: object + required: + - spec + type: object served: true storage: true + diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml index 97214ca3ea..39ba1f1144 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml @@ -1,5 +1,5 @@ {{- if and .Values.prometheusOperator.admissionWebhooks.enabled }} -apiVersion: admissionregistration.k8s.io/v1beta1 +apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: name: {{ template "prometheus-operator.fullname" . 
}}-admission @@ -28,4 +28,8 @@ webhooks: namespace: {{ $.Release.Namespace }} name: {{ template "prometheus-operator.operator.fullname" $ }} path: /admission-prometheusrules/mutate + timeoutSeconds: {{ .Values.prometheusOperator.admissionWebhooks.timeoutSeconds }} + admissionReviewVersions: ["v1beta1", "v1"] + sideEffects: None + {{- end }} diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml index 6616f212d7..9370049724 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml @@ -1,5 +1,5 @@ {{- if and .Values.prometheusOperator.admissionWebhooks.enabled }} -apiVersion: admissionregistration.k8s.io/v1beta1 +apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: name: {{ template "prometheus-operator.fullname" . 
}}-admission @@ -28,4 +28,8 @@ webhooks: namespace: {{ $.Release.Namespace }} name: {{ template "prometheus-operator.operator.fullname" $ }} path: /admission-prometheusrules/validate + timeoutSeconds: {{ .Values.prometheusOperator.admissionWebhooks.timeoutSeconds }} + admissionReviewVersions: ["v1beta1", "v1"] + sideEffects: None + {{- end }} diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/crds.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/crds.yaml deleted file mode 100755 index d6bca7ed58..0000000000 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/templates/prometheus-operator/crds.yaml +++ /dev/null @@ -1,6 +0,0 @@ -{{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.createCustomResource -}} -{{- range $path, $bytes := .Files.Glob "crds/*.yaml" }} -{{ $.Files.Get $path }} ---- -{{- end }} -{{- end }} diff --git a/kubernetes/helm_charts/monitoring/prometheus-operator/values.yaml b/kubernetes/helm_charts/monitoring/prometheus-operator/values.yaml index 70b4126e5a..7617ad5517 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-operator/values.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-operator/values.yaml @@ -1068,7 +1068,7 @@ prometheusOperator: enabled: true # If true prometheus operator will create and update its CRDs on startup - manageCrds: true + manageCrds: false tlsProxy: enabled: true @@ -1090,8 +1090,8 @@ prometheusOperator: patch: enabled: true image: - repository: jettech/kube-webhook-certgen - tag: v1.2.0 + repository: registry.k8s.io/ingress-nginx/kube-webhook-certgen + tag: v1.3.0 pullPolicy: IfNotPresent resources: {} ## Provide a priority class name to the webhook patching job diff --git a/kubernetes/helm_charts/monitoring/prometheus-redis-exporter/templates/role.yaml b/kubernetes/helm_charts/monitoring/prometheus-redis-exporter/templates/role.yaml index c1ef9fde18..f891cea457 100755 --- 
a/kubernetes/helm_charts/monitoring/prometheus-redis-exporter/templates/role.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-redis-exporter/templates/role.yaml @@ -1,5 +1,5 @@ {{- if .Values.rbac.create }} -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: {{ template "prometheus-redis-exporter.fullname" . }} diff --git a/kubernetes/helm_charts/monitoring/prometheus-redis-exporter/templates/rolebinding.yaml b/kubernetes/helm_charts/monitoring/prometheus-redis-exporter/templates/rolebinding.yaml index 6b960a603b..99e4afe4fb 100755 --- a/kubernetes/helm_charts/monitoring/prometheus-redis-exporter/templates/rolebinding.yaml +++ b/kubernetes/helm_charts/monitoring/prometheus-redis-exporter/templates/rolebinding.yaml @@ -1,5 +1,5 @@ {{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: {{ template "prometheus-redis-exporter.fullname" . 
}} From 65e333d94058afe4f38294ec4056329ec5ba519f Mon Sep 17 00:00:00 2001 From: saiakhil Date: Thu, 9 Feb 2023 12:08:34 +0530 Subject: [PATCH 235/616] added UCI vars in private_repo template --- .../ansible/inventory/dev/Core/secrets.yml | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index 8bf07e91cd..97745c7d4a 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -205,3 +205,29 @@ graylog_transport_email_auth_password: "{{ core_vault_mail_server_password }}" # ------------------------------------------------------------------------------------------------------------ # # Optional variables - Can be left blank if you dont plan to use the intended features lp_vault_youtube_api_key: # youtube api token if you want to upload youtube video urls on your site + +# use password generator tool like https://passwordsgenerator.net/ and generate password with length 33 +# example: hDF5fh9QEaW4vFjx6E4CVPPtQm8FtqJZ9 +uci_api_admin_token: "hDF5fh9QEaW4vFjx6E4CVPPtQm8FtqJZ9" + +# use password generator tool like https://passwordsgenerator.net/ and generate password with length 43 +# example: ZpfFgp75ncgs7w9rp96rMYzSFvr5T3H5QxkvsvQzmPM +hasura_graphql_admin_secret: "ZpfFgp75ncgs7w9rp96rMYzSFvr5T3H5QxkvsvQzmPM" + +# use password generator tool like https://passwordsgenerator.net/ and generate password with length 57 +# example: dZfGV6x2MLAsJTvbw87tZyhfqNgnawHz9LYrespT6WMJkmtLYLbmKVCRA +fusionauth_service_admin_key: "dZfGV6x2MLAsJTvbw87tZyhfqNgnawHz9LYrespT6WMJkmtLYLbmKVCRA" + +# use password generator tool like https://passwordsgenerator.net/ (include symbols) and generate password with length 17 +# example: ^26C~\3$T~A2xs6d# +uci_encryption_key_string: "^26C~\3$T~A2xs6d#" + +# Take the value of uci_encryption_key_string variable and use below command to get base64 encrypted string 
+# command: echo -n "^26C~\3$T~A2xs6d#" | base64 +uci_encryption_key_base64: "XjI2Q35cMyRUfkEyeHM2ZCM=" + +# Give some prefered username +uci_odk_user: "admin" + +# Give some prefered password +uci_odk_password: "admin" From 98ebbae2507c70344b53c904d2936d092af55db2 Mon Sep 17 00:00:00 2001 From: saiakhil Date: Thu, 9 Feb 2023 13:52:14 +0530 Subject: [PATCH 236/616] added UCI vars in private_repo template --- private_repo/ansible/inventory/dev/Core/secrets.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/private_repo/ansible/inventory/dev/Core/secrets.yml b/private_repo/ansible/inventory/dev/Core/secrets.yml index 97745c7d4a..1107874126 100644 --- a/private_repo/ansible/inventory/dev/Core/secrets.yml +++ b/private_repo/ansible/inventory/dev/Core/secrets.yml @@ -202,10 +202,6 @@ cloud_artifact_storage_secret: "{{ cloud_public_storage_secret }}" # Graylog graylog_transport_email_auth_password: "{{ core_vault_mail_server_password }}" # email server password / api token -# ------------------------------------------------------------------------------------------------------------ # -# Optional variables - Can be left blank if you dont plan to use the intended features -lp_vault_youtube_api_key: # youtube api token if you want to upload youtube video urls on your site - # use password generator tool like https://passwordsgenerator.net/ and generate password with length 33 # example: hDF5fh9QEaW4vFjx6E4CVPPtQm8FtqJZ9 uci_api_admin_token: "hDF5fh9QEaW4vFjx6E4CVPPtQm8FtqJZ9" @@ -231,3 +227,7 @@ uci_odk_user: "admin" # Give some prefered password uci_odk_password: "admin" + +# ------------------------------------------------------------------------------------------------------------ # +# Optional variables - Can be left blank if you dont plan to use the intended features +lp_vault_youtube_api_key: # youtube api token if you want to upload youtube video urls on your site From e8feb0983b2ef135753878c00691826550bcfabb Mon Sep 17 00:00:00 2001 From: 
Deepak Devadathan Date: Thu, 9 Feb 2023 22:37:49 +1100 Subject: [PATCH 237/616] updated desktop-faq-upload for oci Signed-off-by: Deepak Devadathan --- ansible/desktop-faq-upload.yml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index 1ed429bd23..00ef6841b2 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -231,7 +231,6 @@ - name: set common oci variables set_fact: local_file_or_folder_path: "{{ playbook_dir }}/../{{ src_file_path }}" - oss_bucket_name: "{{upload_storage}}" tags: - always @@ -240,6 +239,8 @@ include_role: name: oci-cloud-storage tasks_from: upload.yml + vars: + oss_bucket_name: "{{ cloud_storage_public_bucketname }}" tags: - upload-desktop-faq @@ -248,6 +249,8 @@ include_role: name: oci-cloud-storage tasks_from: upload.yml + vars: + oss_bucket_name: "{{ cloud_storage_label_bucketname }}" tags: - upload-label @@ -258,8 +261,10 @@ tasks_from: upload-folder.yml vars: oss_path: "{{ destination_path }}/" + oss_bucket_name: "{{ cloud_storage_chatbot_bucketname }}" tags: - upload-chatbot-config + - block: - name: upload folder to oci oss - csv-template include_role: @@ -267,8 +272,10 @@ tasks_from: upload-folder.yml vars: oss_path: "{{ destination_path }}/" + oss_bucket_name: "{{ cloud_storage_sourcing_bucketname }}" tags: - upload-csv-template + - block: - name: upload folder to oci oss - discussion-ui include_role: @@ -276,6 +283,7 @@ tasks_from: upload-folder.yml vars: oss_path: "{{ destination_path }}/" + oss_bucket_name: "{{ cloud_storage_discussionui_bucketname }}" tags: - upload-discussion-ui when: cloud_service_provider == "oci" From 7e5969adc8194733eb911f7055d17197748faa8f Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 9 Feb 2023 22:45:17 +1100 Subject: [PATCH 238/616] added uploadfaq for oci rc-schema Signed-off-by: Deepak Devadathan --- ansible/uploadFAQs.yml | 14 +++++++++++++- 1 file changed, 13 
insertions(+), 1 deletion(-) diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index 1cb3dd091b..ed156e34e5 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -64,7 +64,7 @@ name: oci-cloud-storage tasks_from: upload-folder.yml vars: - oss_bucket_name: "{{ upload_storage }}" + oss_bucket_name: "{{ cloud_storage_public_bucketname }}" oss_path: "" local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" with_items: @@ -118,5 +118,17 @@ with_items: - "{{ source_folder.split(',') }}" when: cloud_service_provider == "gcloud" + + - name: upload batch of files to oci oss + include_role: + name: oci-cloud-storage + tasks_from: upload-folder.yml + vars: + oss_bucket_name: "{{ cloud_storage_content_bucketname }}" + oss_path: "" + local_file_or_folder_path: "{{ playbook_dir }}/../utils/{{ item }}" + with_items: + - "{{ source_folder.split(',') }}" + when: cloud_service_provider == "oci" tags: - upload-RC-schema From 15dfad8ed4e60b87a5b82d6e9f2b7f92788e6dd6 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 9 Feb 2023 22:51:15 +1100 Subject: [PATCH 239/616] updated bucket name for oci upload for schema Signed-off-by: Deepak Devadathan --- ansible/kp_upload-schema.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index 53dca02255..70a8f0a393 100644 --- a/ansible/kp_upload-schema.yml +++ b/ansible/kp_upload-schema.yml @@ -46,7 +46,7 @@ name: oci-cloud-storage tasks_from: upload-folder.yml vars: - oss_bucket_name: "{{ plugin_storage }}" + oss_bucket_name: "{{ cloud_storage_content_bucketname }}" local_file_or_folder_path: "{{ source_name }}" oss_path: "schemas/local/" when: cloud_service_provider == "oci" \ No newline at end of file From 9bcf90c1843603b458aabcf9fba43cb9968131a3 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 9 Feb 2023 23:03:15 +1100 Subject: [PATCH 240/616] updated bucket name for artifact upl and download for oci 
Signed-off-by: Deepak Devadathan --- ansible/artifacts-download.yml | 10 ++++++++++ ansible/artifacts-upload.yml | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/ansible/artifacts-download.yml b/ansible/artifacts-download.yml index 2fc2748229..2a6c3b439b 100644 --- a/ansible/artifacts-download.yml +++ b/ansible/artifacts-download.yml @@ -40,3 +40,13 @@ aws_access_key_id: "{{ cloud_artifact_storage_accountname }}" aws_secret_access_key: "{{ cloud_artifact_storage_secret }}" when: cloud_service_provider == "aws" + + - name: download artifact from oci oss + include_role: + name: oci-cloud-storage + tasks_from: download.yml + vars: + local_file_or_folder_path: "{{ artifact_path }}" + oss_bucket_name: "{{ cloud_storage_artifacts_bucketname }}" + oss_path: "{{ artifact }}" + when: cloud_service_provider == "oci" \ No newline at end of file diff --git a/ansible/artifacts-upload.yml b/ansible/artifacts-upload.yml index a94f33dfa3..13af4627e6 100644 --- a/ansible/artifacts-upload.yml +++ b/ansible/artifacts-upload.yml @@ -48,6 +48,6 @@ tasks_from: upload.yml vars: local_file_or_folder_path: "{{ artifact_path }}" - oss_bucket_name: "{{ oci_artifact_oss_bucket_name }}" + oss_bucket_name: "{{ cloud_storage_artifacts_bucketname }}" oss_path: "{{ artifact }}" when: cloud_service_provider == "oci" \ No newline at end of file From 5077848d029dfb7e9a0759c3cd06a29efb01a93e Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 9 Feb 2023 23:10:10 +1100 Subject: [PATCH 241/616] added oci oss block for asset upload Signed-off-by: Deepak Devadathan --- ansible/assets-upload.yml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/ansible/assets-upload.yml b/ansible/assets-upload.yml index b8d5836cc6..dcacfcef23 100644 --- a/ansible/assets-upload.yml +++ b/ansible/assets-upload.yml @@ -75,3 +75,25 @@ vars: local_file_or_folder_path: "{{ assets }}/*" when: cloud_service_provider == "gcloud" + +##### OCI + - name: this block consists of tasks 
related to oci oss + block: + - name: set common oci variables + set_fact: + oss_bucket_name: "{{ cloud_storage_playercdn_bucketname }}" + oss_path: "" + oss_namespace: "{{oci_namespace}}" + + - name: delete files and folders from oci oss + include_role: + name: oci-cloud-storage + tasks_from: delete-folder.yml + + - name: upload batch of files to oci oss + include_role: + name: oci-cloud-storage + tasks_from: upload-folder.yml + vars: + local_file_or_folder_path: "{{ assets }}" + when: cloud_service_provider == "oci" \ No newline at end of file From b4d729473e444ef804138ac393795734af5f080a Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 9 Feb 2023 23:15:15 +1100 Subject: [PATCH 242/616] updated oci buket name for deploy plugins Signed-off-by: Deepak Devadathan --- ansible/deploy-plugins.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index 19b615720c..df2f33d855 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -228,7 +228,7 @@ block: - name: set common oci variables set_fact: - oss_bucket_name: "{{ oci_content_oss_bucket_name }}" + oss_bucket_name: "{{ cloud_storage_content_bucketname }}" oss_namespace: "{{ oci_namespace }}" tags: - always From be321250f4b19267b9da1343f904b6fc7b9148cf Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Thu, 9 Feb 2023 23:40:19 +1100 Subject: [PATCH 243/616] added es snapshot for oci Signed-off-by: Deepak Devadathan --- ansible/es.yml | 4 ++ .../roles/es-oci-snapshot/defaults/main.yml | 20 +++++++++ ansible/roles/es-oci-snapshot/tasks/main.yml | 42 +++++++++++++++++++ 3 files changed, 66 insertions(+) create mode 100644 ansible/roles/es-oci-snapshot/defaults/main.yml create mode 100644 ansible/roles/es-oci-snapshot/tasks/main.yml diff --git a/ansible/es.yml b/ansible/es.yml index c526414f47..193f81ab27 100644 --- a/ansible/es.yml +++ b/ansible/es.yml @@ -12,6 +12,8 @@ when: cloud_service_provider == "aws" - role: 
es-gcs-snapshot when: cloud_service_provider == "gcloud" + - role: es-oci-snapshot + when: cloud_service_provider == "oci" - role: es5-snapshot-purge tags: - es_backup @@ -31,6 +33,8 @@ when: cloud_service_provider == "aws" - role: es-gcs-snapshot when: cloud_service_provider == "gcloud" + - role: es-oci-snapshot + when: cloud_service_provider == "oci" - es5-snapshot-purge tags: - log_es_backup diff --git a/ansible/roles/es-oci-snapshot/defaults/main.yml b/ansible/roles/es-oci-snapshot/defaults/main.yml new file mode 100644 index 0000000000..39610900ae --- /dev/null +++ b/ansible/roles/es-oci-snapshot/defaults/main.yml @@ -0,0 +1,20 @@ +snapshot_create_request_body: { + type: s3, + settings: { + bucket: "{{ cloud_storage_esbackup_bucketname }}", + base_path: "{{ cloud_storage_esbackup_foldername }}/{{ snapshot_base_path }}_{{ base_path_date }}", + access_key: "{{s3_storage_key}}", + secret_key: "{{s3_storage_secret}}", + endpoint: "{{s3_storage_endpoint}}", + path_style_access: "{{s3_path_style_access}}", + region: "{{s3_region}}", + } +} + +# Override these values +es_snapshot_host: "localhost" +snapshot_base_path: "default" +es_backup_storage: "elasticsearch-snapshots" + +cloud_storage_esbackup_bucketname: "{{ cloud_storage_management_bucketname }}" +cloud_storage_esbackup_foldername: "elasticsearch-snapshots" diff --git a/ansible/roles/es-oci-snapshot/tasks/main.yml b/ansible/roles/es-oci-snapshot/tasks/main.yml new file mode 100644 index 0000000000..aee768626c --- /dev/null +++ b/ansible/roles/es-oci-snapshot/tasks/main.yml @@ -0,0 +1,42 @@ +--- + +- set_fact: base_path_date="{{ lookup('pipe','date +%Y-%m') }}" + +- set_fact: snapshot_number="snapshot_{{ lookup('pipe','date +%s') }}" + +- name: Create S3 Repository + uri: + url: "http://{{ es_snapshot_host }}:9200/_snapshot/{{ snapshot_base_path }}" + method: PUT + body: "{{ snapshot_create_request_body | to_json }}" + headers: + Content-Type: "application/json" + +- name: Take new snapshot + uri: + url: 
"http://{{ es_snapshot_host }}:9200/_snapshot/{{ snapshot_base_path }}/{{ snapshot_number }}" + method: PUT + headers: + Content-Type: "application/json" + +- name: Print all snapshots + uri: + url: "http://{{ es_snapshot_host }}:9200/_snapshot/{{ snapshot_base_path }}/_all" + method: GET + +- name: Print status of current snapshot + uri: + url: "http://{{ es_snapshot_host }}:9200/_snapshot/{{ snapshot_base_path }}/{{ snapshot_number }}" + method: GET + +- name: "Wait for backup to be completed" + uri: + url: "http://{{ es_snapshot_host }}:9200/_snapshot/{{ snapshot_base_path }}/{{ snapshot_number }}" + method: GET + return_content: yes + status_code: 200 + body_format: json + register: result + until: result.json.snapshots[0].state == 'SUCCESS' + retries: 120 + delay: 10 From 86bd891e3489393eaaed27a71bff21b366988558 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 00:03:51 +1100 Subject: [PATCH 244/616] added es repository for oci Signed-off-by: Deepak Devadathan --- ansible/roles/es6/tasks/main.yml | 4 +++ .../es6/tasks/plugins/repository-oci.yml | 36 +++++++++++++++++++ 2 files changed, 40 insertions(+) create mode 100644 ansible/roles/es6/tasks/plugins/repository-oci.yml diff --git a/ansible/roles/es6/tasks/main.yml b/ansible/roles/es6/tasks/main.yml index 1de1eeff8e..fd96759103 100644 --- a/ansible/roles/es6/tasks/main.yml +++ b/ansible/roles/es6/tasks/main.yml @@ -52,6 +52,10 @@ include: plugins/repository-gcs.yml when: cloud_service_provider == "gcloud" +- name: include plugins/repository-oci.yml + include: plugins/repository-oci.yml + when: cloud_service_provider == "oci" + - name: flush handlers meta: flush_handlers diff --git a/ansible/roles/es6/tasks/plugins/repository-oci.yml b/ansible/roles/es6/tasks/plugins/repository-oci.yml new file mode 100644 index 0000000000..02a2b45f45 --- /dev/null +++ b/ansible/roles/es6/tasks/plugins/repository-oci.yml @@ -0,0 +1,36 @@ +--- +- name: Add default oci account name for backups + become: yes 
+ shell: echo "{{ cloud_management_storage_accountname }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.access_key + no_log: True + environment: + ES_PATH_CONF: "{{ conf_dir }}" + +- name: Add default oci account key for backups + become: yes + shell: echo "{{ cloud_management_storage_secret }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.secret_key + no_log: True + environment: + ES_PATH_CONF: "{{ conf_dir }}" + + +- name: Add default oci endpoint for backups + become: yes + shell: echo "{{ cloud_management_storage_endpoint }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.endpoint + no_log: True + environment: + ES_PATH_CONF: "{{ conf_dir }}" + +- name: Add default oci region for backups + become: yes + shell: echo "{{ cloud_management_storage_region }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.region + no_log: True + environment: + ES_PATH_CONF: "{{ conf_dir }}" + +- name: Add default path-style access for backups + become: yes + shell: echo "{{ cloud_management_storage_path_style_access }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.path_style_access + no_log: True + environment: + ES_PATH_CONF: "{{ conf_dir }}" \ No newline at end of file From 735e5402340accb5783304a2d15c7cf0bbaace5d Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 00:06:51 +1100 Subject: [PATCH 245/616] added repository changes for log-es6 for oci Signed-off-by: Deepak Devadathan --- ansible/roles/log-es6/tasks/main.yml | 4 +++ .../log-es6/tasks/plugins/repository-oci.yml | 36 +++++++++++++++++++ 2 files changed, 40 insertions(+) create mode 100644 ansible/roles/log-es6/tasks/plugins/repository-oci.yml diff --git a/ansible/roles/log-es6/tasks/main.yml b/ansible/roles/log-es6/tasks/main.yml index 646947abee..4852d29306 100644 --- a/ansible/roles/log-es6/tasks/main.yml +++ b/ansible/roles/log-es6/tasks/main.yml @@ -74,6 +74,10 @@ include: 
plugins/repository-gcs.yml when: cloud_service_provider == "gcloud" +- name: include plugins/repository-oci.yml + include: plugins/repository-oci.yml + when: cloud_service_provider == "oci" + - name: include elasticsearch-ssl.yml include: elasticsearch-ssl.yml when: es_enable_http_ssl or es_enable_transport_ssl diff --git a/ansible/roles/log-es6/tasks/plugins/repository-oci.yml b/ansible/roles/log-es6/tasks/plugins/repository-oci.yml new file mode 100644 index 0000000000..02a2b45f45 --- /dev/null +++ b/ansible/roles/log-es6/tasks/plugins/repository-oci.yml @@ -0,0 +1,36 @@ +--- +- name: Add default oci account name for backups + become: yes + shell: echo "{{ cloud_management_storage_accountname }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.access_key + no_log: True + environment: + ES_PATH_CONF: "{{ conf_dir }}" + +- name: Add default oci account key for backups + become: yes + shell: echo "{{ cloud_management_storage_secret }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.secret_key + no_log: True + environment: + ES_PATH_CONF: "{{ conf_dir }}" + + +- name: Add default oci endpoint for backups + become: yes + shell: echo "{{ cloud_management_storage_endpoint }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.endpoint + no_log: True + environment: + ES_PATH_CONF: "{{ conf_dir }}" + +- name: Add default oci region for backups + become: yes + shell: echo "{{ cloud_management_storage_region }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.region + no_log: True + environment: + ES_PATH_CONF: "{{ conf_dir }}" + +- name: Add default path-style access for backups + become: yes + shell: echo "{{ cloud_management_storage_path_style_access }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.path_style_access + no_log: True + environment: + ES_PATH_CONF: "{{ conf_dir }}" \ No newline at end of file From 4097a668fdb70d15689c3d6f85ed6edf7afadc5f Mon Sep 17 00:00:00 
2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 00:34:21 +1100 Subject: [PATCH 246/616] removed access key and secret from es s3 repo settings Signed-off-by: Deepak Devadathan --- ansible/roles/es-oci-snapshot/defaults/main.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/ansible/roles/es-oci-snapshot/defaults/main.yml b/ansible/roles/es-oci-snapshot/defaults/main.yml index 39610900ae..13d079859b 100644 --- a/ansible/roles/es-oci-snapshot/defaults/main.yml +++ b/ansible/roles/es-oci-snapshot/defaults/main.yml @@ -3,8 +3,6 @@ snapshot_create_request_body: { settings: { bucket: "{{ cloud_storage_esbackup_bucketname }}", base_path: "{{ cloud_storage_esbackup_foldername }}/{{ snapshot_base_path }}_{{ base_path_date }}", - access_key: "{{s3_storage_key}}", - secret_key: "{{s3_storage_secret}}", endpoint: "{{s3_storage_endpoint}}", path_style_access: "{{s3_path_style_access}}", region: "{{s3_region}}", From f9516a743eefdc8b3f1548df233bccd51c2d275d Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 00:39:27 +1100 Subject: [PATCH 247/616] add s3 compatible repository setting for es backup Signed-off-by: Deepak Devadathan --- ansible/roles/es-oci-snapshot/defaults/main.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/roles/es-oci-snapshot/defaults/main.yml b/ansible/roles/es-oci-snapshot/defaults/main.yml index 13d079859b..e8186ca47f 100644 --- a/ansible/roles/es-oci-snapshot/defaults/main.yml +++ b/ansible/roles/es-oci-snapshot/defaults/main.yml @@ -1,11 +1,11 @@ snapshot_create_request_body: { - type: s3, + type: repository-s3, settings: { bucket: "{{ cloud_storage_esbackup_bucketname }}", base_path: "{{ cloud_storage_esbackup_foldername }}/{{ snapshot_base_path }}_{{ base_path_date }}", - endpoint: "{{s3_storage_endpoint}}", - path_style_access: "{{s3_path_style_access}}", - region: "{{s3_region}}", + s3.client.CLIENT_NAME.endpoint: "{{s3_storage_endpoint}}", + s3.client.CLIENT_NAME.path_style_access: 
"{{s3_path_style_access}}", + s3.client.CLIENT_NAME.region: "{{s3_region}}", } } From fabb504e523df286adcbcb050bf739a2551b088e Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 00:44:23 +1100 Subject: [PATCH 248/616] changed the repository type to s3 Signed-off-by: Deepak Devadathan --- ansible/roles/es-oci-snapshot/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/es-oci-snapshot/defaults/main.yml b/ansible/roles/es-oci-snapshot/defaults/main.yml index e8186ca47f..e16c434918 100644 --- a/ansible/roles/es-oci-snapshot/defaults/main.yml +++ b/ansible/roles/es-oci-snapshot/defaults/main.yml @@ -1,5 +1,5 @@ snapshot_create_request_body: { - type: repository-s3, + type: s3, settings: { bucket: "{{ cloud_storage_esbackup_bucketname }}", base_path: "{{ cloud_storage_esbackup_foldername }}/{{ snapshot_base_path }}_{{ base_path_date }}", From 9b14c169a387874a8e1d9f0a44401b2dbd9391a9 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 01:09:08 +1100 Subject: [PATCH 249/616] plugin repository-s3 for oci Signed-off-by: Deepak Devadathan --- ansible/roles/es6/tasks/main.yml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/ansible/roles/es6/tasks/main.yml b/ansible/roles/es6/tasks/main.yml index fd96759103..a134e10c40 100644 --- a/ansible/roles/es6/tasks/main.yml +++ b/ansible/roles/es6/tasks/main.yml @@ -46,16 +46,12 @@ - name: include plugins/repository-s3.yml include: plugins/repository-s3.yml - when: cloud_service_provider == "aws" + when: cloud_service_provider == "aws" || cloud_service_provider == "oci" - name: include plugins/repository-gcs.yml include: plugins/repository-gcs.yml when: cloud_service_provider == "gcloud" -- name: include plugins/repository-oci.yml - include: plugins/repository-oci.yml - when: cloud_service_provider == "oci" - - name: flush handlers meta: flush_handlers From d888ba6fb02a466a6c3081a933e5de683b33f32e Mon Sep 17 00:00:00 2001 From: 
Deepak Devadathan Date: Fri, 10 Feb 2023 01:14:40 +1100 Subject: [PATCH 250/616] temporarily disabling no_log for trigage Signed-off-by: Deepak Devadathan --- ansible/roles/es6/tasks/plugins/repository-s3.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/es6/tasks/plugins/repository-s3.yml b/ansible/roles/es6/tasks/plugins/repository-s3.yml index 07655d6746..bc12c91008 100644 --- a/ansible/roles/es6/tasks/plugins/repository-s3.yml +++ b/ansible/roles/es6/tasks/plugins/repository-s3.yml @@ -2,7 +2,7 @@ - name: Add default aws account name for backups become: yes shell: echo "{{ cloud_management_storage_accountname }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.access_key - no_log: True + # no_log: True environment: ES_PATH_CONF: "{{ conf_dir }}" From f71dc5ae4103f6794172c6fbda0326a57bdc0d20 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 01:18:32 +1100 Subject: [PATCH 251/616] used the logical or Signed-off-by: Deepak Devadathan --- ansible/roles/es6/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/es6/tasks/main.yml b/ansible/roles/es6/tasks/main.yml index a134e10c40..39f915a018 100644 --- a/ansible/roles/es6/tasks/main.yml +++ b/ansible/roles/es6/tasks/main.yml @@ -46,7 +46,7 @@ - name: include plugins/repository-s3.yml include: plugins/repository-s3.yml - when: cloud_service_provider == "aws" || cloud_service_provider == "oci" + when: cloud_service_provider == "aws" or cloud_service_provider == "oci" - name: include plugins/repository-gcs.yml include: plugins/repository-gcs.yml From 2470e093122895cba9f350917f9e41ea0e526812 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 01:20:48 +1100 Subject: [PATCH 252/616] reverted no_log Signed-off-by: Deepak Devadathan --- ansible/roles/es6/tasks/plugins/repository-s3.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/ansible/roles/es6/tasks/plugins/repository-s3.yml b/ansible/roles/es6/tasks/plugins/repository-s3.yml index bc12c91008..07655d6746 100644 --- a/ansible/roles/es6/tasks/plugins/repository-s3.yml +++ b/ansible/roles/es6/tasks/plugins/repository-s3.yml @@ -2,7 +2,7 @@ - name: Add default aws account name for backups become: yes shell: echo "{{ cloud_management_storage_accountname }}" | {{ es_home }}/bin/elasticsearch-keystore add -f s3.client.default.access_key - # no_log: True + no_log: True environment: ES_PATH_CONF: "{{ conf_dir }}" From 96fcc3805d40119318b7213eeee6d2a3eff2c526 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 01:26:03 +1100 Subject: [PATCH 253/616] default client name based setting Signed-off-by: Deepak Devadathan --- ansible/roles/es-oci-snapshot/defaults/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/roles/es-oci-snapshot/defaults/main.yml b/ansible/roles/es-oci-snapshot/defaults/main.yml index e16c434918..599067373d 100644 --- a/ansible/roles/es-oci-snapshot/defaults/main.yml +++ b/ansible/roles/es-oci-snapshot/defaults/main.yml @@ -3,9 +3,9 @@ snapshot_create_request_body: { settings: { bucket: "{{ cloud_storage_esbackup_bucketname }}", base_path: "{{ cloud_storage_esbackup_foldername }}/{{ snapshot_base_path }}_{{ base_path_date }}", - s3.client.CLIENT_NAME.endpoint: "{{s3_storage_endpoint}}", - s3.client.CLIENT_NAME.path_style_access: "{{s3_path_style_access}}", - s3.client.CLIENT_NAME.region: "{{s3_region}}", + s3.client.default.endpoint: "{{s3_storage_endpoint}}", + s3.client.default.path_style_access: "{{s3_path_style_access}}", + s3.client.default.region: "{{s3_region}}", } } From 630f46e021df5d67d7f2fd3b3944a7984cc517a7 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 01:31:35 +1100 Subject: [PATCH 254/616] added signer override Signed-off-by: Deepak Devadathan --- ansible/roles/es-oci-snapshot/defaults/main.yml | 1 + 1 file changed, 1 insertion(+) 
diff --git a/ansible/roles/es-oci-snapshot/defaults/main.yml b/ansible/roles/es-oci-snapshot/defaults/main.yml index 599067373d..8357adbaa6 100644 --- a/ansible/roles/es-oci-snapshot/defaults/main.yml +++ b/ansible/roles/es-oci-snapshot/defaults/main.yml @@ -6,6 +6,7 @@ snapshot_create_request_body: { s3.client.default.endpoint: "{{s3_storage_endpoint}}", s3.client.default.path_style_access: "{{s3_path_style_access}}", s3.client.default.region: "{{s3_region}}", + s3.client.default.signer_override: "S3SignerType", } } From f36f17461f62bfeaf5d556611b03a2bdffaef17d Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 01:41:24 +1100 Subject: [PATCH 255/616] added s3 client configuration for oci Signed-off-by: Deepak Devadathan --- ansible/roles/es-oci-snapshot/defaults/main.yml | 4 ---- ansible/roles/es6/templates/elasticsearch.yml.j2 | 14 ++++++++++++++ 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/ansible/roles/es-oci-snapshot/defaults/main.yml b/ansible/roles/es-oci-snapshot/defaults/main.yml index 8357adbaa6..e328b1a7f8 100644 --- a/ansible/roles/es-oci-snapshot/defaults/main.yml +++ b/ansible/roles/es-oci-snapshot/defaults/main.yml @@ -3,10 +3,6 @@ snapshot_create_request_body: { settings: { bucket: "{{ cloud_storage_esbackup_bucketname }}", base_path: "{{ cloud_storage_esbackup_foldername }}/{{ snapshot_base_path }}_{{ base_path_date }}", - s3.client.default.endpoint: "{{s3_storage_endpoint}}", - s3.client.default.path_style_access: "{{s3_path_style_access}}", - s3.client.default.region: "{{s3_region}}", - s3.client.default.signer_override: "S3SignerType", } } diff --git a/ansible/roles/es6/templates/elasticsearch.yml.j2 b/ansible/roles/es6/templates/elasticsearch.yml.j2 index d6aaab45c9..41875536e5 100644 --- a/ansible/roles/es6/templates/elasticsearch.yml.j2 +++ b/ansible/roles/es6/templates/elasticsearch.yml.j2 @@ -56,3 +56,17 @@ network.host: 0.0.0.0 {% if es_remote_reindex is defined %} reindex.remote.whitelist: 
{{es_remote_host}}:9200 {% endif %} + + +{% if cloud_service_provider == "oci" %} +s3: + client: + default: + region: "{{s3_region}}" + max_retries: 3 + protocol: "https" + endpoint: "{{s3_storage_endpoint}}" + path_style_access: "{{s3_path_style_access}}" + signer_override: "S3SignerType" + read_timeout: "180s" +{% endif %} \ No newline at end of file From 0d4ff3b9b54d112f8429790d077d49846af6dde2 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 02:00:45 +1100 Subject: [PATCH 256/616] corrected elasticsearch.yml template Signed-off-by: Deepak Devadathan --- .../roles/es6/templates/elasticsearch.yml.j2 | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/ansible/roles/es6/templates/elasticsearch.yml.j2 b/ansible/roles/es6/templates/elasticsearch.yml.j2 index 41875536e5..27fd333f56 100644 --- a/ansible/roles/es6/templates/elasticsearch.yml.j2 +++ b/ansible/roles/es6/templates/elasticsearch.yml.j2 @@ -59,14 +59,11 @@ reindex.remote.whitelist: {{es_remote_host}}:9200 {% if cloud_service_provider == "oci" %} -s3: - client: - default: - region: "{{s3_region}}" - max_retries: 3 - protocol: "https" - endpoint: "{{s3_storage_endpoint}}" - path_style_access: "{{s3_path_style_access}}" - signer_override: "S3SignerType" - read_timeout: "180s" +region: "{{s3_region}}" +max_retries: 3 +protocol: "https" +endpoint: "{{s3_storage_endpoint}}" +path_style_access: "{{s3_path_style_access}}" +signer_override: "S3SignerType" +read_timeout: "180s" {% endif %} \ No newline at end of file From 828dbc03dadf5c4bfbd4de087248e2956e3a11ba Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 02:06:16 +1100 Subject: [PATCH 257/616] corrected the variable names in elasticsearch.yml Signed-off-by: Deepak Devadathan --- ansible/roles/es6/templates/elasticsearch.yml.j2 | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/ansible/roles/es6/templates/elasticsearch.yml.j2 
b/ansible/roles/es6/templates/elasticsearch.yml.j2 index 27fd333f56..f578d5b2c9 100644 --- a/ansible/roles/es6/templates/elasticsearch.yml.j2 +++ b/ansible/roles/es6/templates/elasticsearch.yml.j2 @@ -59,11 +59,11 @@ reindex.remote.whitelist: {{es_remote_host}}:9200 {% if cloud_service_provider == "oci" %} -region: "{{s3_region}}" -max_retries: 3 -protocol: "https" -endpoint: "{{s3_storage_endpoint}}" -path_style_access: "{{s3_path_style_access}}" -signer_override: "S3SignerType" -read_timeout: "180s" +s3.client.default.region: "{{s3_region}}" +s3.client.default.max_retries: 3 +s3.client.default.protocol: "https" +s3.client.default.endpoint: "{{s3_storage_endpoint}}" +s3.client.default.path_style_access: "{{s3_path_style_access}}" +s3.client.default.signer_override: "S3SignerType" +s3.client.default.read_timeout: "180s" {% endif %} \ No newline at end of file From f7ad26207412b0179f4da9563450357c1332c084 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 02:26:46 +1100 Subject: [PATCH 258/616] removed s3 compatiblity variables from elasticsearch.yaml Signed-off-by: Deepak Devadathan --- ansible/roles/es6/templates/elasticsearch.yml.j2 | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/ansible/roles/es6/templates/elasticsearch.yml.j2 b/ansible/roles/es6/templates/elasticsearch.yml.j2 index f578d5b2c9..880c27da2b 100644 --- a/ansible/roles/es6/templates/elasticsearch.yml.j2 +++ b/ansible/roles/es6/templates/elasticsearch.yml.j2 @@ -57,13 +57,3 @@ network.host: 0.0.0.0 reindex.remote.whitelist: {{es_remote_host}}:9200 {% endif %} - -{% if cloud_service_provider == "oci" %} -s3.client.default.region: "{{s3_region}}" -s3.client.default.max_retries: 3 -s3.client.default.protocol: "https" -s3.client.default.endpoint: "{{s3_storage_endpoint}}" -s3.client.default.path_style_access: "{{s3_path_style_access}}" -s3.client.default.signer_override: "S3SignerType" -s3.client.default.read_timeout: "180s" -{% endif %} \ No newline at end of file From 
eeb235e4d5d13fc18e93abdd367ba6513d0a8ba3 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 02:54:30 +1100 Subject: [PATCH 259/616] tsting snapshot backup Signed-off-by: Deepak Devadathan --- ansible/roles/es-oci-snapshot/defaults/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ansible/roles/es-oci-snapshot/defaults/main.yml b/ansible/roles/es-oci-snapshot/defaults/main.yml index e328b1a7f8..e6ee81ff16 100644 --- a/ansible/roles/es-oci-snapshot/defaults/main.yml +++ b/ansible/roles/es-oci-snapshot/defaults/main.yml @@ -3,6 +3,10 @@ snapshot_create_request_body: { settings: { bucket: "{{ cloud_storage_esbackup_bucketname }}", base_path: "{{ cloud_storage_esbackup_foldername }}/{{ snapshot_base_path }}_{{ base_path_date }}", + endpoint: "{{s3_storage_endpoint}}", + region: "{{s3_region}}", + path_style_access: "{{s3_path_style_access}}" + } } From 7f65c16c719ebff430f08f31be02a39a08a91d42 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 02:59:49 +1100 Subject: [PATCH 260/616] without path-style-acess Signed-off-by: Deepak Devadathan --- ansible/roles/es-oci-snapshot/defaults/main.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ansible/roles/es-oci-snapshot/defaults/main.yml b/ansible/roles/es-oci-snapshot/defaults/main.yml index e6ee81ff16..e53f74e44d 100644 --- a/ansible/roles/es-oci-snapshot/defaults/main.yml +++ b/ansible/roles/es-oci-snapshot/defaults/main.yml @@ -4,8 +4,7 @@ snapshot_create_request_body: { bucket: "{{ cloud_storage_esbackup_bucketname }}", base_path: "{{ cloud_storage_esbackup_foldername }}/{{ snapshot_base_path }}_{{ base_path_date }}", endpoint: "{{s3_storage_endpoint}}", - region: "{{s3_region}}", - path_style_access: "{{s3_path_style_access}}" + region: "{{s3_region}}" } } From 0ae5263b522363cee8ebe7e5178a6eb2adfd9344 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 03:05:28 +1100 Subject: [PATCH 261/616] testing with home region 
Signed-off-by: Deepak Devadathan --- ansible/roles/es-oci-snapshot/defaults/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/es-oci-snapshot/defaults/main.yml b/ansible/roles/es-oci-snapshot/defaults/main.yml index e53f74e44d..71af58c436 100644 --- a/ansible/roles/es-oci-snapshot/defaults/main.yml +++ b/ansible/roles/es-oci-snapshot/defaults/main.yml @@ -3,8 +3,8 @@ snapshot_create_request_body: { settings: { bucket: "{{ cloud_storage_esbackup_bucketname }}", base_path: "{{ cloud_storage_esbackup_foldername }}/{{ snapshot_base_path }}_{{ base_path_date }}", - endpoint: "{{s3_storage_endpoint}}", - region: "{{s3_region}}" + endpoint: "apaccpt03.compat.objectstorage.us-ashburn-1.oraclecloud.com", + region: "us-ashburn-1" } } From 752010b0573978da034acaf9678d99661e87d478 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 03:07:32 +1100 Subject: [PATCH 262/616] using ip of endpoint Signed-off-by: Deepak Devadathan --- ansible/roles/es-oci-snapshot/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/es-oci-snapshot/defaults/main.yml b/ansible/roles/es-oci-snapshot/defaults/main.yml index 71af58c436..6f736b320d 100644 --- a/ansible/roles/es-oci-snapshot/defaults/main.yml +++ b/ansible/roles/es-oci-snapshot/defaults/main.yml @@ -3,7 +3,7 @@ snapshot_create_request_body: { settings: { bucket: "{{ cloud_storage_esbackup_bucketname }}", base_path: "{{ cloud_storage_esbackup_foldername }}/{{ snapshot_base_path }}_{{ base_path_date }}", - endpoint: "apaccpt03.compat.objectstorage.us-ashburn-1.oraclecloud.com", + endpoint: "134.70.24.1", region: "us-ashburn-1" } From 0b419a680dcdcb418b5b021324b52886966ff101 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 09:59:42 +1100 Subject: [PATCH 263/616] install repository-s3 plugin for log-es Signed-off-by: Deepak Devadathan --- ansible/roles/log-es6/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/ansible/roles/log-es6/tasks/main.yml b/ansible/roles/log-es6/tasks/main.yml index 4852d29306..8ac712117c 100644 --- a/ansible/roles/log-es6/tasks/main.yml +++ b/ansible/roles/log-es6/tasks/main.yml @@ -68,7 +68,7 @@ - name: include plugins/repository-s3.yml include: plugins/repository-s3.yml - when: cloud_service_provider == "aws" + when: cloud_service_provider == "aws" or cloud_service_provider == "oci" - name: include plugins/repository-gcs.yml include: plugins/repository-gcs.yml From 02b8e16820a11c7b6043edd1df70a94a0b1e67f6 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 10:03:59 +1100 Subject: [PATCH 264/616] updated s3 settings values Signed-off-by: Deepak Devadathan --- ansible/roles/es-oci-snapshot/defaults/main.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ansible/roles/es-oci-snapshot/defaults/main.yml b/ansible/roles/es-oci-snapshot/defaults/main.yml index 6f736b320d..e6ee81ff16 100644 --- a/ansible/roles/es-oci-snapshot/defaults/main.yml +++ b/ansible/roles/es-oci-snapshot/defaults/main.yml @@ -3,8 +3,9 @@ snapshot_create_request_body: { settings: { bucket: "{{ cloud_storage_esbackup_bucketname }}", base_path: "{{ cloud_storage_esbackup_foldername }}/{{ snapshot_base_path }}_{{ base_path_date }}", - endpoint: "134.70.24.1", - region: "us-ashburn-1" + endpoint: "{{s3_storage_endpoint}}", + region: "{{s3_region}}", + path_style_access: "{{s3_path_style_access}}" } } From f333026afbed152f5db7c18a94bf065ebbd55ace Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 10:12:24 +1100 Subject: [PATCH 265/616] re arranged the order Signed-off-by: Deepak Devadathan --- ansible/roles/es-oci-snapshot/defaults/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/roles/es-oci-snapshot/defaults/main.yml b/ansible/roles/es-oci-snapshot/defaults/main.yml index e6ee81ff16..a172208ded 100644 --- 
a/ansible/roles/es-oci-snapshot/defaults/main.yml +++ b/ansible/roles/es-oci-snapshot/defaults/main.yml @@ -2,11 +2,11 @@ snapshot_create_request_body: { type: s3, settings: { bucket: "{{ cloud_storage_esbackup_bucketname }}", - base_path: "{{ cloud_storage_esbackup_foldername }}/{{ snapshot_base_path }}_{{ base_path_date }}", + path_style_access: "{{s3_path_style_access}}", endpoint: "{{s3_storage_endpoint}}", region: "{{s3_region}}", - path_style_access: "{{s3_path_style_access}}" - + base_path: "{{ cloud_storage_esbackup_foldername }}/{{ snapshot_base_path }}_{{ base_path_date }}", + } } From 72690a6a8002342526ec4a5c4c5c8d4f843aafce Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 14:25:43 +1100 Subject: [PATCH 266/616] corrected the default values for sunbird_player.env Signed-off-by: Deepak Devadathan --- ansible/roles/stack-sunbird/templates/sunbird_player.env | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_player.env b/ansible/roles/stack-sunbird/templates/sunbird_player.env index 3a9d72848e..d6129eefa0 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_player.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_player.env @@ -228,9 +228,9 @@ sunbird_gcloud_projectId={{gcloud_private_bucket_projectId | default("")}} cloud_service_provider={{cloud_service_provider}} cloud_private_storage_accountname={{cloud_private_storage_accountname | default("")}} cloud_private_storage_secret={{cloud_private_storage_secret | default("")}} -cloud_private_storage_region={{cloud_private_storage_region | default("ap-hyderabad-1")}} +cloud_private_storage_region={{cloud_private_storage_region | default("ap-south-1")}} cloud_private_storage_project={{cloud_private_storage_project | default("")}} -cloud_private_storage_endpoint={{cloud_private_storage_endpoint | default("https://apaccpt03.compat.objectstorage.ap-hyderabad-1.oraclecloud.com")}} 
+cloud_private_storage_endpoint={{cloud_private_storage_endpoint | default("")}} cloud_storage_privatereports_bucketname={{cloud_storage_privatereports_bucketname | default("reports")}} cloud_storage_resourceBundle_bucketname={{cloud_storage_label_bucketname | default("label")}} cloud_storage_desktopCrash_bucketname={{cloud_storage_desktopcrash_bucketname | default("desktopappcrashlogs")}} From c4d827f858adb891c52106d56487c2606c1ad540 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 10 Feb 2023 14:27:56 +1100 Subject: [PATCH 267/616] removed the redundant check to get deployed image Signed-off-by: Deepak Devadathan --- kubernetes/ansible/roles/deploy-player/tasks/main.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/kubernetes/ansible/roles/deploy-player/tasks/main.yml b/kubernetes/ansible/roles/deploy-player/tasks/main.yml index 5b8c7212f4..c3f7ac6286 100644 --- a/kubernetes/ansible/roles/deploy-player/tasks/main.yml +++ b/kubernetes/ansible/roles/deploy-player/tasks/main.yml @@ -78,10 +78,6 @@ register: deployed_image when: cloud_service_provider != "oci" -- name: Get deployed image name - shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[3]' - register: deployed_image - when: cloud_service_provider == "oci" - set_fact: metadata_image: "{{ image_name }}:{{ image_tag }}" From 4025cba3a514c1131349e0adacf812738e8e7e48 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Mon, 13 Feb 2023 13:45:37 +1100 Subject: [PATCH 268/616] removed csp switch for deployed image retrival Signed-off-by: Deepak Devadathan --- kubernetes/ansible/roles/deploy-player/tasks/main.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/kubernetes/ansible/roles/deploy-player/tasks/main.yml b/kubernetes/ansible/roles/deploy-player/tasks/main.yml index c3f7ac6286..28a757f224 100644 --- a/kubernetes/ansible/roles/deploy-player/tasks/main.yml +++ 
b/kubernetes/ansible/roles/deploy-player/tasks/main.yml @@ -76,7 +76,6 @@ - name: Get deployed image name shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[-1]' register: deployed_image - when: cloud_service_provider != "oci" - set_fact: From c6ba317b93ab94f8fc11fa09e04e715925dffbba Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Mon, 13 Feb 2023 13:47:18 +1100 Subject: [PATCH 269/616] common login for getting deployed image Signed-off-by: Deepak Devadathan --- kubernetes/ansible/roles/helm-deploy/tasks/main.yml | 9 --------- 1 file changed, 9 deletions(-) diff --git a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml index b9c7674f2d..e960145c7a 100644 --- a/kubernetes/ansible/roles/helm-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/helm-deploy/tasks/main.yml @@ -120,12 +120,6 @@ - name: Get deployed image name - deployments shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[-1]' register: image - when: cloud_service_provider != "oci" - -- name: Get deployed image name - deployments for OCI - shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[3]' - register: image - when: cloud_service_provider == "oci" - set_fact: deployed_image: "{{ image }}" @@ -141,9 +135,6 @@ shell: kubectl get daemonsets {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[].image | split("/")[-1]' register: image -- name: Get deployed image name - daemonsets for OCI - shell: "kubectl get daemonsets {{ release_name }} -o json -n {{ namespace }} | jq '.spec.template.spec.containers | .[].image' -r | awk -F/ '{print $4}'" - register: image - set_fact: deployed_image: "{{ image if image.stdout_lines | length > 0 else 
deployed_image }}" From 247f13c654af317723f311a491601f3ac5c2c37b Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Mon, 13 Feb 2023 13:48:04 +1100 Subject: [PATCH 270/616] removed csp switch for getting deployed image Signed-off-by: Deepak Devadathan --- kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml b/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml index c6eba71bd5..6d0b7ef387 100644 --- a/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml +++ b/kubernetes/ansible/roles/sunbird-deploy/tasks/main.yml @@ -91,12 +91,6 @@ - name: Get deployed image name shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[-1]' register: deployed_image - when: cloud_service_provider != "oci" - -- name: Get deployed image name for oci - shell: kubectl get deployments.apps {{ release_name }} -o json -n {{ namespace }} | jq -r '.spec.template.spec.containers[0].image | split("/")[3]' - register: deployed_image - when: cloud_service_provider == "oci" - set_fact: metadata_image: "{{ image_name }}:{{ image_tag }}" From 4750c327374a3df9d6588e5cbff6c421d7f77759 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Tue, 14 Feb 2023 06:46:28 +1100 Subject: [PATCH 271/616] added the environment variable for instance principal for oci operation Signed-off-by: Deepak Devadathan --- ansible/artifacts-download.yml | 3 +++ ansible/artifacts-upload.yml | 3 +++ ansible/assets-upload.yml | 6 ++++++ ansible/deploy-plugins.yml | 12 ++++++++++++ ansible/desktop-faq-upload.yml | 15 +++++++++++++++ ansible/kp_upload-schema.yml | 3 +++ ansible/roles/cert-templates/tasks/main.yml | 3 +++ ansible/uploadFAQs.yml | 6 ++++++ 8 files changed, 51 insertions(+) diff --git a/ansible/artifacts-download.yml b/ansible/artifacts-download.yml index 2a6c3b439b..ce7a125bde 100644 --- 
a/ansible/artifacts-download.yml +++ b/ansible/artifacts-download.yml @@ -44,6 +44,9 @@ - name: download artifact from oci oss include_role: name: oci-cloud-storage + apply: + environment: + OCI_CLI_AUTH: "instance_principal" tasks_from: download.yml vars: local_file_or_folder_path: "{{ artifact_path }}" diff --git a/ansible/artifacts-upload.yml b/ansible/artifacts-upload.yml index 13af4627e6..3e444ef163 100644 --- a/ansible/artifacts-upload.yml +++ b/ansible/artifacts-upload.yml @@ -45,6 +45,9 @@ - name: upload artifact to oci oss include_role: name: oci-cloud-storage + apply: + environment: + OCI_CLI_AUTH: "instance_principal" tasks_from: upload.yml vars: local_file_or_folder_path: "{{ artifact_path }}" diff --git a/ansible/assets-upload.yml b/ansible/assets-upload.yml index dcacfcef23..b39f1a4694 100644 --- a/ansible/assets-upload.yml +++ b/ansible/assets-upload.yml @@ -88,11 +88,17 @@ - name: delete files and folders from oci oss include_role: name: oci-cloud-storage + apply: + environment: + OCI_CLI_AUTH: "instance_principal" tasks_from: delete-folder.yml - name: upload batch of files to oci oss include_role: name: oci-cloud-storage + apply: + environment: + OCI_CLI_AUTH: "instance_principal" tasks_from: upload-folder.yml vars: local_file_or_folder_path: "{{ assets }}" diff --git a/ansible/deploy-plugins.yml b/ansible/deploy-plugins.yml index df2f33d855..a96af44f5a 100644 --- a/ansible/deploy-plugins.yml +++ b/ansible/deploy-plugins.yml @@ -237,6 +237,9 @@ - name: delete files and folders from oci oss include_role: name: oci-cloud-storage + apply: + environment: + OCI_CLI_AUTH: "instance_principal" tasks_from: delete-folder.yml vars: oss_path: "{{ folder_name }}/" @@ -250,6 +253,9 @@ - name: upload folder to oci oss include_role: name: oci-cloud-storage + apply: + environment: + OCI_CLI_AUTH: "instance_principal" tasks_from: upload-folder.yml vars: oss_path: "{{ folder_name }}/" @@ -266,6 +272,9 @@ - name: upload file to oci oss include_role: name: 
oci-cloud-storage + apply: + environment: + OCI_CLI_AUTH: "instance_principal" tasks_from: upload.yml vars: oss_path: "artefacts/content-player/content-player-{{ player_version_number }}.zip" @@ -277,6 +286,9 @@ - name: delete and re-upload plugins for oci include_role: name: oci-cloud-storage + apply: + environment: + OCI_CLI_AUTH: "instance_principal" tasks_from: "{{ item[0] }}" vars: object_prefix: "content-plugins/{{ item[1] }}/" diff --git a/ansible/desktop-faq-upload.yml b/ansible/desktop-faq-upload.yml index 00ef6841b2..2971b8d0c7 100644 --- a/ansible/desktop-faq-upload.yml +++ b/ansible/desktop-faq-upload.yml @@ -238,6 +238,9 @@ - name: upload file to oci oss public bucket include_role: name: oci-cloud-storage + apply: + environment: + OCI_CLI_AUTH: "instance_principal" tasks_from: upload.yml vars: oss_bucket_name: "{{ cloud_storage_public_bucketname }}" @@ -248,6 +251,9 @@ - name: upload file to oci oss private bucket include_role: name: oci-cloud-storage + apply: + environment: + OCI_CLI_AUTH: "instance_principal" tasks_from: upload.yml vars: oss_bucket_name: "{{ cloud_storage_label_bucketname }}" @@ -258,6 +264,9 @@ - name: upload folder to oci oss - chatbot include_role: name: oci-cloud-storage + apply: + environment: + OCI_CLI_AUTH: "instance_principal" tasks_from: upload-folder.yml vars: oss_path: "{{ destination_path }}/" @@ -269,6 +278,9 @@ - name: upload folder to oci oss - csv-template include_role: name: oci-cloud-storage + apply: + environment: + OCI_CLI_AUTH: "instance_principal" tasks_from: upload-folder.yml vars: oss_path: "{{ destination_path }}/" @@ -280,6 +292,9 @@ - name: upload folder to oci oss - discussion-ui include_role: name: oci-cloud-storage + apply: + environment: + OCI_CLI_AUTH: "instance_principal" tasks_from: upload-folder.yml vars: oss_path: "{{ destination_path }}/" diff --git a/ansible/kp_upload-schema.yml b/ansible/kp_upload-schema.yml index 70a8f0a393..0f57586047 100644 --- a/ansible/kp_upload-schema.yml +++ 
b/ansible/kp_upload-schema.yml @@ -44,6 +44,9 @@ - name: upload batch of files to oci oss include_role: name: oci-cloud-storage + apply: + environment: + OCI_CLI_AUTH: "instance_principal" tasks_from: upload-folder.yml vars: oss_bucket_name: "{{ cloud_storage_content_bucketname }}" diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index 926164b59b..f294bcf7fd 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -72,6 +72,9 @@ - name: upload batch of files to oci oss include_role: name: oci-cloud-storage + apply: + environment: + OCI_CLI_AUTH: "instance_principal" tasks_from: upload-folder.yml vars: oss_bucket_name: "{{ cert_service_container_name }}" diff --git a/ansible/uploadFAQs.yml b/ansible/uploadFAQs.yml index ed156e34e5..9913741938 100644 --- a/ansible/uploadFAQs.yml +++ b/ansible/uploadFAQs.yml @@ -62,6 +62,9 @@ - name: upload batch of files to oci oss include_role: name: oci-cloud-storage + apply: + environment: + OCI_CLI_AUTH: "instance_principal" tasks_from: upload-folder.yml vars: oss_bucket_name: "{{ cloud_storage_public_bucketname }}" @@ -122,6 +125,9 @@ - name: upload batch of files to oci oss include_role: name: oci-cloud-storage + apply: + environment: + OCI_CLI_AUTH: "instance_principal" tasks_from: upload-folder.yml vars: oss_bucket_name: "{{ cloud_storage_content_bucketname }}" From e769381ec3102c28e029138acdb426b367950b60 Mon Sep 17 00:00:00 2001 From: subhash_chandra_budde Date: Tue, 21 Feb 2023 23:30:25 +0530 Subject: [PATCH 272/616] Fixed indentation error --- ansible/roles/cert-templates/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/cert-templates/tasks/main.yml b/ansible/roles/cert-templates/tasks/main.yml index f294bcf7fd..d4134d7cbd 100644 --- a/ansible/roles/cert-templates/tasks/main.yml +++ b/ansible/roles/cert-templates/tasks/main.yml @@ -74,7 +74,7 @@ name: oci-cloud-storage 
apply: environment: - OCI_CLI_AUTH: "instance_principal" + OCI_CLI_AUTH: "instance_principal" tasks_from: upload-folder.yml vars: oss_bucket_name: "{{ cert_service_container_name }}" From f0453dc0e11bf71c52d59f3bf42fa321a894aeab Mon Sep 17 00:00:00 2001 From: Kenneth Heung Date: Mon, 27 Feb 2023 22:37:51 +0800 Subject: [PATCH 273/616] Update oss-upload-batch-no-poll.yml missing content type caused a LOT of problem --- .../oci-cloud-storage/tasks/oss-upload-batch-no-poll.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/oci-cloud-storage/tasks/oss-upload-batch-no-poll.yml b/ansible/roles/oci-cloud-storage/tasks/oss-upload-batch-no-poll.yml index 6d01756dfe..31794e73e8 100644 --- a/ansible/roles/oci-cloud-storage/tasks/oss-upload-batch-no-poll.yml +++ b/ansible/roles/oci-cloud-storage/tasks/oss-upload-batch-no-poll.yml @@ -1,5 +1,5 @@ --- - name: upload files and folders - uploading {{ oss_bucket_name }}/{{ object_prefix }} - shell: oci os object bulk-upload -bn {{oss_bucket_name}} --prefix {{object_prefix}} --src-dir {{local_file_or_folder_path}} --overwrite + shell: oci os object bulk-upload -bn {{oss_bucket_name}} --prefix {{object_prefix}} --src-dir {{local_file_or_folder_path}} --overwrite --content-type auto async: 1800 - poll: 0 \ No newline at end of file + poll: 0 From 04eb5a291743147053824dfe91813af5406daaa8 Mon Sep 17 00:00:00 2001 From: subhash_chandra_budde Date: Thu, 2 Mar 2023 21:39:51 +0530 Subject: [PATCH 274/616] Changes for postgres ssl configuration --- ansible/roles/stack-sunbird/templates/odk.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/odk.env b/ansible/roles/stack-sunbird/templates/odk.env index 64ca86b1ec..d062eae84d 100644 --- a/ansible/roles/stack-sunbird/templates/odk.env +++ b/ansible/roles/stack-sunbird/templates/odk.env @@ -1,4 +1,4 @@ 
-DB_URL=jdbc:postgresql://{{uci_postgres_host}}:5432/{{uci_odk_postgres_database}}?user={{uci_postgres_user}}&password={{uci_postgres_password}} +DB_URL=jdbc:postgresql://{{uci_postgres_host}}:5432/{{uci_odk_postgres_database}}?user={{uci_postgres_user}}&password={{uci_postgres_password}}&sslmode=require DB_USERNAME={{uci_postgres_user}} DB_PASSWORD={{uci_postgres_password}} DB_SCHEMA=public From bc985f1955a1649b2141e1e0f24ab7f33b0da122 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 17 Mar 2023 00:29:09 +1100 Subject: [PATCH 275/616] added the storage type for deployment Signed-off-by: Deepak Devadathan --- .../roles/stack-sunbird/templates/sunbird_lms-service.env | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env index cbbf6612c2..59cf439e65 100644 --- a/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env +++ b/ansible/roles/stack-sunbird/templates/sunbird_lms-service.env @@ -152,4 +152,7 @@ cloud_storage_base_url={{cloud_storage_base_url}} cloud_storage_cname_url={{ cloud_storage_cname_url | default('') }} cloud_storage_dial_bucketname={{ cloud_storage_dial_bucketname | default('dial') }} cloud_storage_path_prefix_dial={{ cloudstorage_relative_path_prefix_dial | default('DIAL_STORAGE_BASE_PATH') }} -cloud_store_base_path_placeholder={{ cloud_store_base_path_placeholder | default('CLOUD_BASE_PATH') }} \ No newline at end of file +cloud_store_base_path_placeholder={{ cloud_store_base_path_placeholder | default('CLOUD_BASE_PATH') }} + +## Added to support multi storage +sunbird_content_cloud_storage_type={{cloud_service_provider}} \ No newline at end of file From 8dba2467336dbccee3611d13a0c9364d20c0fd45 Mon Sep 17 00:00:00 2001 From: Ramya Date: Wed, 22 Mar 2023 10:41:01 +0530 Subject: [PATCH 276/616] issue with npm version --- pipelines/deploy/ContentFramework/Jenkinsfile | 1 + 1 file changed, 1 
insertion(+) diff --git a/pipelines/deploy/ContentFramework/Jenkinsfile b/pipelines/deploy/ContentFramework/Jenkinsfile index a02c72eb69..f85dc9b7d3 100644 --- a/pipelines/deploy/ContentFramework/Jenkinsfile +++ b/pipelines/deploy/ContentFramework/Jenkinsfile @@ -44,6 +44,7 @@ node() { sh """ zip -r content-editor-artifact.zip ansible/content-editor cd ansible/content-editor + nvm use 6 sudo npm install -g gulp npm install npm install promise From 26a9b47219e8238a336353fc8f9bfaf02389a156 Mon Sep 17 00:00:00 2001 From: Ramya Date: Wed, 29 Mar 2023 18:18:42 +0530 Subject: [PATCH 277/616] Fix for DU-417 --- ansible/inventory/env/group_vars/all.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index 6cbd63ec03..e31c3c3a85 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -360,7 +360,7 @@ content_service_blacklisted_resourcetype: '' content_service_whitelisted_resourcetype: '' content_service_whitelisted_mimetype: '' content_service_blacklisted_mimetype: '' -sunbird_cloud_storage_urls: 'https://s3.ap-south-1.amazonaws.com/ekstep-public-{{ekstep_s3_env}}/,https://ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com/,{{ upstream_url }}/' +sunbird_cloud_storage_urls: 'https://s3.ap-south-1.amazonaws.com/ekstep-public-{{ekstep_s3_env}}/,https://ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com/,{{ upstream_url }}/,https://files.odev.oci.diksha.gov.in/odev-dev-diksha-contents/,https://obj.diksha.gov.in/ntp-content-production/' sunbird_email_max_recipients_limit: 100 sunbird_cassandra_consistency_level: local_quorum sunbird_cassandra_replication_strategy: '{"class":"SimpleStrategy","replication_factor":"1"}' From 5ad9bd274af9a1de2099d627da9347df35aa5ef8 Mon Sep 17 00:00:00 2001 From: subhash_chandra_budde Date: Sat, 22 Apr 2023 14:28:09 +0530 Subject: [PATCH 278/616] Moved the storage config to ansible 
variables --- .../stack-sunbird/templates/analytics_api_service.conf | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ansible/roles/stack-sunbird/templates/analytics_api_service.conf b/ansible/roles/stack-sunbird/templates/analytics_api_service.conf index 2afb0291de..08e4cbac02 100644 --- a/ansible/roles/stack-sunbird/templates/analytics_api_service.conf +++ b/ansible/roles/stack-sunbird/templates/analytics_api_service.conf @@ -200,11 +200,11 @@ public { } } } -cloud_storage_type="azure" -storage.key.config="azure_storage_key" -storage.secret.config="azure_storage_secret" -public.storage.key.config="public_azure_storage_key" -public.storage.secret.config="public_azure_storage_secret" +cloud_storage_type="{{cloud_service_provider}}" +storage.key.config="{{cloud_public_storage_accountname}}" +storage.secret.config="{{cloud_public_storage_secret}}" +public.storage.key.config="{{cloud_public_storage_accountname}}" +public.storage.secret.config="{{cloud_public_storage_secret}}t" metrics.time.interval.min=30 cache.refresh.time.interval.min=1440 redis.host="{{metadata_redis_host}}" From 839917451031e5a00d7116e77d74771e7c8a772a Mon Sep 17 00:00:00 2001 From: subhash_chandra_budde Date: Sat, 22 Apr 2023 14:36:01 +0530 Subject: [PATCH 279/616] typo fix --- .../roles/stack-sunbird/templates/analytics_api_service.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/analytics_api_service.conf b/ansible/roles/stack-sunbird/templates/analytics_api_service.conf index 08e4cbac02..ef073b98f1 100644 --- a/ansible/roles/stack-sunbird/templates/analytics_api_service.conf +++ b/ansible/roles/stack-sunbird/templates/analytics_api_service.conf @@ -204,7 +204,7 @@ cloud_storage_type="{{cloud_service_provider}}" storage.key.config="{{cloud_public_storage_accountname}}" storage.secret.config="{{cloud_public_storage_secret}}" public.storage.key.config="{{cloud_public_storage_accountname}}" 
-public.storage.secret.config="{{cloud_public_storage_secret}}t" +public.storage.secret.config="{{cloud_public_storage_secret}}" metrics.time.interval.min=30 cache.refresh.time.interval.min=1440 redis.host="{{metadata_redis_host}}" From eeac4714d4a720f57455246ab8defb27f58ea530 Mon Sep 17 00:00:00 2001 From: subhash_chandra_budde Date: Sat, 22 Apr 2023 15:11:49 +0530 Subject: [PATCH 280/616] updated oci oss config entries --- .../roles/stack-sunbird/templates/analytics_api_service.conf | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ansible/roles/stack-sunbird/templates/analytics_api_service.conf b/ansible/roles/stack-sunbird/templates/analytics_api_service.conf index ef073b98f1..8bdcd63467 100644 --- a/ansible/roles/stack-sunbird/templates/analytics_api_service.conf +++ b/ansible/roles/stack-sunbird/templates/analytics_api_service.conf @@ -7,7 +7,7 @@ data_exhaust.retry.limit="3" data_exhaust.dataset.list=["eks-consumption-raw", "eks-consumption-summary", "eks-consumption-metrics","eks-creation-raw", "eks-creation-summary", "eks-creation-metrics"] data_exhaust.dataset.default="eks-consumption-raw" data_exhaust.output_format="json" -data_exhaust.bucket="reports" +data_exhaust.bucket="{{ cloud_storage_privatereports_bucketname }}" cassandra.service.embedded.enable=false cassandra.keyspace_prefix="{{ cassandra.keyspace_prefix }}" device-register-controller-dispatcher { @@ -205,6 +205,7 @@ storage.key.config="{{cloud_public_storage_accountname}}" storage.secret.config="{{cloud_public_storage_secret}}" public.storage.key.config="{{cloud_public_storage_accountname}}" public.storage.secret.config="{{cloud_public_storage_secret}}" +cloud_storage_endpoint_with_protocol="{{cloud_storage_url}}" metrics.time.interval.min=30 cache.refresh.time.interval.min=1440 redis.host="{{metadata_redis_host}}" From 66510bdebe7babdc7db9dfc41490f15d420b5730 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 12 May 2023 12:44:41 +1000 Subject: [PATCH 281/616] 
updated kafka setup zookeeper Signed-off-by: Deepak Devadathan --- ansible/roles/setup-kafka/defaults/main.yml | 2 +- ansible/roles/setup-kafka/tasks/main.yml | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ansible/roles/setup-kafka/defaults/main.yml b/ansible/roles/setup-kafka/defaults/main.yml index cb2bb2f38f..5b275f7d5a 100644 --- a/ansible/roles/setup-kafka/defaults/main.yml +++ b/ansible/roles/setup-kafka/defaults/main.yml @@ -3,7 +3,7 @@ env: dev ml_kafka_topic_create: false uci_kafka_topic_create: false av_kafka_topic_create: false - +zookeeper_host: "{{ groups['zookeeper'][0] | default('localhost')}}" processing_kafka_overriden_topics: - name: lms.audit.events retention_time: 172800000 diff --git a/ansible/roles/setup-kafka/tasks/main.yml b/ansible/roles/setup-kafka/tasks/main.yml index 502c627bdc..28ef75f422 100644 --- a/ansible/roles/setup-kafka/tasks/main.yml +++ b/ansible/roles/setup-kafka/tasks/main.yml @@ -1,5 +1,5 @@ - name: create topics - command: /opt/kafka/bin/kafka-topics.sh --zookeeper localhost:2181 --create --topic {{env_name}}.{{item.name}} --partitions {{ item.num_of_partitions }} --replication-factor {{ item.replication_factor }} + command: /opt/kafka/bin/kafka-topics.sh --zookeeper {{zookeeper_host}}:2181 --create --topic {{env_name}}.{{item.name}} --partitions {{ item.num_of_partitions }} --replication-factor {{ item.replication_factor }} with_items: "{{processing_kafka_topics}}" ignore_errors: true when: kafka_id=="1" @@ -7,14 +7,14 @@ - processing-kafka - name: override retention time - command: /opt/kafka/bin/kafka-topics.sh --zookeeper localhost:2181 --alter --topic {{env_name}}.{{item.name}} --config retention.ms={{ item.retention_time }} + command: /opt/kafka/bin/kafka-topics.sh --zookeeper {{zookeeper_host}}:2181 --alter --topic {{env_name}}.{{item.name}} --config retention.ms={{ item.retention_time }} with_items: "{{processing_kafka_overriden_topics}}" when: kafka_id=="1" and item.retention_time is 
defined tags: - processing-kafka - name: create topics - command: /opt/kafka/bin/kafka-topics.sh --zookeeper localhost:2181 --create --topic {{env_name}}.{{item.name}} --partitions {{ item.num_of_partitions }} --replication-factor {{ item.replication_factor }} + command: /opt/kafka/bin/kafka-topics.sh --zookeeper {{zookeeper_host}}:2181 --create --topic {{env_name}}.{{item.name}} --partitions {{ item.num_of_partitions }} --replication-factor {{ item.replication_factor }} with_items: "{{ml_service_topics}}" ignore_errors: true when: kafka_id=="1" and ml_kafka_topic_create @@ -22,7 +22,7 @@ - processing-kafka - name: create topics - command: /opt/kafka/bin/kafka-topics.sh --zookeeper localhost:2181 --create --topic {{env_name}}.{{item.name}} --partitions {{ item.num_of_partitions }} --replication-factor {{ item.replication_factor }} + command: /opt/kafka/bin/kafka-topics.sh --zookeeper {{zookeeper_host}}:2181 --create --topic {{env_name}}.{{item.name}} --partitions {{ item.num_of_partitions }} --replication-factor {{ item.replication_factor }} with_items: "{{uci_service_topics}}" ignore_errors: true when: kafka_id=="1" and uci_kafka_topic_create @@ -30,7 +30,7 @@ - processing-kafka - name: create topics - command: /opt/kafka/bin/kafka-topics.sh --zookeeper localhost:2181 --create --topic {{env_name}}.{{item.name}} --partitions {{ item.num_of_partitions }} --replication-factor {{ item.replication_factor }} + command: /opt/kafka/bin/kafka-topics.sh --zookeeper {{zookeeper_host}}:2181 --create --topic {{env_name}}.{{item.name}} --partitions {{ item.num_of_partitions }} --replication-factor {{ item.replication_factor }} with_items: "{{av_service_topics}}" ignore_errors: true when: kafka_id=="1" and av_kafka_topic_create From 7fada3f8818e9a0bf9304fc90a62b4ec5b3a6d93 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 12 May 2023 13:08:39 +1000 Subject: [PATCH 282/616] updated the generic defaults Signed-off-by: Deepak Devadathan --- 
ansible/inventory/env/group_vars/all.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/inventory/env/group_vars/all.yml b/ansible/inventory/env/group_vars/all.yml index e31c3c3a85..7f49a3a079 100644 --- a/ansible/inventory/env/group_vars/all.yml +++ b/ansible/inventory/env/group_vars/all.yml @@ -360,7 +360,7 @@ content_service_blacklisted_resourcetype: '' content_service_whitelisted_resourcetype: '' content_service_whitelisted_mimetype: '' content_service_blacklisted_mimetype: '' -sunbird_cloud_storage_urls: 'https://s3.ap-south-1.amazonaws.com/ekstep-public-{{ekstep_s3_env}}/,https://ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com/,{{ upstream_url }}/,https://files.odev.oci.diksha.gov.in/odev-dev-diksha-contents/,https://obj.diksha.gov.in/ntp-content-production/' +sunbird_cloud_storage_urls: 'https://s3.ap-south-1.amazonaws.com/ekstep-public-{{ekstep_s3_env}}/,https://ekstep-public-{{ekstep_s3_env}}.s3-ap-south-1.amazonaws.com/,{{ upstream_url }}/,https://files.xyz.in/contentsbucket/,https://obj.xyz.in/contentsbucket/' sunbird_email_max_recipients_limit: 100 sunbird_cassandra_consistency_level: local_quorum sunbird_cassandra_replication_strategy: '{"class":"SimpleStrategy","replication_factor":"1"}' From 4cc91b32a55276030a6863af4db8b7b451dcf5b1 Mon Sep 17 00:00:00 2001 From: Deepak Devadathan Date: Fri, 12 May 2023 13:29:02 +1000 Subject: [PATCH 283/616] added conditional check for pg ssl Signed-off-by: Deepak Devadathan --- ansible/roles/stack-sunbird/defaults/main.yml | 1 + ansible/roles/stack-sunbird/templates/odk.env | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/ansible/roles/stack-sunbird/defaults/main.yml b/ansible/roles/stack-sunbird/defaults/main.yml index 7fbeb17d70..6e8c98fcbc 100644 --- a/ansible/roles/stack-sunbird/defaults/main.yml +++ b/ansible/roles/stack-sunbird/defaults/main.yml @@ -1070,3 +1070,4 @@ cloud_store_base_path_placeholder: "CLOUD_BASE_PATH" #Youtube Standard Licence 
Validation youtube_app_name: fetch-youtube-license youtube_api_key: "{{ lp_vault_youtube_api_key }}" +pgssl: "false" diff --git a/ansible/roles/stack-sunbird/templates/odk.env b/ansible/roles/stack-sunbird/templates/odk.env index d062eae84d..5fcd16ddd4 100644 --- a/ansible/roles/stack-sunbird/templates/odk.env +++ b/ansible/roles/stack-sunbird/templates/odk.env @@ -1,7 +1,11 @@ -DB_URL=jdbc:postgresql://{{uci_postgres_host}}:5432/{{uci_odk_postgres_database}}?user={{uci_postgres_user}}&password={{uci_postgres_password}}&sslmode=require DB_USERNAME={{uci_postgres_user}} DB_PASSWORD={{uci_postgres_password}} DB_SCHEMA=public DB_NAME={{uci_odk_postgres_database}} DB_PORT=5432 -AGGREGATE_HOST= \ No newline at end of file +AGGREGATE_HOST= +{% if pgssl == "true" %} +DB_URL=jdbc:postgresql://{{uci_postgres_host}}:5432/{{uci_odk_postgres_database}}?user={{uci_postgres_user}}&password={{uci_postgres_password}}&sslmode=require +{% else %} +DB_URL=jdbc:postgresql://{{uci_postgres_host}}:5432/{{uci_odk_postgres_database}}?user={{uci_postgres_user}}&password={{uci_postgres_password}} +{% endif %} \ No newline at end of file From 135fc421d13c44b951a1a7a43e3155ae6ec5c935 Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Sun, 21 Jan 2024 02:16:07 +0530 Subject: [PATCH 284/616] Add OCI vars for desktop offline upload --- ansible/roles/desktop-deploy/tasks/main.yml | 26 +++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/ansible/roles/desktop-deploy/tasks/main.yml b/ansible/roles/desktop-deploy/tasks/main.yml index 6a01f97b0c..affbf7aa8f 100644 --- a/ansible/roles/desktop-deploy/tasks/main.yml +++ b/ansible/roles/desktop-deploy/tasks/main.yml @@ -128,3 +128,29 @@ gcp_path: "latest" local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets/{{ time }}/" when: cloud_service_provider == "gcloud" + +### OCI Tasks ### +- name: this block consists of tasks related to OCI + block: + - name: set common oci variables + set_fact: + 
oss_bucket_name: "{{ cloud_storage_offlineinstaller_bucketname }}" + oss_path: "{{ offline_installer_storage }}/latest" + local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets" + + - name: upload batch of files to oci oss + include_role: + name: oci-cloud-storage + tasks_from: upload-folder.yml + vars: + oss_path: "{{ offline_installer_storage }}" + local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets" + + - name: upload batch of files to oci oss + include_role: + name: oci-cloud-storage + tasks_from: upload-folder.yml + vars: + oss_path: "{{ offline_installer_storage }}/latest" + local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets/{{ time }}/" + when: cloud_service_provider == "oci" From ec23095bf1168550be5874f21eaa2705e2b82e4a Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Sun, 21 Jan 2024 02:28:12 +0530 Subject: [PATCH 285/616] removed latest suffix --- ansible/roles/desktop-deploy/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/desktop-deploy/tasks/main.yml b/ansible/roles/desktop-deploy/tasks/main.yml index affbf7aa8f..44f32c9041 100644 --- a/ansible/roles/desktop-deploy/tasks/main.yml +++ b/ansible/roles/desktop-deploy/tasks/main.yml @@ -135,7 +135,7 @@ - name: set common oci variables set_fact: oss_bucket_name: "{{ cloud_storage_offlineinstaller_bucketname }}" - oss_path: "{{ offline_installer_storage }}/latest" + oss_path: "{{ offline_installer_storage }}" local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets" - name: upload batch of files to oci oss From e0fda4747b681dc99e7ee20455679554794bd008 Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Sun, 21 Jan 2024 02:52:39 +0530 Subject: [PATCH 286/616] removed oss_path from set_fact --- ansible/roles/desktop-deploy/tasks/main.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/ansible/roles/desktop-deploy/tasks/main.yml 
b/ansible/roles/desktop-deploy/tasks/main.yml index 44f32c9041..b447f3d2ad 100644 --- a/ansible/roles/desktop-deploy/tasks/main.yml +++ b/ansible/roles/desktop-deploy/tasks/main.yml @@ -135,7 +135,6 @@ - name: set common oci variables set_fact: oss_bucket_name: "{{ cloud_storage_offlineinstaller_bucketname }}" - oss_path: "{{ offline_installer_storage }}" local_file_or_folder_path: "{{ offline_repo_location }}/desktop_uploader_assets" - name: upload batch of files to oci oss From 5c78e9bc1e4d54cf231be81bf99e9a087d3c67af Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Wed, 24 Jan 2024 04:34:27 +0530 Subject: [PATCH 287/616] Add scripts to check services and pod running status --- ansible/check_pods_status.yml | 22 +++ ansible/check_services.yml | 106 +++++++++++++++ .../Check_Pods_Status/config.xml | 121 +++++++++++++++++ .../Check_Service_Status/config.xml | 125 ++++++++++++++++++ pipelines/check-pod-status/Jenkinsfile | 50 +++++++ pipelines/check-service-status/Jenkinsfile | 50 +++++++ 6 files changed, 474 insertions(+) create mode 100644 ansible/check_pods_status.yml create mode 100644 ansible/check_services.yml create mode 100644 deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/check-pod-status/Check_Pods_Status/config.xml create mode 100644 deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/check-pod-status/Check_Service_Status/config.xml create mode 100644 pipelines/check-pod-status/Jenkinsfile create mode 100644 pipelines/check-service-status/Jenkinsfile diff --git a/ansible/check_pods_status.yml b/ansible/check_pods_status.yml new file mode 100644 index 0000000000..0247937f64 --- /dev/null +++ b/ansible/check_pods_status.yml @@ -0,0 +1,22 @@ +--- +- name: Check Pod Status in All Namespaces + hosts: localhost + gather_facts: no + environment: + KUBECONFIG: "{{ kubeconfig_path }}" + tasks: + - name: Check Non-Running Pods in Namespaces + shell: kubectl get pods -A | grep -v Running + register: non_running_pods + 
changed_when: false + failed_when: false + + - name: Parse Non-Running Pods + set_fact: + non_running_pod_lines: "{{ non_running_pods.stdout_lines }}" + when: non_running_pods.stdout_lines | length > 0 + + - name: Print Non-Running Pods + debug: + msg: "{{ non_running_pod_lines | join('\n') }}" + when: non_running_pod_lines is defined diff --git a/ansible/check_services.yml b/ansible/check_services.yml new file mode 100644 index 0000000000..aafa1342d8 --- /dev/null +++ b/ansible/check_services.yml @@ -0,0 +1,106 @@ +- hosts: cassandra + tasks: + - name: Check Cassandra service status + service: + name: cassandra + state: started + register: cassandra_status + +- hosts: postgres + tasks: + - name: Check postgres service status + service: + name: postgresql@12-main.service + state: started + register: postgres_status + # - name: Start postgres service if it's not running + # service: + # name: postgresql@12-main.service + # state: started + # when: postgres_status.changed + # become: true + +- hosts: learning-neo4j-node1 + tasks: + - name: Check Neo4j Service Status + command: "/home/learning/neo4j-learning/neo4j-enterprise-3.3.10-SNAPSHOT/bin/neo4j status" + register: service_status + changed_when: false + become: true + +- hosts: composite-search-cluster + tasks: + - name: Check Elasticsearch Port + command: "nc -z -v -w 2 {{ inventory_hostname }} 9200" + ignore_errors: yes + register: nc_result + +- hosts: lp-redis-ps + tasks: + - name: Check redis service status + service: + name: redis + state: started + register: redis_status + #- name: Start redis service if it's not running + # service: + # name: redis + # state: started + # when: redis_status.changed + # become: true + +- hosts: kafka + tasks: + - name: Check kafka service status + service: + name: kafka + state: started + register: kafka_status + #- name: Start kafka service if it's not running + # service: + # name: kafka + # state: started + # when: kafka_status.changed + # become: true + +- hosts: 
keycloak + tasks: + - name: Check keycloak service status + service: + name: keycloak + state: started + register: keycloak_status + #- name: Start keycloak service if it's not running + # service: + # name: keycloak + # state: started + # when: keycloak_status.changed + # become: true + +- hosts: graylog + tasks: + - name: Check graylog service status + service: + name: graylog-server + state: started + register: graylog_status + #- name: Start graylog service if it's not running + # service: + # name: graylog-server + # state: started + # when: graylog_status.changed + # become: true + +- hosts: lp-learning-ps + tasks: + - name: Check Tomcat Port (learning VM) + command: "nc -z -v -w 2 {{ inventory_hostname }} 8080" + ignore_errors: yes + register: nc_tomcat_result + +- hosts: internal-proxy + tasks: + - name: Check Kong Port (Internal proxy) + command: "nc -z -v -w 2 {{ inventory_hostname }} 8000" + ignore_errors: yes + register: nc_kong_result diff --git a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/check-pod-status/Check_Pods_Status/config.xml b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/check-pod-status/Check_Pods_Status/config.xml new file mode 100644 index 0000000000..d2d13b4d21 --- /dev/null +++ b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/check-pod-status/Check_Pods_Status/config.xml @@ -0,0 +1,121 @@ + + + + false + + + + + -1 + 5 + -1 + 5 + + + + false + + + false + false + + + + + private_branch + choice-parameter-189743214208409 + 1 + + true + + + + true + + + Check_Pods_Status + OpsAdministration/dev/Core/Check_Pods_Status + + + ET_FORMATTED_HTML + true + + + branch_or_tag + choice-parameter-189743216959018 + 1 + + true + + + + true + + + Check_Pods_Status + OpsAdministration/dev/Core/Check_Pods_Status + + + ET_FORMATTED_HTML + true + + + + + 0 + 0 + + false + project + false + + + + + + + + + + 2 + + + https://github.com/dicdiksha/sunbird-devops.git + + + + + oci-5.1.0-check-status + + + false 
+ + + + true + false + + 0 + false + + + + pipelines/ops/check-pod-status/Jenkinsfile + false + + + false + diff --git a/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/check-pod-status/Check_Service_Status/config.xml b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/check-pod-status/Check_Service_Status/config.xml new file mode 100644 index 0000000000..d64586248a --- /dev/null +++ b/deploy/jenkins/jobs/OpsAdministration/jobs/dev/jobs/Core/jobs/check-pod-status/Check_Service_Status/config.xml @@ -0,0 +1,125 @@ + + + + false + + + + + -1 + 3 + -1 + 5 + + + + false + + + false + false + + + + + private_branch + choice-parameter-189743214208409 + 1 + + true + + + + true + + + Check_Service_Status + OpsAdministration/dev/Core/Check_Service_Status + + + ET_FORMATTED_HTML + true + + + branch_or_tag + choice-parameter-189743216959018 + 1 + + true + + + + true + + + Check_Service_Status + OpsAdministration/dev/Core/Check_Service_Status + + + ET_FORMATTED_HTML + true + + + + + 0 + 0 + + false + project + false + + + + + + + 00 09 * * 1-5 + + + + + + + 2 + + + https://github.com/dicdiksha/sunbird-devops.git + + + + + oci-5.1.0-check-status + + + false + + + + true + false + + 0 + false + + + + pipelines/ops/check-service-status/Jenkinsfile + false + + + false + diff --git a/pipelines/check-pod-status/Jenkinsfile b/pipelines/check-pod-status/Jenkinsfile new file mode 100644 index 0000000000..cb1ff03989 --- /dev/null +++ b/pipelines/check-pod-status/Jenkinsfile @@ -0,0 +1,50 @@ +@Library('deploy-conf') _ +node() { + try { + String ANSI_GREEN = "\u001B[32m" + String ANSI_NORMAL = "\u001B[0m" + String ANSI_BOLD = "\u001B[1m" + String ANSI_RED = "\u001B[31m" + String ANSI_YELLOW = "\u001B[33m" + + stage('checkout public repo') { + folder = new File("$WORKSPACE/.git") + if (folder.exists()) + { + println "Found .git folder. Clearing it.." 
+ sh'git clean -fxd' + } + checkout scm + } + + ansiColor('xterm') { + stage('deploy'){ + values = [:] + envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() + module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() + jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() + currentWs = sh(returnStdout: true, script: 'pwd').trim() + ansiblePlaybook = "${currentWs}/ansible/check_pods_status.yml" + ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass" + values.put('currentWs', currentWs) + values.put('env', envDir) + values.put('module', module) + values.put('jobName', jobName) + values.put('ansiblePlaybook', ansiblePlaybook) + values.put('ansibleExtraArgs', ansibleExtraArgs) + println values + ansible_playbook_run(values) + currentBuild.result = 'SUCCESS' + currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" + } + } + } + catch (err) { + currentBuild.result = 'FAILURE' + throw err + } + finally { + slack_notify(currentBuild.result) + email_notify() + } +} diff --git a/pipelines/check-service-status/Jenkinsfile b/pipelines/check-service-status/Jenkinsfile new file mode 100644 index 0000000000..07c1395be2 --- /dev/null +++ b/pipelines/check-service-status/Jenkinsfile @@ -0,0 +1,50 @@ +@Library('deploy-conf') _ +node() { + try { + String ANSI_GREEN = "\u001B[32m" + String ANSI_NORMAL = "\u001B[0m" + String ANSI_BOLD = "\u001B[1m" + String ANSI_RED = "\u001B[31m" + String ANSI_YELLOW = "\u001B[33m" + + stage('checkout public repo') { + folder = new File("$WORKSPACE/.git") + if (folder.exists()) + { + println "Found .git folder. Clearing it.." 
+ sh'git clean -fxd' + } + checkout scm + } + + ansiColor('xterm') { + stage('deploy'){ + values = [:] + envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() + module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() + jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() + currentWs = sh(returnStdout: true, script: 'pwd').trim() + ansiblePlaybook = "${currentWs}/ansible/check_services.yml" + ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass" + values.put('currentWs', currentWs) + values.put('env', envDir) + values.put('module', module) + values.put('jobName', jobName) + values.put('ansiblePlaybook', ansiblePlaybook) + values.put('ansibleExtraArgs', ansibleExtraArgs) + println values + ansible_playbook_run(values) + currentBuild.result = 'SUCCESS' + currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" + } + } + } + catch (err) { + currentBuild.result = 'FAILURE' + throw err + } + finally { + slack_notify(currentBuild.result) + email_notify() + } +} From 48043f6de48d78c187884462e7a88a410dad9631 Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Wed, 24 Jan 2024 04:37:17 +0530 Subject: [PATCH 288/616] Add scripts to check services for keycloak --- ansible/check_services.yml | 99 -------------------------------------- 1 file changed, 99 deletions(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index aafa1342d8..62896fc2d6 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -1,68 +1,3 @@ -- hosts: cassandra - tasks: - - name: Check Cassandra service status - service: - name: cassandra - state: started - register: cassandra_status - -- hosts: postgres - tasks: - - name: Check postgres service status - service: - name: postgresql@12-main.service - state: started - register: postgres_status - # - name: Start postgres service if it's not running - # service: - # name: 
postgresql@12-main.service - # state: started - # when: postgres_status.changed - # become: true - -- hosts: learning-neo4j-node1 - tasks: - - name: Check Neo4j Service Status - command: "/home/learning/neo4j-learning/neo4j-enterprise-3.3.10-SNAPSHOT/bin/neo4j status" - register: service_status - changed_when: false - become: true - -- hosts: composite-search-cluster - tasks: - - name: Check Elasticsearch Port - command: "nc -z -v -w 2 {{ inventory_hostname }} 9200" - ignore_errors: yes - register: nc_result - -- hosts: lp-redis-ps - tasks: - - name: Check redis service status - service: - name: redis - state: started - register: redis_status - #- name: Start redis service if it's not running - # service: - # name: redis - # state: started - # when: redis_status.changed - # become: true - -- hosts: kafka - tasks: - - name: Check kafka service status - service: - name: kafka - state: started - register: kafka_status - #- name: Start kafka service if it's not running - # service: - # name: kafka - # state: started - # when: kafka_status.changed - # become: true - - hosts: keycloak tasks: - name: Check keycloak service status @@ -70,37 +5,3 @@ name: keycloak state: started register: keycloak_status - #- name: Start keycloak service if it's not running - # service: - # name: keycloak - # state: started - # when: keycloak_status.changed - # become: true - -- hosts: graylog - tasks: - - name: Check graylog service status - service: - name: graylog-server - state: started - register: graylog_status - #- name: Start graylog service if it's not running - # service: - # name: graylog-server - # state: started - # when: graylog_status.changed - # become: true - -- hosts: lp-learning-ps - tasks: - - name: Check Tomcat Port (learning VM) - command: "nc -z -v -w 2 {{ inventory_hostname }} 8080" - ignore_errors: yes - register: nc_tomcat_result - -- hosts: internal-proxy - tasks: - - name: Check Kong Port (Internal proxy) - command: "nc -z -v -w 2 {{ inventory_hostname }} 8000" - 
ignore_errors: yes - register: nc_kong_result From 4f8081a53bfa9958afdff4d70d1832c2205d1471 Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Sat, 3 Feb 2024 22:11:56 +0530 Subject: [PATCH 289/616] Update check_services.yml --- ansible/check_services.yml | 55 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index 62896fc2d6..41891f7dec 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -5,3 +5,58 @@ name: keycloak state: started register: keycloak_status + +- hosts: cassandra + tasks: + - name: Check Cassandra service status + service: + name: cassandra + state: started + register: cassandra_status + +- hosts: learning-neo4j-node1 + tasks: + - name: Check Neo4j Service Status + command: "/home/learning/neo4j-learning/neo4j-enterprise-3.3.10-SNAPSHOT/bin/neo4j status" + register: service_status + changed_when: false + become: true + +- hosts: composite-search-cluster + tasks: + - name: Check Elasticsearch Port + command: "nc -z -v -w 2 {{ inventory_hostname }} 9200" + ignore_errors: yes + register: nc_result + +- hosts: lp-redis-ps + tasks: + - name: Check redis service status + service: + name: redis + state: started + register: redis_status + +- hosts: kafka + tasks: + - name: Check kafka service status + service: + name: kafka + state: started + register: kafka_status + +- hosts: lp-learning-ps + tasks: + - name: Check Tomcat Port (learning VM) + command: "nc -z -v -w 2 {{ inventory_hostname }} 8080" + ignore_errors: yes + register: nc_tomcat_result + +- hosts: internal-proxy + tasks: + - name: Check Kong Port (Internal proxy) + command: "nc -z -v -w 2 {{ inventory_hostname }} 8000" + ignore_errors: yes + register: nc_kong_result + + From 3257374b5a10d590e2ea049e8b141e966fcd1e6d Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Sat, 3 Feb 2024 22:16:37 +0530 Subject: [PATCH 290/616] Update check_services.yml --- 
ansible/check_services.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index 41891f7dec..395b18596b 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -14,7 +14,7 @@ state: started register: cassandra_status -- hosts: learning-neo4j-node1 +- hosts: lp-neo4j-ps tasks: - name: Check Neo4j Service Status command: "/home/learning/neo4j-learning/neo4j-enterprise-3.3.10-SNAPSHOT/bin/neo4j status" From 7120f620d16ca06b995ec62dc1e2531b03af17f0 Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Sat, 3 Feb 2024 22:18:51 +0530 Subject: [PATCH 291/616] Update check_services.yml --- ansible/check_services.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index 395b18596b..b5ab724ad5 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -17,7 +17,7 @@ - hosts: lp-neo4j-ps tasks: - name: Check Neo4j Service Status - command: "/home/learning/neo4j-learning/neo4j-enterprise-3.3.10-SNAPSHOT/bin/neo4j status" + command: "/home/learning/neo4j-learning/neo4j-community-3.3.9/bin/neo4j status" register: service_status changed_when: false become: true From 0cb1c3e7419e377350eb54a5b2500607f9ba3c14 Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Sat, 3 Feb 2024 22:22:53 +0530 Subject: [PATCH 292/616] Update check_services.yml --- ansible/check_services.yml | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index b5ab724ad5..9f861b6d40 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -52,11 +52,9 @@ ignore_errors: yes register: nc_tomcat_result -- hosts: internal-proxy - tasks: - - name: Check Kong Port (Internal proxy) - command: "nc -z -v -w 2 {{ inventory_hostname }} 8000" - ignore_errors: yes - register: nc_kong_result - - +# - hosts: internal-proxy +# 
tasks: +# - name: Check Kong Port (Internal proxy) +# command: "nc -z -v -w 2 {{ inventory_hostname }} 8000" +# ignore_errors: yes +# register: nc_kong_result From 167e087a4485db22b8f6731812aa88cbde72b991 Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Mon, 12 Feb 2024 12:38:22 +0530 Subject: [PATCH 293/616] Added report --- ansible/check_services.yml | 8 ++++++++ ansible/service_report.html | 20 ++++++++++++++++++++ pipelines/check-service-status/Jenkinsfile | 3 +++ 3 files changed, 31 insertions(+) create mode 100644 ansible/service_report.html diff --git a/ansible/check_services.yml b/ansible/check_services.yml index 9f861b6d40..4faa1ccc1e 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -52,6 +52,14 @@ ignore_errors: yes register: nc_tomcat_result +- name: Generate HTML service report + hosts: localhost + tasks: + - name: Generate HTML report + template: + src: service_report.html + dest: service_report.html + # - hosts: internal-proxy # tasks: # - name: Check Kong Port (Internal proxy) diff --git a/ansible/service_report.html b/ansible/service_report.html new file mode 100644 index 0000000000..2efbf14df8 --- /dev/null +++ b/ansible/service_report.html @@ -0,0 +1,20 @@ + + + + + + Service Report + + +

Service Report

+
    +
  • Keycloak: Status: {{ keycloak_status.status }}
  • +
  • Cassandra: Status: {{ cassandra_status.status }}
  • +
  • Neo4j: Status: {{ service_status.stdout }}
  • +
  • Elasticsearch: Status: {% if nc_result.rc == 0 %} Port 9200 is open {% else %} Port 9200 is closed {% endif %}
  • +
  • Redis: Status: {{ redis_status.status }}
  • +
  • Kafka: Status: {{ kafka_status.status }}
  • +
  • Tomcat (Learning VM): Status: {% if nc_tomcat_result.rc == 0 %} Port 8080 is open {% else %} Port 8080 is closed {% endif %}
  • +
+ + diff --git a/pipelines/check-service-status/Jenkinsfile b/pipelines/check-service-status/Jenkinsfile index 07c1395be2..8b160ac3d7 100644 --- a/pipelines/check-service-status/Jenkinsfile +++ b/pipelines/check-service-status/Jenkinsfile @@ -37,6 +37,9 @@ node() { currentBuild.result = 'SUCCESS' currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" } + stage('ArchiveArtifacts') { + archiveArtifacts "service_report.html" + } } } catch (err) { From 9ca79e9be39da54c60f13d0313da6f0f75c48fd1 Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Mon, 12 Feb 2024 12:45:35 +0530 Subject: [PATCH 294/616] changes for report --- ansible/check_services.yml | 20 ++++++++++++-------- ansible/service_report.html | 20 -------------------- ansible/service_report.j2 | 13 +++++++++++++ 3 files changed, 25 insertions(+), 28 deletions(-) delete mode 100644 ansible/service_report.html create mode 100644 ansible/service_report.j2 diff --git a/ansible/check_services.yml b/ansible/check_services.yml index 4faa1ccc1e..fee729556c 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -5,7 +5,11 @@ name: keycloak state: started register: keycloak_status - + - name: Generate HTML Report + template: + src: service_report.j2 + dest: service_report.html + - hosts: cassandra tasks: - name: Check Cassandra service status @@ -52,13 +56,13 @@ ignore_errors: yes register: nc_tomcat_result -- name: Generate HTML service report - hosts: localhost - tasks: - - name: Generate HTML report - template: - src: service_report.html - dest: service_report.html +# - name: Generate HTML service report +# hosts: localhost +# tasks: +# - name: Generate HTML report +# template: +# src: service_report.html +# dest: service_report.html # - hosts: internal-proxy # tasks: diff --git a/ansible/service_report.html b/ansible/service_report.html deleted file mode 100644 index 2efbf14df8..0000000000 --- a/ansible/service_report.html +++ /dev/null @@ -1,20 
+0,0 @@ - - - - - - Service Report - - -

Service Report

-
    -
  • Keycloak: Status: {{ keycloak_status.status }}
  • -
  • Cassandra: Status: {{ cassandra_status.status }}
  • -
  • Neo4j: Status: {{ service_status.stdout }}
  • -
  • Elasticsearch: Status: {% if nc_result.rc == 0 %} Port 9200 is open {% else %} Port 9200 is closed {% endif %}
  • -
  • Redis: Status: {{ redis_status.status }}
  • -
  • Kafka: Status: {{ kafka_status.status }}
  • -
  • Tomcat (Learning VM): Status: {% if nc_tomcat_result.rc == 0 %} Port 8080 is open {% else %} Port 8080 is closed {% endif %}
  • -
- - diff --git a/ansible/service_report.j2 b/ansible/service_report.j2 new file mode 100644 index 0000000000..b3d0d233bb --- /dev/null +++ b/ansible/service_report.j2 @@ -0,0 +1,13 @@ + + + + Keycloak Status Report + + +

Keycloak Status Report

+

Keycloak Status: {% if keycloak_status.rc == 0 %}Running{% else %}Not Running{% endif %}

+ {% if keycloak_status.rc != 0 %} +

Error Message: {{ keycloak_status.stderr }}

+ {% endif %} + + From b48d972fa6d1a137bb55c971cff9dbe294f50266 Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Mon, 12 Feb 2024 12:49:48 +0530 Subject: [PATCH 295/616] testing --- ansible/check_services.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index fee729556c..c88bf5d8ce 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -5,10 +5,11 @@ name: keycloak state: started register: keycloak_status + - name: Generate HTML Report - template: - src: service_report.j2 - dest: service_report.html + template: + src: ansible/service_report.j2 + dest: service_report.html - hosts: cassandra tasks: From aa347cfd9241a5f8b976a5fb753a8cadaa3e8e10 Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Mon, 12 Feb 2024 12:50:44 +0530 Subject: [PATCH 296/616] testing --- ansible/check_services.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index c88bf5d8ce..8b3e4b2261 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -8,7 +8,7 @@ - name: Generate HTML Report template: - src: ansible/service_report.j2 + src: service_report.j2 dest: service_report.html - hosts: cassandra From d00fda13248668854afd9915040be5c42b420084 Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Mon, 12 Feb 2024 12:52:38 +0530 Subject: [PATCH 297/616] reports changes --- ansible/service_report.j2 | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/ansible/service_report.j2 b/ansible/service_report.j2 index b3d0d233bb..71757d85de 100644 --- a/ansible/service_report.j2 +++ b/ansible/service_report.j2 @@ -1,13 +1,10 @@ - Keycloak Status Report + Keycloak Service Status Report -

Keycloak Status Report

-

Keycloak Status: {% if keycloak_status.rc == 0 %}Running{% else %}Not Running{% endif %}

- {% if keycloak_status.rc != 0 %} -

Error Message: {{ keycloak_status.stderr }}

- {% endif %} +

Keycloak Service Status Report

+

Keycloak Service Status: {% if keycloak_status.changed %}Started{% else %}Stopped{% endif %}

From e9e9bce91a7cf4eeedaab64ae2918d9fd0d79925 Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Mon, 12 Feb 2024 13:02:24 +0530 Subject: [PATCH 298/616] added host --- ansible/check_services.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index 8b3e4b2261..34cbccc53b 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -5,7 +5,9 @@ name: keycloak state: started register: keycloak_status - + +- hosts: localhost + tasks: - name: Generate HTML Report template: src: service_report.j2 From 3a2fba8368abfc01c2bff211d4f0610947a4d239 Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Mon, 12 Feb 2024 13:05:14 +0530 Subject: [PATCH 299/616] Revert "Update check_services.yml" This reverts commit 0cb1c3e7419e377350eb54a5b2500607f9ba3c14. --- ansible/check_services.yml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index 34cbccc53b..42c31ada06 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -59,14 +59,6 @@ ignore_errors: yes register: nc_tomcat_result -# - name: Generate HTML service report -# hosts: localhost -# tasks: -# - name: Generate HTML report -# template: -# src: service_report.html -# dest: service_report.html - # - hosts: internal-proxy # tasks: # - name: Check Kong Port (Internal proxy) From 428311e322cd5513e06cd3e6e824a677638609cb Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Mon, 12 Feb 2024 13:06:23 +0530 Subject: [PATCH 300/616] Revert "Update check_services.yml" This reverts commit 0cb1c3e7419e377350eb54a5b2500607f9ba3c14. 
--- ansible/check_services.yml | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index 42c31ada06..1c86cc5152 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -59,9 +59,11 @@ ignore_errors: yes register: nc_tomcat_result -# - hosts: internal-proxy -# tasks: -# - name: Check Kong Port (Internal proxy) -# command: "nc -z -v -w 2 {{ inventory_hostname }} 8000" -# ignore_errors: yes -# register: nc_kong_result +- hosts: internal-proxy + tasks: + - name: Check Kong Port (Internal proxy) + command: "nc -z -v -w 2 {{ inventory_hostname }} 8000" + ignore_errors: yes + register: nc_kong_result + + From 45ed6e6ec4df149208a1950a4237e44da4e0f03a Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Mon, 12 Feb 2024 13:08:28 +0530 Subject: [PATCH 301/616] reports are reverted --- ansible/check_services.yml | 19 ++++++------------- ansible/service_report.j2 | 10 ---------- 2 files changed, 6 insertions(+), 23 deletions(-) delete mode 100644 ansible/service_report.j2 diff --git a/ansible/check_services.yml b/ansible/check_services.yml index 1c86cc5152..628ca5c0a2 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -6,13 +6,6 @@ state: started register: keycloak_status -- hosts: localhost - tasks: - - name: Generate HTML Report - template: - src: service_report.j2 - dest: service_report.html - - hosts: cassandra tasks: - name: Check Cassandra service status @@ -59,11 +52,11 @@ ignore_errors: yes register: nc_tomcat_result -- hosts: internal-proxy - tasks: - - name: Check Kong Port (Internal proxy) - command: "nc -z -v -w 2 {{ inventory_hostname }} 8000" - ignore_errors: yes - register: nc_kong_result +# - hosts: internal-proxy +# tasks: +# - name: Check Kong Port (Internal proxy) +# command: "nc -z -v -w 2 {{ inventory_hostname }} 8000" +# ignore_errors: yes +# register: nc_kong_result diff --git a/ansible/service_report.j2 
b/ansible/service_report.j2 deleted file mode 100644 index 71757d85de..0000000000 --- a/ansible/service_report.j2 +++ /dev/null @@ -1,10 +0,0 @@ - - - - Keycloak Service Status Report - - -

Keycloak Service Status Report

-

Keycloak Service Status: {% if keycloak_status.changed %}Started{% else %}Stopped{% endif %}

- - From 4cfa988ef9d0be2e9f411ab0e448dd28aaf33bf0 Mon Sep 17 00:00:00 2001 From: Prasath Sivasubramaniyan Date: Mon, 12 Feb 2024 13:10:42 +0530 Subject: [PATCH 302/616] jenkins file reverted --- pipelines/check-service-status/Jenkinsfile | 3 --- 1 file changed, 3 deletions(-) diff --git a/pipelines/check-service-status/Jenkinsfile b/pipelines/check-service-status/Jenkinsfile index 8b160ac3d7..07c1395be2 100644 --- a/pipelines/check-service-status/Jenkinsfile +++ b/pipelines/check-service-status/Jenkinsfile @@ -37,9 +37,6 @@ node() { currentBuild.result = 'SUCCESS' currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" } - stage('ArchiveArtifacts') { - archiveArtifacts "service_report.html" - } } } catch (err) { From d1d248111e7cdb6439ec64638ad67ad059ab3c84 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 21 Feb 2024 17:44:45 +0530 Subject: [PATCH 303/616] Update check_services.yml Added the Influx db --- ansible/check_services.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index 628ca5c0a2..ded5e96202 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -59,4 +59,11 @@ # ignore_errors: yes # register: nc_kong_result - +- hosts: dp-influx-ps + tasks: + - name: Check InfluxDB service status + shell: systemctl status influxdb + register: influxdb_status + - debug: + msg: "InfluxDB service is {{ 'running' if influxdb_status.rc == 0 else 'not running' }}" + var: influxdb_status.stdout From 3e0b95a27a4294b0eb9b3308830a88faaee01ecd Mon Sep 17 00:00:00 2001 From: kirantrigyn <139841936+kirantrigyn@users.noreply.github.com> Date: Wed, 21 Feb 2024 17:52:40 +0530 Subject: [PATCH 304/616] Update check_services.yml Update check_services.yml --- ansible/check_services.yml | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git 
a/ansible/check_services.yml b/ansible/check_services.yml index ded5e96202..e5373b89d6 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -58,12 +58,11 @@ # command: "nc -z -v -w 2 {{ inventory_hostname }} 8000" # ignore_errors: yes # register: nc_kong_result - + - hosts: dp-influx-ps tasks: - - name: Check InfluxDB service status - shell: systemctl status influxdb + - name: Check systemctl status influxdb + service: + name: influxdb + state: started register: influxdb_status - - debug: - msg: "InfluxDB service is {{ 'running' if influxdb_status.rc == 0 else 'not running' }}" - var: influxdb_status.stdout From 7d93d77aa15a18b846b5b761dd22e46225e6b2ff Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:18:03 +0530 Subject: [PATCH 305/616] Update check_services.yml Adding spark & postgres db --- ansible/check_services.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index e5373b89d6..4ce88307d3 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -66,3 +66,19 @@ name: influxdb state: started register: influxdb_status + +- hosts: postgresql-master-1 + tasks: + - name: Check systemctl status postgresql + service: + name: postgresql + state: started + register: postgresql_status + +- hosts: dp-spark-ps + tasks: + - name: Check systemctl status + service: + name: spark + state: running + register: spark_status From e5169a56586d8d32b44b895d6e3b04829e412c9a Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:38:36 +0530 Subject: [PATCH 306/616] Update check_services.yml Adding - survey-sl-mongodb --- ansible/check_services.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index 4ce88307d3..69a7bffd3a 100644 --- a/ansible/check_services.yml +++ 
b/ansible/check_services.yml @@ -82,3 +82,11 @@ name: spark state: running register: spark_status + +- hosts: mongo_master + tasks: + - name: Check systemctl status + service: + name: survey-sl-mongodb + state: running + register: survey-sl-mongodb_status From 599ab1638e08039141f7436cf96fb391ccc8370d Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:49:34 +0530 Subject: [PATCH 307/616] Update check_services.yml --- ansible/check_services.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index 69a7bffd3a..ea4dd8d3bd 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -80,7 +80,7 @@ - name: Check systemctl status service: name: spark - state: running + state: started register: spark_status - hosts: mongo_master @@ -88,5 +88,5 @@ - name: Check systemctl status service: name: survey-sl-mongodb - state: running + state: started register: survey-sl-mongodb_status From 765a2b250d79e1902427a9d8ef3ad948bd6c40ee Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Thu, 22 Feb 2024 13:09:24 +0530 Subject: [PATCH 308/616] Update check_services.yml correction in host name spark --- ansible/check_services.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index ea4dd8d3bd..42d5437c7a 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -74,15 +74,7 @@ name: postgresql state: started register: postgresql_status - -- hosts: dp-spark-ps - tasks: - - name: Check systemctl status - service: - name: spark - state: started - register: spark_status - + - hosts: mongo_master tasks: - name: Check systemctl status @@ -90,3 +82,11 @@ name: survey-sl-mongodb state: started register: survey-sl-mongodb_status + +- hosts: dp-spark-ps + tasks: + - name: Check 
systemctl status + service: + name: spark-01 + state: started + register: spark_status From 0d19cb558472c1d3dedb8b04267002a9e0e87d21 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Thu, 22 Feb 2024 13:49:32 +0530 Subject: [PATCH 309/616] Update check_services.yml updating mongod.service --- ansible/check_services.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index 42d5437c7a..5e4588d07f 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -77,11 +77,11 @@ - hosts: mongo_master tasks: - - name: Check systemctl status + - name: Check systemctl status mongod service: - name: survey-sl-mongodb + name: mongod state: started - register: survey-sl-mongodb_status + register: mongod_status - hosts: dp-spark-ps tasks: From f54548ce4219546817346f079943781afa3b12ee Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Thu, 22 Feb 2024 13:53:25 +0530 Subject: [PATCH 310/616] Update check_services.yml removing spark seem no service running as of now. 
--- ansible/check_services.yml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index 5e4588d07f..da5f3ad243 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -82,11 +82,3 @@ name: mongod state: started register: mongod_status - -- hosts: dp-spark-ps - tasks: - - name: Check systemctl status - service: - name: spark-01 - state: started - register: spark_status From 3580c8656d1ead13055020d700bd2cad8c27099a Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 22 Feb 2024 15:29:56 +0530 Subject: [PATCH 311/616] Update check_services.yml Added new check services --- ansible/check_services.yml | 143 ++++++++++++++++++++++++++++--------- 1 file changed, 108 insertions(+), 35 deletions(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index da5f3ad243..6301078d02 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -1,84 +1,157 @@ -- hosts: keycloak +- name: Check Service Status + hosts: + - keycloak + - cassandra + - lp-neo4j-ps + - composite-search-cluster + - lp-redis-ps + - kafka + - lp-learning-ps + - dp-influx-ps + - postgresql-master-1 + - mongo_master + - processing-cluster-kafka-1 + - flink-processing-cluster-kafka + tasks: - name: Check keycloak service status service: name: keycloak state: started register: keycloak_status + when: inventory_hostname == "keycloak" + + - name: Print keycloak service status + debug: + msg: "Keycloak Service Status: {{ 'running' if keycloak_status is succeeded else 'not running' }}" + when: inventory_hostname == "keycloak" -- hosts: cassandra - tasks: - name: Check Cassandra service status service: name: cassandra state: started register: cassandra_status - -- hosts: lp-neo4j-ps - tasks: + when: inventory_hostname == "cassandra" + + - name: Print Cassandra service status + debug: + msg: "Cassandra Service Status: {{ 'running' if 
cassandra_status is succeeded else 'not running' }}" + when: inventory_hostname == "cassandra" + - name: Check Neo4j Service Status command: "/home/learning/neo4j-learning/neo4j-community-3.3.9/bin/neo4j status" - register: service_status + register: neo4j_status changed_when: false become: true + when: inventory_hostname == "lp-neo4j-ps" + + - name: Print Neo4j service status + debug: + msg: "Neo4j Service Status: {{ 'running' if neo4j_status.rc == 0 else 'not running' }}" + when: inventory_hostname == "lp-neo4j-ps" -- hosts: composite-search-cluster - tasks: - name: Check Elasticsearch Port command: "nc -z -v -w 2 {{ inventory_hostname }} 9200" ignore_errors: yes - register: nc_result + register: elasticsearch_status + when: inventory_hostname == "composite-search-cluster" + + - name: Print Elasticsearch service status + debug: + msg: "Elasticsearch Service Status: {{ 'running' if elasticsearch_status.rc == 0 else 'not running' }}" + when: inventory_hostname == "composite-search-cluster" -- hosts: lp-redis-ps - tasks: - name: Check redis service status service: name: redis state: started register: redis_status + when: inventory_hostname == "lp-redis-ps" + + - name: Print Redis service status + debug: + msg: "Redis Service Status: {{ 'running' if redis_status is succeeded else 'not running' }}" + when: inventory_hostname == "lp-redis-ps" -- hosts: kafka - tasks: - name: Check kafka service status service: name: kafka state: started register: kafka_status + when: inventory_hostname == "kafka" + + - name: Print Kafka service status + debug: + msg: "Kafka Service Status: {{ 'running' if kafka_status is succeeded else 'not running' }}" + when: inventory_hostname == "kafka" -- hosts: lp-learning-ps - tasks: - name: Check Tomcat Port (learning VM) command: "nc -z -v -w 2 {{ inventory_hostname }} 8080" ignore_errors: yes - register: nc_tomcat_result + register: tomcat_status + when: inventory_hostname == "lp-learning-ps" -# - hosts: internal-proxy -# tasks: -# - name: 
Check Kong Port (Internal proxy) -# command: "nc -z -v -w 2 {{ inventory_hostname }} 8000" -# ignore_errors: yes -# register: nc_kong_result + - name: Print Tomcat service status + debug: + msg: "Tomcat Service Status: {{ 'running' if tomcat_status.rc == 0 else 'not running' }}" + when: inventory_hostname == "lp-learning-ps" -- hosts: dp-influx-ps - tasks: - name: Check systemctl status influxdb service: name: influxdb state: started register: influxdb_status - -- hosts: postgresql-master-1 - tasks: + when: inventory_hostname == "dp-influx-ps" + + - name: Print InfluxDB service status + debug: + msg: "InfluxDB Service Status: {{ 'running' if influxdb_status is succeeded else 'not running' }}" + when: inventory_hostname == "dp-influx-ps" + - name: Check systemctl status postgresql service: name: postgresql state: started - register: postgresql_status - -- hosts: mongo_master - tasks: - - name: Check systemctl status mongod + register: postgresql_status + when: inventory_hostname == "postgresql-master-1" + + - name: Print PostgreSQL service status + debug: + msg: "PostgreSQL Service Status: {{ 'running' if postgresql_status is succeeded else 'not running' }}" + when: inventory_hostname == "postgresql-master-1" + + - name: Check systemctl status survey-sl-mongodb + service: + name: survey-sl-mongodb + state: started + register: survey_sl_mongodb_status + when: inventory_hostname == "mongo_master" + + - name: Print MongoDB service status + debug: + msg: "MongoDB Service Status: {{ 'running' if survey_sl_mongodb_status is succeeded else 'not running' }}" + when: inventory_hostname == "mongo_master" + + - name: Check systemctl status processing-cluster-kafka-1 service: - name: mongod + name: processing-cluster-kafka-1 state: started - register: mongod_status + register: processing_cluster_kafka1_status + when: inventory_hostname == "processing-cluster-kafka-1" + + - name: Print Processing Cluster Kafka-1 service status + debug: + msg: "Processing Cluster Kafka-1 Service 
Status: {{ 'running' if processing_cluster_kafka1_status is succeeded else 'not running' }}" + when: inventory_hostname == "processing-cluster-kafka-1" + + - name: Check systemctl status flink-processing-cluster-kafka + service: + name: flink-processing-cluster-kafka + state: started + register: flink_processing_cluster_kafka_status + when: inventory_hostname == "flink-processing-cluster-kafka" + + - name: Print Flink Processing Cluster Kafka service status + debug: + msg: "Flink Processing Cluster Kafka Service Status: {{ 'running' if flink_processing_cluster_kafka_status is succeeded else 'not running' }}" + when: inventory_hostname == "flink-processing-cluster-kafka" From 9f4df32fccb00b087ccd13b6899df27f4a96eeac Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 22 Feb 2024 16:34:04 +0530 Subject: [PATCH 312/616] Update check_services.yml New Playbook Added --- ansible/check_services.yml | 192 +++++++++++-------------------------- 1 file changed, 58 insertions(+), 134 deletions(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index 6301078d02..3ad9faa243 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -1,5 +1,6 @@ +--- - name: Check Service Status - hosts: + hosts: - keycloak - cassandra - lp-neo4j-ps @@ -12,146 +13,69 @@ - mongo_master - processing-cluster-kafka-1 - flink-processing-cluster-kafka - - tasks: - - name: Check keycloak service status - service: + + vars: + service_checks: + keycloak: name: keycloak - state: started - register: keycloak_status - when: inventory_hostname == "keycloak" - - - name: Print keycloak service status - debug: - msg: "Keycloak Service Status: {{ 'running' if keycloak_status is succeeded else 'not running' }}" - when: inventory_hostname == "keycloak" - - - name: Check Cassandra service status - service: + status_msg: "Keycloak Service Status" + cassandra: name: cassandra - state: started - register: cassandra_status - 
when: inventory_hostname == "cassandra" - - - name: Print Cassandra service status - debug: - msg: "Cassandra Service Status: {{ 'running' if cassandra_status is succeeded else 'not running' }}" - when: inventory_hostname == "cassandra" - - - name: Check Neo4j Service Status - command: "/home/learning/neo4j-learning/neo4j-community-3.3.9/bin/neo4j status" - register: neo4j_status - changed_when: false - become: true - when: inventory_hostname == "lp-neo4j-ps" - - - name: Print Neo4j service status - debug: - msg: "Neo4j Service Status: {{ 'running' if neo4j_status.rc == 0 else 'not running' }}" - when: inventory_hostname == "lp-neo4j-ps" - - - name: Check Elasticsearch Port - command: "nc -z -v -w 2 {{ inventory_hostname }} 9200" - ignore_errors: yes - register: elasticsearch_status - when: inventory_hostname == "composite-search-cluster" - - - name: Print Elasticsearch service status - debug: - msg: "Elasticsearch Service Status: {{ 'running' if elasticsearch_status.rc == 0 else 'not running' }}" - when: inventory_hostname == "composite-search-cluster" - - - name: Check redis service status - service: + status_msg: "Cassandra Service Status" + lp-neo4j-ps: + name: neo4j + status_msg: "Neo4j Service Status" + command: "/home/learning/neo4j-learning/neo4j-community-3.3.9/bin/neo4j status" + composite-search-cluster: + name: elasticsearch + status_msg: "Elasticsearch Service Status" + command: "nc -z -v -w 2 {{ inventory_hostname }} 9200" + lp-redis-ps: name: redis - state: started - register: redis_status - when: inventory_hostname == "lp-redis-ps" - - - name: Print Redis service status - debug: - msg: "Redis Service Status: {{ 'running' if redis_status is succeeded else 'not running' }}" - when: inventory_hostname == "lp-redis-ps" - - - name: Check kafka service status - service: + status_msg: "Redis Service Status" + kafka: name: kafka - state: started - register: kafka_status - when: inventory_hostname == "kafka" - - - name: Print Kafka service status - debug: - 
msg: "Kafka Service Status: {{ 'running' if kafka_status is succeeded else 'not running' }}" - when: inventory_hostname == "kafka" - - - name: Check Tomcat Port (learning VM) - command: "nc -z -v -w 2 {{ inventory_hostname }} 8080" - ignore_errors: yes - register: tomcat_status - when: inventory_hostname == "lp-learning-ps" - - - name: Print Tomcat service status - debug: - msg: "Tomcat Service Status: {{ 'running' if tomcat_status.rc == 0 else 'not running' }}" - when: inventory_hostname == "lp-learning-ps" - - - name: Check systemctl status influxdb - service: + status_msg: "Kafka Service Status" + lp-learning-ps: + name: tomcat + status_msg: "Tomcat Service Status" + command: "nc -z -v -w 2 {{ inventory_hostname }} 8080" + dp-influx-ps: name: influxdb - state: started - register: influxdb_status - when: inventory_hostname == "dp-influx-ps" - - - name: Print InfluxDB service status - debug: - msg: "InfluxDB Service Status: {{ 'running' if influxdb_status is succeeded else 'not running' }}" - when: inventory_hostname == "dp-influx-ps" - - - name: Check systemctl status postgresql - service: + status_msg: "InfluxDB Service Status" + postgresql-master-1: name: postgresql - state: started - register: postgresql_status - when: inventory_hostname == "postgresql-master-1" - - - name: Print PostgreSQL service status - debug: - msg: "PostgreSQL Service Status: {{ 'running' if postgresql_status is succeeded else 'not running' }}" - when: inventory_hostname == "postgresql-master-1" - - - name: Check systemctl status survey-sl-mongodb - service: + status_msg: "PostgreSQL Service Status" + mongo_master: name: survey-sl-mongodb - state: started - register: survey_sl_mongodb_status - when: inventory_hostname == "mongo_master" - - - name: Print MongoDB service status - debug: - msg: "MongoDB Service Status: {{ 'running' if survey_sl_mongodb_status is succeeded else 'not running' }}" - when: inventory_hostname == "mongo_master" - - - name: Check systemctl status 
processing-cluster-kafka-1 - service: + status_msg: "MongoDB Service Status" + processing-cluster-kafka-1: name: processing-cluster-kafka-1 - state: started - register: processing_cluster_kafka1_status - when: inventory_hostname == "processing-cluster-kafka-1" - - - name: Print Processing Cluster Kafka-1 service status - debug: - msg: "Processing Cluster Kafka-1 Service Status: {{ 'running' if processing_cluster_kafka1_status is succeeded else 'not running' }}" - when: inventory_hostname == "processing-cluster-kafka-1" - - - name: Check systemctl status flink-processing-cluster-kafka - service: + status_msg: "Processing Cluster Kafka-1 Service Status" + flink-processing-cluster-kafka: name: flink-processing-cluster-kafka - state: started - register: flink_processing_cluster_kafka_status - when: inventory_hostname == "flink-processing-cluster-kafka" + status_msg: "Flink Processing Cluster Kafka Service Status" - - name: Print Flink Processing Cluster Kafka service status - debug: - msg: "Flink Processing Cluster Kafka Service Status: {{ 'running' if flink_processing_cluster_kafka_status is succeeded else 'not running' }}" - when: inventory_hostname == "flink-processing-cluster-kafka" + tasks: + - name: Check service status + block: + - name: Check service status + vars: + service_name: "{{ service_checks[inventory_hostname].name }}" + command: "{{ service_checks[inventory_hostname].command | default(omit) }}" + ansible.builtin.service: + name: "{{ service_name }}" + state: started + register: service_status + ignore_errors: true + + - name: Execute custom command if provided + ansible.builtin.command: "{{ command }}" + register: custom_status + ignore_errors: true + when: command is defined + + - name: Print service status + debug: + msg: "{{ service_checks[inventory_hostname].status_msg }}: {{ 'running' if (service_status is succeeded and not custom_status.failed) else 'not running' }}" + when: inventory_hostname in service_checks.keys() From 
f336ae561d0f3f0c7781897622bf3c64cf5bc415 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 22 Feb 2024 16:38:11 +0530 Subject: [PATCH 313/616] Update check_services.yml To check the services status into the jenkins --- ansible/check_services.yml | 92 +++++++++++++------------------------- 1 file changed, 32 insertions(+), 60 deletions(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index 3ad9faa243..2b15dc8e35 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -1,4 +1,3 @@ ---- - name: Check Service Status hosts: - keycloak @@ -14,68 +13,41 @@ - processing-cluster-kafka-1 - flink-processing-cluster-kafka - vars: - service_checks: - keycloak: - name: keycloak - status_msg: "Keycloak Service Status" - cassandra: - name: cassandra - status_msg: "Cassandra Service Status" - lp-neo4j-ps: - name: neo4j - status_msg: "Neo4j Service Status" - command: "/home/learning/neo4j-learning/neo4j-community-3.3.9/bin/neo4j status" - composite-search-cluster: - name: elasticsearch - status_msg: "Elasticsearch Service Status" - command: "nc -z -v -w 2 {{ inventory_hostname }} 9200" - lp-redis-ps: - name: redis - status_msg: "Redis Service Status" - kafka: - name: kafka - status_msg: "Kafka Service Status" - lp-learning-ps: - name: tomcat - status_msg: "Tomcat Service Status" - command: "nc -z -v -w 2 {{ inventory_hostname }} 8080" - dp-influx-ps: - name: influxdb - status_msg: "InfluxDB Service Status" - postgresql-master-1: - name: postgresql - status_msg: "PostgreSQL Service Status" - mongo_master: - name: survey-sl-mongodb - status_msg: "MongoDB Service Status" - processing-cluster-kafka-1: - name: processing-cluster-kafka-1 - status_msg: "Processing Cluster Kafka-1 Service Status" - flink-processing-cluster-kafka: - name: flink-processing-cluster-kafka - status_msg: "Flink Processing Cluster Kafka Service Status" - tasks: - name: Check service status block: - - name: Check 
service status - vars: - service_name: "{{ service_checks[inventory_hostname].name }}" - command: "{{ service_checks[inventory_hostname].command | default(omit) }}" - ansible.builtin.service: - name: "{{ service_name }}" - state: started - register: service_status - ignore_errors: true - - - name: Execute custom command if provided - ansible.builtin.command: "{{ command }}" - register: custom_status - ignore_errors: true - when: command is defined + - name: Gather service facts + ansible.builtin.service_facts: - name: Print service status debug: - msg: "{{ service_checks[inventory_hostname].status_msg }}: {{ 'running' if (service_status is succeeded and not custom_status.failed) else 'not running' }}" - when: inventory_hostname in service_checks.keys() + msg: "{{ item.key }} Service Status: {{ 'running' if ansible_services[item.key]['state'] == 'running' else 'not running' }}" + loop: "{{ ansible_services.items() }}" + when: item.key in [ + 'keycloak', + 'cassandra', + 'neo4j', + 'elasticsearch', + 'redis', + 'kafka', + 'tomcat', + 'influxdb', + 'postgresql', + 'survey-sl-mongodb', + 'processing-cluster-kafka-1', + 'flink-processing-cluster-kafka' + ] + when: inventory_hostname in [ + 'keycloak', + 'cassandra', + 'lp-neo4j-ps', + 'composite-search-cluster', + 'lp-redis-ps', + 'kafka', + 'lp-learning-ps', + 'dp-influx-ps', + 'postgresql-master-1', + 'mongo_master', + 'processing-cluster-kafka-1', + 'flink-processing-cluster-kafka' + ] From 9b2ed8d908dfda659913be13c5b10a1a2008c21e Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 22 Feb 2024 16:40:04 +0530 Subject: [PATCH 314/616] Update check_services.yml Jenkins Output Check --- ansible/check_services.yml | 73 ++++++++++++-------------------------- 1 file changed, 23 insertions(+), 50 deletions(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index 2b15dc8e35..e78bec0fab 100644 --- a/ansible/check_services.yml +++ 
b/ansible/check_services.yml @@ -1,53 +1,26 @@ - name: Check Service Status - hosts: - - keycloak - - cassandra - - lp-neo4j-ps - - composite-search-cluster - - lp-redis-ps - - kafka - - lp-learning-ps - - dp-influx-ps - - postgresql-master-1 - - mongo_master - - processing-cluster-kafka-1 - - flink-processing-cluster-kafka - + hosts: all + gather_facts: yes + tasks: - - name: Check service status - block: - - name: Gather service facts - ansible.builtin.service_facts: + - name: Gather service facts + ansible.builtin.service_facts: - - name: Print service status - debug: - msg: "{{ item.key }} Service Status: {{ 'running' if ansible_services[item.key]['state'] == 'running' else 'not running' }}" - loop: "{{ ansible_services.items() }}" - when: item.key in [ - 'keycloak', - 'cassandra', - 'neo4j', - 'elasticsearch', - 'redis', - 'kafka', - 'tomcat', - 'influxdb', - 'postgresql', - 'survey-sl-mongodb', - 'processing-cluster-kafka-1', - 'flink-processing-cluster-kafka' - ] - when: inventory_hostname in [ - 'keycloak', - 'cassandra', - 'lp-neo4j-ps', - 'composite-search-cluster', - 'lp-redis-ps', - 'kafka', - 'lp-learning-ps', - 'dp-influx-ps', - 'postgresql-master-1', - 'mongo_master', - 'processing-cluster-kafka-1', - 'flink-processing-cluster-kafka' - ] + - name: Print service status + debug: + msg: "{{ item.key }} Service Status: {{ 'running' if ansible_services[item.key]['state'] == 'running' else 'not running' }}" + loop: "{{ ansible_services.items() }}" + when: item.key in [ + 'keycloak', + 'cassandra', + 'neo4j', + 'elasticsearch', + 'redis', + 'kafka', + 'tomcat', + 'influxdb', + 'postgresql', + 'survey-sl-mongodb', + 'processing-cluster-kafka-1', + 'flink-processing-cluster-kafka' + ] From 0bfbd5ad74748ae412fa06915767bf4aeba9e668 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 22 Feb 2024 16:43:01 +0530 Subject: [PATCH 315/616] Update check_services.yml Reverted the Changes --- 
ansible/check_services.yml | 179 ++++++++++++++++++++++++++++++++----- 1 file changed, 155 insertions(+), 24 deletions(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index e78bec0fab..6301078d02 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -1,26 +1,157 @@ - name: Check Service Status - hosts: all - gather_facts: yes - + hosts: + - keycloak + - cassandra + - lp-neo4j-ps + - composite-search-cluster + - lp-redis-ps + - kafka + - lp-learning-ps + - dp-influx-ps + - postgresql-master-1 + - mongo_master + - processing-cluster-kafka-1 + - flink-processing-cluster-kafka + tasks: - - name: Gather service facts - ansible.builtin.service_facts: - - - name: Print service status - debug: - msg: "{{ item.key }} Service Status: {{ 'running' if ansible_services[item.key]['state'] == 'running' else 'not running' }}" - loop: "{{ ansible_services.items() }}" - when: item.key in [ - 'keycloak', - 'cassandra', - 'neo4j', - 'elasticsearch', - 'redis', - 'kafka', - 'tomcat', - 'influxdb', - 'postgresql', - 'survey-sl-mongodb', - 'processing-cluster-kafka-1', - 'flink-processing-cluster-kafka' - ] + - name: Check keycloak service status + service: + name: keycloak + state: started + register: keycloak_status + when: inventory_hostname == "keycloak" + + - name: Print keycloak service status + debug: + msg: "Keycloak Service Status: {{ 'running' if keycloak_status is succeeded else 'not running' }}" + when: inventory_hostname == "keycloak" + + - name: Check Cassandra service status + service: + name: cassandra + state: started + register: cassandra_status + when: inventory_hostname == "cassandra" + + - name: Print Cassandra service status + debug: + msg: "Cassandra Service Status: {{ 'running' if cassandra_status is succeeded else 'not running' }}" + when: inventory_hostname == "cassandra" + + - name: Check Neo4j Service Status + command: "/home/learning/neo4j-learning/neo4j-community-3.3.9/bin/neo4j status" + register: neo4j_status + 
changed_when: false + become: true + when: inventory_hostname == "lp-neo4j-ps" + + - name: Print Neo4j service status + debug: + msg: "Neo4j Service Status: {{ 'running' if neo4j_status.rc == 0 else 'not running' }}" + when: inventory_hostname == "lp-neo4j-ps" + + - name: Check Elasticsearch Port + command: "nc -z -v -w 2 {{ inventory_hostname }} 9200" + ignore_errors: yes + register: elasticsearch_status + when: inventory_hostname == "composite-search-cluster" + + - name: Print Elasticsearch service status + debug: + msg: "Elasticsearch Service Status: {{ 'running' if elasticsearch_status.rc == 0 else 'not running' }}" + when: inventory_hostname == "composite-search-cluster" + + - name: Check redis service status + service: + name: redis + state: started + register: redis_status + when: inventory_hostname == "lp-redis-ps" + + - name: Print Redis service status + debug: + msg: "Redis Service Status: {{ 'running' if redis_status is succeeded else 'not running' }}" + when: inventory_hostname == "lp-redis-ps" + + - name: Check kafka service status + service: + name: kafka + state: started + register: kafka_status + when: inventory_hostname == "kafka" + + - name: Print Kafka service status + debug: + msg: "Kafka Service Status: {{ 'running' if kafka_status is succeeded else 'not running' }}" + when: inventory_hostname == "kafka" + + - name: Check Tomcat Port (learning VM) + command: "nc -z -v -w 2 {{ inventory_hostname }} 8080" + ignore_errors: yes + register: tomcat_status + when: inventory_hostname == "lp-learning-ps" + + - name: Print Tomcat service status + debug: + msg: "Tomcat Service Status: {{ 'running' if tomcat_status.rc == 0 else 'not running' }}" + when: inventory_hostname == "lp-learning-ps" + + - name: Check systemctl status influxdb + service: + name: influxdb + state: started + register: influxdb_status + when: inventory_hostname == "dp-influx-ps" + + - name: Print InfluxDB service status + debug: + msg: "InfluxDB Service Status: {{ 'running' if 
influxdb_status is succeeded else 'not running' }}" + when: inventory_hostname == "dp-influx-ps" + + - name: Check systemctl status postgresql + service: + name: postgresql + state: started + register: postgresql_status + when: inventory_hostname == "postgresql-master-1" + + - name: Print PostgreSQL service status + debug: + msg: "PostgreSQL Service Status: {{ 'running' if postgresql_status is succeeded else 'not running' }}" + when: inventory_hostname == "postgresql-master-1" + + - name: Check systemctl status survey-sl-mongodb + service: + name: survey-sl-mongodb + state: started + register: survey_sl_mongodb_status + when: inventory_hostname == "mongo_master" + + - name: Print MongoDB service status + debug: + msg: "MongoDB Service Status: {{ 'running' if survey_sl_mongodb_status is succeeded else 'not running' }}" + when: inventory_hostname == "mongo_master" + + - name: Check systemctl status processing-cluster-kafka-1 + service: + name: processing-cluster-kafka-1 + state: started + register: processing_cluster_kafka1_status + when: inventory_hostname == "processing-cluster-kafka-1" + + - name: Print Processing Cluster Kafka-1 service status + debug: + msg: "Processing Cluster Kafka-1 Service Status: {{ 'running' if processing_cluster_kafka1_status is succeeded else 'not running' }}" + when: inventory_hostname == "processing-cluster-kafka-1" + + - name: Check systemctl status flink-processing-cluster-kafka + service: + name: flink-processing-cluster-kafka + state: started + register: flink_processing_cluster_kafka_status + when: inventory_hostname == "flink-processing-cluster-kafka" + + - name: Print Flink Processing Cluster Kafka service status + debug: + msg: "Flink Processing Cluster Kafka Service Status: {{ 'running' if flink_processing_cluster_kafka_status is succeeded else 'not running' }}" + when: inventory_hostname == "flink-processing-cluster-kafka" From 0e96f708e30eaa8b20fd372fbde9a0e39bc962f2 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj 
<121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 22 Feb 2024 19:03:01 +0530 Subject: [PATCH 316/616] Update check_services.yml Check_services changes --- ansible/check_services.yml | 129 +++++++++++++------------------------ 1 file changed, 45 insertions(+), 84 deletions(-) diff --git a/ansible/check_services.yml b/ansible/check_services.yml index 6301078d02..7843d17465 100644 --- a/ansible/check_services.yml +++ b/ansible/check_services.yml @@ -1,157 +1,118 @@ -- name: Check Service Status - hosts: - - keycloak - - cassandra - - lp-neo4j-ps - - composite-search-cluster - - lp-redis-ps - - kafka - - lp-learning-ps - - dp-influx-ps - - postgresql-master-1 - - mongo_master - - processing-cluster-kafka-1 - - flink-processing-cluster-kafka - +- hosts: keycloak tasks: - name: Check keycloak service status service: name: keycloak state: started register: keycloak_status - when: inventory_hostname == "keycloak" - - name: Print keycloak service status + - name: Print Keycloak service status debug: - msg: "Keycloak Service Status: {{ 'running' if keycloak_status is succeeded else 'not running' }}" - when: inventory_hostname == "keycloak" + msg: "Keycloak Service Status: {{ keycloak_status.state }}" +- hosts: cassandra + tasks: - name: Check Cassandra service status service: name: cassandra state: started register: cassandra_status - when: inventory_hostname == "cassandra" - + - name: Print Cassandra service status debug: - msg: "Cassandra Service Status: {{ 'running' if cassandra_status is succeeded else 'not running' }}" - when: inventory_hostname == "cassandra" + msg: "Cassandra Service Status: {{ cassandra_status.state }}" +- hosts: lp-neo4j-ps + tasks: - name: Check Neo4j Service Status command: "/home/learning/neo4j-learning/neo4j-community-3.3.9/bin/neo4j status" - register: neo4j_status + register: service_status changed_when: false become: true - when: inventory_hostname == "lp-neo4j-ps" - name: Print Neo4j service status debug: - msg: "Neo4j Service 
Status: {{ 'running' if neo4j_status.rc == 0 else 'not running' }}" - when: inventory_hostname == "lp-neo4j-ps" + msg: "Neo4j Service Status: {{ service_status.stdout }}" +- hosts: composite-search-cluster + tasks: - name: Check Elasticsearch Port command: "nc -z -v -w 2 {{ inventory_hostname }} 9200" ignore_errors: yes - register: elasticsearch_status - when: inventory_hostname == "composite-search-cluster" - - - name: Print Elasticsearch service status - debug: - msg: "Elasticsearch Service Status: {{ 'running' if elasticsearch_status.rc == 0 else 'not running' }}" - when: inventory_hostname == "composite-search-cluster" + register: nc_result +- hosts: lp-redis-ps + tasks: - name: Check redis service status service: name: redis state: started register: redis_status - when: inventory_hostname == "lp-redis-ps" - name: Print Redis service status debug: - msg: "Redis Service Status: {{ 'running' if redis_status is succeeded else 'not running' }}" - when: inventory_hostname == "lp-redis-ps" + msg: "Redis Service Status: {{ redis_status.state }}" +- hosts: kafka + tasks: - name: Check kafka service status service: name: kafka state: started register: kafka_status - when: inventory_hostname == "kafka" - name: Print Kafka service status debug: - msg: "Kafka Service Status: {{ 'running' if kafka_status is succeeded else 'not running' }}" - when: inventory_hostname == "kafka" + msg: "Kafka Service Status: {{ kafka_status.state }}" +- hosts: lp-learning-ps + tasks: - name: Check Tomcat Port (learning VM) command: "nc -z -v -w 2 {{ inventory_hostname }} 8080" ignore_errors: yes - register: tomcat_status - when: inventory_hostname == "lp-learning-ps" + register: nc_tomcat_result - - name: Print Tomcat service status - debug: - msg: "Tomcat Service Status: {{ 'running' if tomcat_status.rc == 0 else 'not running' }}" - when: inventory_hostname == "lp-learning-ps" + # Add debug task here if needed for Tomcat service status +# - hosts: internal-proxy +# tasks: +# - name: Check 
Kong Port (Internal proxy) +# command: "nc -z -v -w 2 {{ inventory_hostname }} 8000" +# ignore_errors: yes +# register: nc_kong_result + +- hosts: dp-influx-ps + tasks: - name: Check systemctl status influxdb service: name: influxdb state: started register: influxdb_status - when: inventory_hostname == "dp-influx-ps" - + - name: Print InfluxDB service status debug: - msg: "InfluxDB Service Status: {{ 'running' if influxdb_status is succeeded else 'not running' }}" - when: inventory_hostname == "dp-influx-ps" + msg: "InfluxDB Service Status: {{ influxdb_status.state }}" +- hosts: postgresql-master-1 + tasks: - name: Check systemctl status postgresql service: name: postgresql state: started - register: postgresql_status - when: inventory_hostname == "postgresql-master-1" + register: postgresql_status - name: Print PostgreSQL service status debug: - msg: "PostgreSQL Service Status: {{ 'running' if postgresql_status is succeeded else 'not running' }}" - when: inventory_hostname == "postgresql-master-1" + msg: "PostgreSQL Service Status: {{ postgresql_status.state }}" - - name: Check systemctl status survey-sl-mongodb +- hosts: mongo_master + tasks: + - name: Check systemctl status mongod service: - name: survey-sl-mongodb + name: mongod state: started - register: survey_sl_mongodb_status - when: inventory_hostname == "mongo_master" + register: mongod_status - name: Print MongoDB service status debug: - msg: "MongoDB Service Status: {{ 'running' if survey_sl_mongodb_status is succeeded else 'not running' }}" - when: inventory_hostname == "mongo_master" - - - name: Check systemctl status processing-cluster-kafka-1 - service: - name: processing-cluster-kafka-1 - state: started - register: processing_cluster_kafka1_status - when: inventory_hostname == "processing-cluster-kafka-1" - - - name: Print Processing Cluster Kafka-1 service status - debug: - msg: "Processing Cluster Kafka-1 Service Status: {{ 'running' if processing_cluster_kafka1_status is succeeded else 'not 
running' }}" - when: inventory_hostname == "processing-cluster-kafka-1" - - - name: Check systemctl status flink-processing-cluster-kafka - service: - name: flink-processing-cluster-kafka - state: started - register: flink_processing_cluster_kafka_status - when: inventory_hostname == "flink-processing-cluster-kafka" - - - name: Print Flink Processing Cluster Kafka service status - debug: - msg: "Flink Processing Cluster Kafka Service Status: {{ 'running' if flink_processing_cluster_kafka_status is succeeded else 'not running' }}" - when: inventory_hostname == "flink-processing-cluster-kafka" + msg: "MongoDB Service Status: {{ mongod_status.state }}" From 918c84da2ef572b290e5d6ddd2042de9895da69a Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 22 Feb 2024 19:15:52 +0530 Subject: [PATCH 317/616] Update check_services.yml Modified check_services From 00d5d140f726b1234eae4bf5eb7270c234edf530 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 23 Feb 2024 20:32:47 +0530 Subject: [PATCH 318/616] Update check_pods_status.yml Check_status Test --- ansible/check_pods_status.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/ansible/check_pods_status.yml b/ansible/check_pods_status.yml index 0247937f64..af2085a8cc 100644 --- a/ansible/check_pods_status.yml +++ b/ansible/check_pods_status.yml @@ -5,18 +5,18 @@ environment: KUBECONFIG: "{{ kubeconfig_path }}" tasks: - - name: Check Non-Running Pods in Namespaces - shell: kubectl get pods -A | grep -v Running - register: non_running_pods + - name: Get Pod Statuses + shell: kubectl get pods --all-namespaces --field-selector=status.phase!=Running,status.phase!=Succeeded --field-selector='status.containerStatuses[*].state.waiting.reason!=PodInitializing' + register: pod_status changed_when: false failed_when: false - - name: Parse Non-Running Pods + - name: Parse Pod 
Statuses set_fact: - non_running_pod_lines: "{{ non_running_pods.stdout_lines }}" - when: non_running_pods.stdout_lines | length > 0 + pod_lines: "{{ pod_status.stdout_lines }}" + when: pod_status.stdout_lines | length > 0 - - name: Print Non-Running Pods + - name: Print Pod Statuses debug: - msg: "{{ non_running_pod_lines | join('\n') }}" - when: non_running_pod_lines is defined + msg: "{{ pod_lines | join('\n') }}" + when: pod_lines is defined From 1de6bc24a12afaba97d67ae8b9e328ab6402e448 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 23 Feb 2024 20:39:05 +0530 Subject: [PATCH 319/616] Update check_pods_status.yml Test --- ansible/check_pods_status.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/ansible/check_pods_status.yml b/ansible/check_pods_status.yml index af2085a8cc..f675762f3b 100644 --- a/ansible/check_pods_status.yml +++ b/ansible/check_pods_status.yml @@ -5,18 +5,18 @@ environment: KUBECONFIG: "{{ kubeconfig_path }}" tasks: - - name: Get Pod Statuses - shell: kubectl get pods --all-namespaces --field-selector=status.phase!=Running,status.phase!=Succeeded --field-selector='status.containerStatuses[*].state.waiting.reason!=PodInitializing' - register: pod_status + - name: Check Non-Running Pods in Namespaces + shell: kubectl get pods --all-namespaces --field-selector=status.phase!=Succeeded --field-selector='status.containerStatuses[*].state.waiting.reason!=PodInitializing' | grep -v Running + register: non_running_pods changed_when: false failed_when: false - - name: Parse Pod Statuses + - name: Parse Non-Running Pods set_fact: - pod_lines: "{{ pod_status.stdout_lines }}" - when: pod_status.stdout_lines | length > 0 + non_running_pod_lines: "{{ non_running_pods.stdout_lines }}" + when: non_running_pods.stdout_lines | length > 0 - - name: Print Pod Statuses + - name: Print Non-Running Pods debug: - msg: "{{ pod_lines | join('\n') }}" - when: pod_lines 
is defined + msg: "{{ non_running_pod_lines | join('\n') }}" + when: non_running_pod_lines is defined From d846809552f1d9b9bdca2b3f7c94d4692d97f0bb Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 23 Feb 2024 20:40:21 +0530 Subject: [PATCH 320/616] Update check_pods_status.yml Test --- ansible/check_pods_status.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/check_pods_status.yml b/ansible/check_pods_status.yml index f675762f3b..0247937f64 100644 --- a/ansible/check_pods_status.yml +++ b/ansible/check_pods_status.yml @@ -6,7 +6,7 @@ KUBECONFIG: "{{ kubeconfig_path }}" tasks: - name: Check Non-Running Pods in Namespaces - shell: kubectl get pods --all-namespaces --field-selector=status.phase!=Succeeded --field-selector='status.containerStatuses[*].state.waiting.reason!=PodInitializing' | grep -v Running + shell: kubectl get pods -A | grep -v Running register: non_running_pods changed_when: false failed_when: false From 2cd7097bd13a745d8386ebf1a178bea096024a82 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Sat, 24 Feb 2024 13:46:01 +0530 Subject: [PATCH 321/616] Update Jenkinsfile Adding mail id for notification testing --- pipelines/check-service-status/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/check-service-status/Jenkinsfile b/pipelines/check-service-status/Jenkinsfile index 07c1395be2..5e3d086c0e 100644 --- a/pipelines/check-service-status/Jenkinsfile +++ b/pipelines/check-service-status/Jenkinsfile @@ -45,6 +45,6 @@ node() { } finally { slack_notify(currentBuild.result) - email_notify() + email_notify("bijesh.kashyap@trigyn.com") } } From d28fcda400cd033cdfe24ce78f90eb70e0b79dab Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Sat, 24 Feb 2024 14:24:05 +0530 Subject: [PATCH 322/616] Update Jenkinsfile removing my mail id 
for testing --- pipelines/check-service-status/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/check-service-status/Jenkinsfile b/pipelines/check-service-status/Jenkinsfile index 5e3d086c0e..07c1395be2 100644 --- a/pipelines/check-service-status/Jenkinsfile +++ b/pipelines/check-service-status/Jenkinsfile @@ -45,6 +45,6 @@ node() { } finally { slack_notify(currentBuild.result) - email_notify("bijesh.kashyap@trigyn.com") + email_notify() } } From bb21b44cb99751cf2428641d122d47711025a2c2 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 28 Feb 2024 10:10:06 +0530 Subject: [PATCH 323/616] Update check_pods_status.yml Test --- ansible/check_pods_status.yml | 47 +++++++++++++++++++++++++++++------ 1 file changed, 40 insertions(+), 7 deletions(-) diff --git a/ansible/check_pods_status.yml b/ansible/check_pods_status.yml index 0247937f64..4d3f6191cc 100644 --- a/ansible/check_pods_status.yml +++ b/ansible/check_pods_status.yml @@ -1,4 +1,3 @@ ---- - name: Check Pod Status in All Namespaces hosts: localhost gather_facts: no @@ -10,13 +9,47 @@ register: non_running_pods changed_when: false failed_when: false + ignore_errors: true - - name: Parse Non-Running Pods + - name: Segregate Non-Running Pods set_fact: - non_running_pod_lines: "{{ non_running_pods.stdout_lines }}" - when: non_running_pods.stdout_lines | length > 0 + error_pods: [] + container_unknown_pods: [] + crashloop_backoff_pods: [] + unknown_pods: [] + when: non_running_pods.stdout != "" - - name: Print Non-Running Pods + - name: Extract Pod Status + set_fact: + pod_status: "{{ item.split()[2] }}" + loop: "{{ non_running_pods.stdout_lines }}" + when: item.split() | length > 2 + + - name: Group Pods by Status + set_fact: + error_pods: "{{ error_pods + [item] }}" + loop: "{{ pod_status }}" + when: item == 'Error' + + - set_fact: + container_unknown_pods: "{{ container_unknown_pods + [item] }}" + loop: "{{ 
pod_status }}" + when: item == 'ContainerCreating' or item == 'ContainerCreating,' or item == 'ContainerCreating,' + + - set_fact: + crashloop_backoff_pods: "{{ crashloop_backoff_pods + [item] }}" + loop: "{{ pod_status }}" + when: item == 'CrashLoopBackOff' + + - set_fact: + unknown_pods: "{{ unknown_pods + [item] }}" + loop: "{{ pod_status }}" + when: item not in ['Error', 'ContainerCreating', 'CrashLoopBackOff'] + + - name: Print Segregated Non-Running Pods debug: - msg: "{{ non_running_pod_lines | join('\n') }}" - when: non_running_pod_lines is defined + msg: | + Error Pods: {{ error_pods | join(', ') }} + Container Status Unknown Pods: {{ container_unknown_pods | join(', ') }} + CrashLoop Back-Off Pods: {{ crashloop_backoff_pods | join(', ') }} + Unknown Pods: {{ unknown_pods | join(', ') }} From d60f598768524fc9a7d212ac002549bf2bc58a98 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 28 Feb 2024 10:13:12 +0530 Subject: [PATCH 324/616] Update check_pods_status.yml Test --- ansible/check_pods_status.yml | 41 +++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 19 deletions(-) diff --git a/ansible/check_pods_status.yml b/ansible/check_pods_status.yml index 4d3f6191cc..dd088e69fb 100644 --- a/ansible/check_pods_status.yml +++ b/ansible/check_pods_status.yml @@ -21,30 +21,33 @@ - name: Extract Pod Status set_fact: - pod_status: "{{ item.split()[2] }}" - loop: "{{ non_running_pods.stdout_lines }}" - when: item.split() | length > 2 + pod_statuses: "{{ non_running_pods.stdout_lines | map(attribute='split') | map('last') }}" + when: non_running_pods.stdout_lines | length > 0 - name: Group Pods by Status - set_fact: - error_pods: "{{ error_pods + [item] }}" - loop: "{{ pod_status }}" - when: item == 'Error' + loop: "{{ pod_statuses }}" + loop_control: + loop_var: pod_status + block: + - set_fact: + error_pods: "{{ error_pods + [item] }}" + loop: "{{ pod_status }}" + when: item == 'Error' - - 
set_fact: - container_unknown_pods: "{{ container_unknown_pods + [item] }}" - loop: "{{ pod_status }}" - when: item == 'ContainerCreating' or item == 'ContainerCreating,' or item == 'ContainerCreating,' + - set_fact: + container_unknown_pods: "{{ container_unknown_pods + [item] }}" + loop: "{{ pod_status }}" + when: item == 'ContainerCreating' or item == 'ContainerCreating,' or item == 'ContainerCreating,' - - set_fact: - crashloop_backoff_pods: "{{ crashloop_backoff_pods + [item] }}" - loop: "{{ pod_status }}" - when: item == 'CrashLoopBackOff' + - set_fact: + crashloop_backoff_pods: "{{ crashloop_backoff_pods + [item] }}" + loop: "{{ pod_status }}" + when: item == 'CrashLoopBackOff' - - set_fact: - unknown_pods: "{{ unknown_pods + [item] }}" - loop: "{{ pod_status }}" - when: item not in ['Error', 'ContainerCreating', 'CrashLoopBackOff'] + - set_fact: + unknown_pods: "{{ unknown_pods + [item] }}" + loop: "{{ pod_status }}" + when: item not in ['Error', 'ContainerCreating', 'CrashLoopBackOff'] - name: Print Segregated Non-Running Pods debug: From 8d743b79b0cefd92f3e823a8a3a84935498fe4aa Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 28 Feb 2024 10:14:59 +0530 Subject: [PATCH 325/616] Update check_pods_status.yml Loop directly over list of the pod status --- ansible/check_pods_status.yml | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/ansible/check_pods_status.yml b/ansible/check_pods_status.yml index dd088e69fb..e8c40c3853 100644 --- a/ansible/check_pods_status.yml +++ b/ansible/check_pods_status.yml @@ -25,29 +25,27 @@ when: non_running_pods.stdout_lines | length > 0 - name: Group Pods by Status - loop: "{{ pod_statuses }}" - loop_control: - loop_var: pod_status block: - - set_fact: + - name: Loop over Pod Statuses + set_fact: error_pods: "{{ error_pods + [item] }}" - loop: "{{ pod_status }}" - when: item == 'Error' + loop: "{{ pod_statuses }}" + when: 
"'Error' in item" - set_fact: container_unknown_pods: "{{ container_unknown_pods + [item] }}" - loop: "{{ pod_status }}" - when: item == 'ContainerCreating' or item == 'ContainerCreating,' or item == 'ContainerCreating,' + loop: "{{ pod_statuses }}" + when: "'ContainerCreating' in item or 'ContainerCreating,' in item" - set_fact: crashloop_backoff_pods: "{{ crashloop_backoff_pods + [item] }}" - loop: "{{ pod_status }}" - when: item == 'CrashLoopBackOff' + loop: "{{ pod_statuses }}" + when: "'CrashLoopBackOff' in item" - set_fact: unknown_pods: "{{ unknown_pods + [item] }}" - loop: "{{ pod_status }}" - when: item not in ['Error', 'ContainerCreating', 'CrashLoopBackOff'] + loop: "{{ pod_statuses }}" + when: "'Error' not in item and 'ContainerCreating' not in item and 'CrashLoopBackOff' not in item" - name: Print Segregated Non-Running Pods debug: From 74abbe0b7eddd870e08b26038d7bcc554862f29d Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 28 Feb 2024 10:20:10 +0530 Subject: [PATCH 326/616] Update check_pods_status.yml List Added --- ansible/check_pods_status.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/check_pods_status.yml b/ansible/check_pods_status.yml index e8c40c3853..30d1191dc1 100644 --- a/ansible/check_pods_status.yml +++ b/ansible/check_pods_status.yml @@ -21,7 +21,7 @@ - name: Extract Pod Status set_fact: - pod_statuses: "{{ non_running_pods.stdout_lines | map(attribute='split') | map('last') }}" + pod_statuses: "{{ non_running_pods.stdout_lines | map(attribute='split') | map('last') | list }}" when: non_running_pods.stdout_lines | length > 0 - name: Group Pods by Status From c72b7ba3939df37738a4b7a1a35cc26039e56adc Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 28 Feb 2024 10:23:35 +0530 Subject: [PATCH 327/616] Update check_pods_status.yml regex_replace added --- ansible/check_pods_status.yml | 2 
+- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/check_pods_status.yml b/ansible/check_pods_status.yml index 30d1191dc1..eab3b9caf4 100644 --- a/ansible/check_pods_status.yml +++ b/ansible/check_pods_status.yml @@ -21,7 +21,7 @@ - name: Extract Pod Status set_fact: - pod_statuses: "{{ non_running_pods.stdout_lines | map(attribute='split') | map('last') | list }}" + pod_statuses: "{{ non_running_pods.stdout_lines | map('regex_replace', '^.*\\s+(\\S+)\\s*$', '\\1') | list }}" when: non_running_pods.stdout_lines | length > 0 - name: Group Pods by Status From cd241a5b05c1ea30d22726491fdd6cfe002bb418 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 28 Feb 2024 10:31:29 +0530 Subject: [PATCH 328/616] Update check_pods_status.yml Test --- ansible/check_pods_status.yml | 93 ++++++++++++++++++++++++----------- 1 file changed, 65 insertions(+), 28 deletions(-) diff --git a/ansible/check_pods_status.yml b/ansible/check_pods_status.yml index eab3b9caf4..c49c63e290 100644 --- a/ansible/check_pods_status.yml +++ b/ansible/check_pods_status.yml @@ -5,52 +5,89 @@ KUBECONFIG: "{{ kubeconfig_path }}" tasks: - name: Check Non-Running Pods in Namespaces - shell: kubectl get pods -A | grep -v Running - register: non_running_pods + shell: kubectl get pods -A + register: pod_status_output changed_when: false failed_when: false ignore_errors: true - - name: Segregate Non-Running Pods + - name: Extract Pod Information set_fact: + pod_list: "{{ pod_status_output.stdout_lines[1:] }}" # Skip header line + when: pod_status_output.stdout_lines | length > 1 + + - name: Group Pods by Status + set_fact: + running_pods: [] error_pods: [] - container_unknown_pods: [] + container_status_unknown_pods: [] crashloop_backoff_pods: [] unknown_pods: [] - when: non_running_pods.stdout != "" + when: pod_list is defined - - name: Extract Pod Status + - name: Loop through pod list + loop: "{{ pod_list }}" set_fact: - 
pod_statuses: "{{ non_running_pods.stdout_lines | map('regex_replace', '^.*\\s+(\\S+)\\s*$', '\\1') | list }}" - when: non_running_pods.stdout_lines | length > 0 + pod_info: "{{ item.split() }}" + loop_control: + label: "{{ item }}" + when: item | length > 0 - - name: Group Pods by Status + - name: Categorize Pods + loop: "{{ pod_list }}" + loop_control: + label: "{{ item }}" + set_fact: + pod_name: "{{ item.split()[0] }}" + pod_status: "{{ item.split()[2] }}" + pod_restarts: "{{ item.split()[3] }}" + pod_age: "{{ item.split()[4] }}" + when: item | length > 0 block: - - name: Loop over Pod Statuses - set_fact: - error_pods: "{{ error_pods + [item] }}" - loop: "{{ pod_statuses }}" - when: "'Error' in item" + - set_fact: + running_pods: "{{ running_pods + [pod_name] }}" + when: pod_status == 'Running' - set_fact: - container_unknown_pods: "{{ container_unknown_pods + [item] }}" - loop: "{{ pod_statuses }}" - when: "'ContainerCreating' in item or 'ContainerCreating,' in item" + error_pods: "{{ error_pods + [pod_name] }}" + when: pod_status == 'Error' - set_fact: - crashloop_backoff_pods: "{{ crashloop_backoff_pods + [item] }}" - loop: "{{ pod_statuses }}" - when: "'CrashLoopBackOff' in item" + container_status_unknown_pods: "{{ container_status_unknown_pods + [pod_name] }}" + when: pod_status == 'ContainerStatusUnknown' - set_fact: - unknown_pods: "{{ unknown_pods + [item] }}" - loop: "{{ pod_statuses }}" - when: "'Error' not in item and 'ContainerCreating' not in item and 'CrashLoopBackOff' not in item" + crashloop_backoff_pods: "{{ crashloop_backoff_pods + [pod_name] }}" + when: pod_status == 'CrashLoopBackOff' - - name: Print Segregated Non-Running Pods + - set_fact: + unknown_pods: "{{ unknown_pods + [pod_name] }}" + when: pod_status == 'Unknown' + + - name: Print Pod Information debug: msg: | - Error Pods: {{ error_pods | join(', ') }} - Container Status Unknown Pods: {{ container_unknown_pods | join(', ') }} - CrashLoop Back-Off Pods: {{ crashloop_backoff_pods | 
join(', ') }} - Unknown Pods: {{ unknown_pods | join(', ') }} + Running Pods: + {% for pod in running_pods %} + - {{ pod }} + {% endfor %} + + Error Pods: + {% for pod in error_pods %} + - {{ pod }} + {% endfor %} + + Container Status Unknown Pods: + {% for pod in container_status_unknown_pods %} + - {{ pod }} + {% endfor %} + + CrashLoop Back-Off Pods: + {% for pod in crashloop_backoff_pods %} + - {{ pod }} + {% endfor %} + + Unknown Pods: + {% for pod in unknown_pods %} + - {{ pod }} + {% endfor %} From fa1dadd68f9a9399aa120f5f7db17e1446b57d70 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 28 Feb 2024 10:32:48 +0530 Subject: [PATCH 329/616] Update check_pods_status.yml Test --- ansible/check_pods_status.yml | 56 +++++++++++++++++------------------ 1 file changed, 27 insertions(+), 29 deletions(-) diff --git a/ansible/check_pods_status.yml b/ansible/check_pods_status.yml index c49c63e290..d486e2bf2e 100644 --- a/ansible/check_pods_status.yml +++ b/ansible/check_pods_status.yml @@ -25,44 +25,42 @@ unknown_pods: [] when: pod_list is defined - - name: Loop through pod list + - name: Loop through pod list and categorize pods loop: "{{ pod_list }}" - set_fact: - pod_info: "{{ item.split() }}" loop_control: label: "{{ item }}" when: item | length > 0 - - - name: Categorize Pods - loop: "{{ pod_list }}" - loop_control: - label: "{{ item }}" - set_fact: - pod_name: "{{ item.split()[0] }}" - pod_status: "{{ item.split()[2] }}" - pod_restarts: "{{ item.split()[3] }}" - pod_age: "{{ item.split()[4] }}" - when: item | length > 0 block: - - set_fact: - running_pods: "{{ running_pods + [pod_name] }}" - when: pod_status == 'Running' + - name: Extract Pod Information + set_fact: + pod_info: "{{ item.split() }}" + - name: Categorize Pods + set_fact: + pod_name: "{{ pod_info[0] }}" + pod_status: "{{ pod_info[2] }}" + pod_restarts: "{{ pod_info[3] }}" + pod_age: "{{ pod_info[4] }}" + when: pod_info | length > 0 + block: 
+ - set_fact: + running_pods: "{{ running_pods + [pod_name] }}" + when: pod_status == 'Running' - - set_fact: - error_pods: "{{ error_pods + [pod_name] }}" - when: pod_status == 'Error' + - set_fact: + error_pods: "{{ error_pods + [pod_name] }}" + when: pod_status == 'Error' - - set_fact: - container_status_unknown_pods: "{{ container_status_unknown_pods + [pod_name] }}" - when: pod_status == 'ContainerStatusUnknown' + - set_fact: + container_status_unknown_pods: "{{ container_status_unknown_pods + [pod_name] }}" + when: pod_status == 'ContainerStatusUnknown' - - set_fact: - crashloop_backoff_pods: "{{ crashloop_backoff_pods + [pod_name] }}" - when: pod_status == 'CrashLoopBackOff' + - set_fact: + crashloop_backoff_pods: "{{ crashloop_backoff_pods + [pod_name] }}" + when: pod_status == 'CrashLoopBackOff' - - set_fact: - unknown_pods: "{{ unknown_pods + [pod_name] }}" - when: pod_status == 'Unknown' + - set_fact: + unknown_pods: "{{ unknown_pods + [pod_name] }}" + when: pod_status == 'Unknown' - name: Print Pod Information debug: From b90c5d48f42909a44197ec0b2ec17a06c183b86f Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 28 Feb 2024 10:35:12 +0530 Subject: [PATCH 330/616] Update check_pods_status.yml Test Check pod status --- ansible/check_pods_status.yml | 57 ++++++++--------------------------- 1 file changed, 12 insertions(+), 45 deletions(-) diff --git a/ansible/check_pods_status.yml b/ansible/check_pods_status.yml index d486e2bf2e..f2853800b0 100644 --- a/ansible/check_pods_status.yml +++ b/ansible/check_pods_status.yml @@ -26,66 +26,33 @@ when: pod_list is defined - name: Loop through pod list and categorize pods - loop: "{{ pod_list }}" - loop_control: - label: "{{ item }}" - when: item | length > 0 block: - - name: Extract Pod Information - set_fact: - pod_info: "{{ item.split() }}" - name: Categorize Pods - set_fact: - pod_name: "{{ pod_info[0] }}" - pod_status: "{{ pod_info[2] }}" - 
pod_restarts: "{{ pod_info[3] }}" - pod_age: "{{ pod_info[4] }}" - when: pod_info | length > 0 + loop: "{{ pod_list }}" + loop_control: + label: "{{ item }}" + when: item | length > 0 block: + - name: Extract Pod Information + set_fact: + pod_info: "{{ item.split() }}" + - set_fact: + pod_name: "{{ pod_info[0] }}" + pod_status: "{{ pod_info[2] }}" + pod_restarts: "{{ pod_info[3] }}" + pod_age: "{{ pod_info[4] }}" - set_fact: running_pods: "{{ running_pods + [pod_name] }}" when: pod_status == 'Running' - - set_fact: error_pods: "{{ error_pods + [pod_name] }}" when: pod_status == 'Error' - - set_fact: container_status_unknown_pods: "{{ container_status_unknown_pods + [pod_name] }}" when: pod_status == 'ContainerStatusUnknown' - - set_fact: crashloop_backoff_pods: "{{ crashloop_backoff_pods + [pod_name] }}" when: pod_status == 'CrashLoopBackOff' - - set_fact: unknown_pods: "{{ unknown_pods + [pod_name] }}" when: pod_status == 'Unknown' - - - name: Print Pod Information - debug: - msg: | - Running Pods: - {% for pod in running_pods %} - - {{ pod }} - {% endfor %} - - Error Pods: - {% for pod in error_pods %} - - {{ pod }} - {% endfor %} - - Container Status Unknown Pods: - {% for pod in container_status_unknown_pods %} - - {{ pod }} - {% endfor %} - - CrashLoop Back-Off Pods: - {% for pod in crashloop_backoff_pods %} - - {{ pod }} - {% endfor %} - - Unknown Pods: - {% for pod in unknown_pods %} - - {{ pod }} - {% endfor %} From 1ae61476dfd53c4b11e11ebd14b046e2dc531fd9 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 28 Feb 2024 10:36:57 +0530 Subject: [PATCH 331/616] Update check_pods_status.yml Test --- ansible/check_pods_status.yml | 86 ++++++++++++++++++----------------- 1 file changed, 45 insertions(+), 41 deletions(-) diff --git a/ansible/check_pods_status.yml b/ansible/check_pods_status.yml index f2853800b0..f9b642856c 100644 --- a/ansible/check_pods_status.yml +++ b/ansible/check_pods_status.yml @@ 
-5,54 +5,58 @@ KUBECONFIG: "{{ kubeconfig_path }}" tasks: - name: Check Non-Running Pods in Namespaces - shell: kubectl get pods -A - register: pod_status_output + shell: kubectl get pods -A | grep -v Running + register: non_running_pods changed_when: false failed_when: false ignore_errors: true - - name: Extract Pod Information + - name: Segregate Non-Running Pods set_fact: - pod_list: "{{ pod_status_output.stdout_lines[1:] }}" # Skip header line - when: pod_status_output.stdout_lines | length > 1 - - - name: Group Pods by Status - set_fact: - running_pods: [] error_pods: [] - container_status_unknown_pods: [] + container_unknown_pods: [] crashloop_backoff_pods: [] unknown_pods: [] - when: pod_list is defined + when: non_running_pods.stdout != "" - - name: Loop through pod list and categorize pods + - name: Extract Pod Status + set_fact: + pod_status: "{{ item.split()[2] }}" + loop: "{{ non_running_pods.stdout_lines }}" + when: item.split() | length > 2 + + - name: Group Pods by Status block: - - name: Categorize Pods - loop: "{{ pod_list }}" - loop_control: - label: "{{ item }}" - when: item | length > 0 - block: - - name: Extract Pod Information - set_fact: - pod_info: "{{ item.split() }}" - - set_fact: - pod_name: "{{ pod_info[0] }}" - pod_status: "{{ pod_info[2] }}" - pod_restarts: "{{ pod_info[3] }}" - pod_age: "{{ pod_info[4] }}" - - set_fact: - running_pods: "{{ running_pods + [pod_name] }}" - when: pod_status == 'Running' - - set_fact: - error_pods: "{{ error_pods + [pod_name] }}" - when: pod_status == 'Error' - - set_fact: - container_status_unknown_pods: "{{ container_status_unknown_pods + [pod_name] }}" - when: pod_status == 'ContainerStatusUnknown' - - set_fact: - crashloop_backoff_pods: "{{ crashloop_backoff_pods + [pod_name] }}" - when: pod_status == 'CrashLoopBackOff' - - set_fact: - unknown_pods: "{{ unknown_pods + [pod_name] }}" - when: pod_status == 'Unknown' + - name: Error Pods + set_fact: + error_pods: "{{ error_pods + [item] }}" + loop: "{{ 
pod_status }}" + when: item == 'Error' + + - name: Container Status Unknown Pods + set_fact: + container_unknown_pods: "{{ container_unknown_pods + [item] }}" + loop: "{{ pod_status }}" + when: item == 'ContainerCreating' or item == 'ContainerCreating,' or item == 'ContainerCreating,' + + - name: CrashLoop Back-Off Pods + set_fact: + crashloop_backoff_pods: "{{ crashloop_backoff_pods + [item] }}" + loop: "{{ pod_status }}" + when: item == 'CrashLoopBackOff' + + - name: Unknown Pods + set_fact: + unknown_pods: "{{ unknown_pods + [item] }}" + loop: "{{ pod_status }}" + when: item not in ['Error', 'ContainerCreating', 'CrashLoopBackOff'] + + when: pod_status | length > 0 + + - name: Print Segregated Non-Running Pods + debug: + msg: | + Error Pods: {{ error_pods | join(', ') }} + Container Status Unknown Pods: {{ container_unknown_pods | join(', ') }} + CrashLoop Back-Off Pods: {{ crashloop_backoff_pods | join(', ') }} + Unknown Pods: {{ unknown_pods | join(', ') }} From 49434bb866cfefe112c456beca3dafc550697127 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 28 Feb 2024 11:44:49 +0530 Subject: [PATCH 332/616] Update check_pods_status.yml Test --- ansible/check_pods_status.yml | 74 ++++++++++++----------------------- 1 file changed, 24 insertions(+), 50 deletions(-) diff --git a/ansible/check_pods_status.yml b/ansible/check_pods_status.yml index f9b642856c..5fdfb7f48d 100644 --- a/ansible/check_pods_status.yml +++ b/ansible/check_pods_status.yml @@ -1,62 +1,36 @@ +--- - name: Check Pod Status in All Namespaces hosts: localhost gather_facts: no environment: KUBECONFIG: "{{ kubeconfig_path }}" tasks: - - name: Check Non-Running Pods in Namespaces - shell: kubectl get pods -A | grep -v Running - register: non_running_pods + - name: Get all namespaces + shell: kubectl get ns -o jsonpath='{.items[*].metadata.name}' + register: namespaces_output changed_when: false - failed_when: false - ignore_errors: true - - 
name: Segregate Non-Running Pods + - name: Loop through each namespace set_fact: - error_pods: [] - container_unknown_pods: [] - crashloop_backoff_pods: [] - unknown_pods: [] - when: non_running_pods.stdout != "" - - - name: Extract Pod Status - set_fact: - pod_status: "{{ item.split()[2] }}" - loop: "{{ non_running_pods.stdout_lines }}" - when: item.split() | length > 2 - - - name: Group Pods by Status - block: - - name: Error Pods - set_fact: - error_pods: "{{ error_pods + [item] }}" - loop: "{{ pod_status }}" - when: item == 'Error' - - - name: Container Status Unknown Pods - set_fact: - container_unknown_pods: "{{ container_unknown_pods + [item] }}" - loop: "{{ pod_status }}" - when: item == 'ContainerCreating' or item == 'ContainerCreating,' or item == 'ContainerCreating,' - - - name: CrashLoop Back-Off Pods - set_fact: - crashloop_backoff_pods: "{{ crashloop_backoff_pods + [item] }}" - loop: "{{ pod_status }}" - when: item == 'CrashLoopBackOff' - - - name: Unknown Pods - set_fact: - unknown_pods: "{{ unknown_pods + [item] }}" - loop: "{{ pod_status }}" - when: item not in ['Error', 'ContainerCreating', 'CrashLoopBackOff'] - - when: pod_status | length > 0 + all_namespaces: "{{ namespaces_output.stdout.split() }}" + loop: "{{ all_namespaces }}" + register: loop_result + + - name: Check Pods in each namespace + shell: kubectl get pods -n {{ item }} --no-headers=true + loop: "{{ all_namespaces }}" + register: pods_by_namespace + changed_when: false + failed_when: pods_by_namespace.rc != 0 # Adding error handling - - name: Print Segregated Non-Running Pods + - name: Print Pods with specified statuses in each namespace debug: msg: | - Error Pods: {{ error_pods | join(', ') }} - Container Status Unknown Pods: {{ container_unknown_pods | join(', ') }} - CrashLoop Back-Off Pods: {{ crashloop_backoff_pods | join(', ') }} - Unknown Pods: {{ unknown_pods | join(', ') }} + Namespace: {{ item.item }} + {% for pod in item.stdout_lines %} + - {{ pod.split()[0] }}: {{ 
pod.split()[2] }} ({{ pod.split()[1] }}) + {% endfor %} + loop: "{{ pods_by_namespace.results }}" + vars: + regex: "(CrashLoopBackOff|ContainerStatusUnknown|Error|Unknown|Running)" + when: item.stdout_lines | select('match', regex) | list | length > 0 From 113550f24a074982b9a184cc637287e796877b32 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 28 Feb 2024 11:55:27 +0530 Subject: [PATCH 333/616] Update check_pods_status.yml Original File Added --- ansible/check_pods_status.yml | 34 ++++++++++------------------------ 1 file changed, 10 insertions(+), 24 deletions(-) diff --git a/ansible/check_pods_status.yml b/ansible/check_pods_status.yml index 5fdfb7f48d..0247937f64 100644 --- a/ansible/check_pods_status.yml +++ b/ansible/check_pods_status.yml @@ -5,32 +5,18 @@ environment: KUBECONFIG: "{{ kubeconfig_path }}" tasks: - - name: Get all namespaces - shell: kubectl get ns -o jsonpath='{.items[*].metadata.name}' - register: namespaces_output + - name: Check Non-Running Pods in Namespaces + shell: kubectl get pods -A | grep -v Running + register: non_running_pods changed_when: false + failed_when: false - - name: Loop through each namespace + - name: Parse Non-Running Pods set_fact: - all_namespaces: "{{ namespaces_output.stdout.split() }}" - loop: "{{ all_namespaces }}" - register: loop_result + non_running_pod_lines: "{{ non_running_pods.stdout_lines }}" + when: non_running_pods.stdout_lines | length > 0 - - name: Check Pods in each namespace - shell: kubectl get pods -n {{ item }} --no-headers=true - loop: "{{ all_namespaces }}" - register: pods_by_namespace - changed_when: false - failed_when: pods_by_namespace.rc != 0 # Adding error handling - - - name: Print Pods with specified statuses in each namespace + - name: Print Non-Running Pods debug: - msg: | - Namespace: {{ item.item }} - {% for pod in item.stdout_lines %} - - {{ pod.split()[0] }}: {{ pod.split()[2] }} ({{ pod.split()[1] }}) - {% endfor %} - 
loop: "{{ pods_by_namespace.results }}" - vars: - regex: "(CrashLoopBackOff|ContainerStatusUnknown|Error|Unknown|Running)" - when: item.stdout_lines | select('match', regex) | list | length > 0 + msg: "{{ non_running_pod_lines | join('\n') }}" + when: non_running_pod_lines is defined From 9c837dfece4837c15f063ebc81640db31ea74789 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Thu, 7 Mar 2024 15:51:54 +0530 Subject: [PATCH 334/616] Update Jenkinsfile adding mail-id for notification testing --- pipelines/check-service-status/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/check-service-status/Jenkinsfile b/pipelines/check-service-status/Jenkinsfile index 07c1395be2..5e3d086c0e 100644 --- a/pipelines/check-service-status/Jenkinsfile +++ b/pipelines/check-service-status/Jenkinsfile @@ -45,6 +45,6 @@ node() { } finally { slack_notify(currentBuild.result) - email_notify() + email_notify("bijesh.kashyap@trigyn.com") } } From f19bb86bf696448c0032b08385f31e9a2ecc42f5 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Thu, 7 Mar 2024 15:54:55 +0530 Subject: [PATCH 335/616] Update Jenkinsfile --- pipelines/check-service-status/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/check-service-status/Jenkinsfile b/pipelines/check-service-status/Jenkinsfile index 5e3d086c0e..465b3d5f46 100644 --- a/pipelines/check-service-status/Jenkinsfile +++ b/pipelines/check-service-status/Jenkinsfile @@ -45,6 +45,6 @@ node() { } finally { slack_notify(currentBuild.result) - email_notify("bijesh.kashyap@trigyn.com") + email_notify("alertsdiksha@trigyn.com") } } From bbb8cfeedb2d2da03463ac1d9dfed821072f84fb Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Thu, 7 Mar 2024 15:57:09 +0530 Subject: [PATCH 336/616] Update Jenkinsfile removing mail-id 
(bijesh.kashyap@trigyn.com) --- pipelines/check-service-status/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/check-service-status/Jenkinsfile b/pipelines/check-service-status/Jenkinsfile index 465b3d5f46..07c1395be2 100644 --- a/pipelines/check-service-status/Jenkinsfile +++ b/pipelines/check-service-status/Jenkinsfile @@ -45,6 +45,6 @@ node() { } finally { slack_notify(currentBuild.result) - email_notify("alertsdiksha@trigyn.com") + email_notify() } } From 899746bd59cdacf89368ff7ee8aaa0c570aa3137 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Thu, 14 Mar 2024 13:33:38 +0530 Subject: [PATCH 337/616] Create jenkinsfile created jenkins file for cassandra monitoring --- pipelines/cassandra-monitoring/jenkinsfile | 52 ++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 pipelines/cassandra-monitoring/jenkinsfile diff --git a/pipelines/cassandra-monitoring/jenkinsfile b/pipelines/cassandra-monitoring/jenkinsfile new file mode 100644 index 0000000000..bf791b83c5 --- /dev/null +++ b/pipelines/cassandra-monitoring/jenkinsfile @@ -0,0 +1,52 @@ +@Library('deploy-conf') _ +node() { + try { + timestamps { + String ANSI_GREEN = "\u001B[32m" + String ANSI_NORMAL = "\u001B[0m" + String ANSI_BOLD = "\u001B[1m" + String ANSI_RED = "\u001B[31m" + String ANSI_YELLOW = "\u001B[33m" + + stage('checkout public repo') { + folder = new File("$WORKSPACE/.git") + if (folder.exists()) + { + println "Found .git folder. Clearing it.." 
+ sh'git clean -fxd' + } + checkout scm + } + + ansiColor('xterm') { + stage('deploy'){ + values = [:] + currentWs = sh(returnStdout: true, script: 'pwd').trim() + envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() + module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() + jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() + ansiblePlaybook = "${currentWs}/ansible/cassandra-backup.yml" + ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass -vv" + values.put('currentWs', currentWs) + values.put('env', envDir) + values.put('module', module) + values.put('jobName', jobName) + values.put('ansiblePlaybook', ansiblePlaybook) + values.put('ansibleExtraArgs', ansibleExtraArgs) + println values + ansible_playbook_run(values) + currentBuild.result = 'SUCCESS' + currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" + } + } + } + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + finally { + slack_notify(currentBuild.result) + email_notify() + } +} From 10b72b269d58044ca96ac93ca5a17ba1dd07725b Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Thu, 14 Mar 2024 13:39:47 +0530 Subject: [PATCH 338/616] Create main.yml added host cassandra --- ansible/roles/cassandra-monitoring/tasks/main.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 ansible/roles/cassandra-monitoring/tasks/main.yml diff --git a/ansible/roles/cassandra-monitoring/tasks/main.yml b/ansible/roles/cassandra-monitoring/tasks/main.yml new file mode 100644 index 0000000000..351baacef8 --- /dev/null +++ b/ansible/roles/cassandra-monitoring/tasks/main.yml @@ -0,0 +1,11 @@ +- hosts: cassandra + tasks: + - name: Check Cassandra service status + service: + name: cassandra + state: started + register: cassandra_status + + - name: Print Cassandra service status + debug: + msg: 
"Cassandra Service Status: {{ cassandra_status.state }}" From e75660704380b3eec3e8e8917509543679f78720 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Thu, 14 Mar 2024 13:41:42 +0530 Subject: [PATCH 339/616] Update jenkinsfile updated ansible path --- pipelines/cassandra-monitoring/jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/cassandra-monitoring/jenkinsfile b/pipelines/cassandra-monitoring/jenkinsfile index bf791b83c5..d0c8feeaf5 100644 --- a/pipelines/cassandra-monitoring/jenkinsfile +++ b/pipelines/cassandra-monitoring/jenkinsfile @@ -25,7 +25,7 @@ node() { envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() - ansiblePlaybook = "${currentWs}/ansible/cassandra-backup.yml" + ansiblePlaybook = "${currentWs}/ansible/roles/cassandra-monitoring/tasks/main.yml" ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass -vv" values.put('currentWs', currentWs) values.put('env', envDir) From 09ae707d979a515830c362f49c1bceb21ecf9055 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Thu, 14 Mar 2024 17:06:16 +0530 Subject: [PATCH 340/616] Update jenkinsfile adding host services --- pipelines/cassandra-monitoring/jenkinsfile | 70 +++++++++++----------- 1 file changed, 34 insertions(+), 36 deletions(-) diff --git a/pipelines/cassandra-monitoring/jenkinsfile b/pipelines/cassandra-monitoring/jenkinsfile index d0c8feeaf5..07c1395be2 100644 --- a/pipelines/cassandra-monitoring/jenkinsfile +++ b/pipelines/cassandra-monitoring/jenkinsfile @@ -1,50 +1,48 @@ @Library('deploy-conf') _ node() { try { - timestamps { - String ANSI_GREEN = "\u001B[32m" - String ANSI_NORMAL = "\u001B[0m" - String ANSI_BOLD = "\u001B[1m" - String ANSI_RED = 
"\u001B[31m" - String ANSI_YELLOW = "\u001B[33m" + String ANSI_GREEN = "\u001B[32m" + String ANSI_NORMAL = "\u001B[0m" + String ANSI_BOLD = "\u001B[1m" + String ANSI_RED = "\u001B[31m" + String ANSI_YELLOW = "\u001B[33m" - stage('checkout public repo') { - folder = new File("$WORKSPACE/.git") - if (folder.exists()) - { - println "Found .git folder. Clearing it.." - sh'git clean -fxd' - } - checkout scm + stage('checkout public repo') { + folder = new File("$WORKSPACE/.git") + if (folder.exists()) + { + println "Found .git folder. Clearing it.." + sh'git clean -fxd' } + checkout scm + } - ansiColor('xterm') { - stage('deploy'){ - values = [:] - currentWs = sh(returnStdout: true, script: 'pwd').trim() - envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() - module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() - jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() - ansiblePlaybook = "${currentWs}/ansible/roles/cassandra-monitoring/tasks/main.yml" - ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass -vv" - values.put('currentWs', currentWs) - values.put('env', envDir) - values.put('module', module) - values.put('jobName', jobName) - values.put('ansiblePlaybook', ansiblePlaybook) - values.put('ansibleExtraArgs', ansibleExtraArgs) - println values - ansible_playbook_run(values) - currentBuild.result = 'SUCCESS' - currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" - } + ansiColor('xterm') { + stage('deploy'){ + values = [:] + envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() + module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() + jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() + currentWs = sh(returnStdout: true, script: 'pwd').trim() + ansiblePlaybook = "${currentWs}/ansible/check_services.yml" + ansibleExtraArgs = 
"--vault-password-file /var/lib/jenkins/secrets/vault-pass" + values.put('currentWs', currentWs) + values.put('env', envDir) + values.put('module', module) + values.put('jobName', jobName) + values.put('ansiblePlaybook', ansiblePlaybook) + values.put('ansibleExtraArgs', ansibleExtraArgs) + println values + ansible_playbook_run(values) + currentBuild.result = 'SUCCESS' + currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" } } } catch (err) { - currentBuild.result = "FAILURE" + currentBuild.result = 'FAILURE' throw err - } + } finally { slack_notify(currentBuild.result) email_notify() From 8b01ddd31e5001c86b2f0cc3df6561e86b740ace Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Thu, 14 Mar 2024 17:29:49 +0530 Subject: [PATCH 341/616] Update and rename jenkinsfile to Jenkinsfile --- pipelines/cassandra-monitoring/{jenkinsfile => Jenkinsfile} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pipelines/cassandra-monitoring/{jenkinsfile => Jenkinsfile} (100%) diff --git a/pipelines/cassandra-monitoring/jenkinsfile b/pipelines/cassandra-monitoring/Jenkinsfile similarity index 100% rename from pipelines/cassandra-monitoring/jenkinsfile rename to pipelines/cassandra-monitoring/Jenkinsfile From 6162df41d76dfa1c1c0fd3f1eb0c4e2e4ec87783 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Thu, 14 Mar 2024 21:08:50 +0530 Subject: [PATCH 342/616] Create cassandra_monitoring.yml --- ansible/cassandra_monitoring.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 ansible/cassandra_monitoring.yml diff --git a/ansible/cassandra_monitoring.yml b/ansible/cassandra_monitoring.yml new file mode 100644 index 0000000000..351baacef8 --- /dev/null +++ b/ansible/cassandra_monitoring.yml @@ -0,0 +1,11 @@ +- hosts: cassandra + tasks: + - name: Check Cassandra service status + service: + name: cassandra + state: started + 
register: cassandra_status + + - name: Print Cassandra service status + debug: + msg: "Cassandra Service Status: {{ cassandra_status.state }}" From c5076baa0e64d3b540112f9bafb20ba36619693c Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Thu, 14 Mar 2024 21:12:44 +0530 Subject: [PATCH 343/616] Update Jenkinsfile adding ansible yml path --- pipelines/cassandra-monitoring/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/cassandra-monitoring/Jenkinsfile b/pipelines/cassandra-monitoring/Jenkinsfile index 07c1395be2..8b2241f1e2 100644 --- a/pipelines/cassandra-monitoring/Jenkinsfile +++ b/pipelines/cassandra-monitoring/Jenkinsfile @@ -24,7 +24,7 @@ node() { module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() - ansiblePlaybook = "${currentWs}/ansible/check_services.yml" + ansiblePlaybook = "${currentWs}/ansible/cassandra_monitoring.yml" ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) From c35d1d8d0843163a4a61c688bd08ec604214b198 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Fri, 15 Mar 2024 12:42:39 +0530 Subject: [PATCH 344/616] Update cassandra_monitoring.yml updating backup scripts for cassandra --- ansible/cassandra_monitoring.yml | 38 +++++++++++++++++++++++--------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/ansible/cassandra_monitoring.yml b/ansible/cassandra_monitoring.yml index 351baacef8..04e11eb008 100644 --- a/ansible/cassandra_monitoring.yml +++ b/ansible/cassandra_monitoring.yml @@ -1,11 +1,27 @@ -- hosts: cassandra - tasks: - - name: Check Cassandra service status - service: - name: cassandra - state: started - register: cassandra_status 
- - - name: Print Cassandra service status - debug: - msg: "Cassandra Service Status: {{ cassandra_status.state }}" +backup_script: + description: "Backup Cassandra syslogs" + script: | + #!/bin/bash + + # Directory containing Cassandra syslogs + log_dir="/var/log/cassandra" + + # Directory to store backups + backup_dir="/path/to/backup/directory" + + # Create backup directory if it doesn't exist + mkdir -p "$backup_dir" + + # Timestamp for the backup file + timestamp=$(date +"%Y%m%d%H%M%S") + + # Backup filename + backup_file="cassandra_syslogs_$timestamp.tar.gz" + + # Change directory to Cassandra log directory + cd "$log_dir" || exit + + # Compress syslog files into a tarball + tar -czf "$backup_dir/$backup_file" . + + echo "Backup completed: $backup_dir/$backup_file" From c05366d10ac5a9d14d76b75670182c943859e298 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Fri, 15 Mar 2024 12:57:13 +0530 Subject: [PATCH 345/616] Update cassandra_monitoring.yml modified backup scripts --- ansible/cassandra_monitoring.yml | 38 +++++++++----------------------- 1 file changed, 11 insertions(+), 27 deletions(-) diff --git a/ansible/cassandra_monitoring.yml b/ansible/cassandra_monitoring.yml index 04e11eb008..fc8d05ae61 100644 --- a/ansible/cassandra_monitoring.yml +++ b/ansible/cassandra_monitoring.yml @@ -1,27 +1,11 @@ -backup_script: - description: "Backup Cassandra syslogs" - script: | - #!/bin/bash - - # Directory containing Cassandra syslogs - log_dir="/var/log/cassandra" - - # Directory to store backups - backup_dir="/path/to/backup/directory" - - # Create backup directory if it doesn't exist - mkdir -p "$backup_dir" - - # Timestamp for the backup file - timestamp=$(date +"%Y%m%d%H%M%S") - - # Backup filename - backup_file="cassandra_syslogs_$timestamp.tar.gz" - - # Change directory to Cassandra log directory - cd "$log_dir" || exit - - # Compress syslog files into a tarball - tar -czf "$backup_dir/$backup_file" . 
- - echo "Backup completed: $backup_dir/$backup_file" +- hosts: cassandra + tasks: + - name: Check Cassandra system log + shell: + cmd: tail -n 5 /var/log/cassandra/system.log # Adjust the number of lines as needed + register: cassandra_system_log + - name: Print Cassandra system log + debug: + msg: "Cassandra System Log: {{ cassandra_system_log.stdout }}" + +has context menu From 5e48762b8f90a824a31bcccf0cd4e63d1a668263 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Fri, 15 Mar 2024 13:01:00 +0530 Subject: [PATCH 346/616] Update cassandra_monitoring.yml --- ansible/cassandra_monitoring.yml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/ansible/cassandra_monitoring.yml b/ansible/cassandra_monitoring.yml index fc8d05ae61..655e0cc7be 100644 --- a/ansible/cassandra_monitoring.yml +++ b/ansible/cassandra_monitoring.yml @@ -1,11 +1,17 @@ - hosts: cassandra + tasks: + - name: Check Cassandra system log + shell: + cmd: tail -n 5 /var/log/cassandra/system.log # Adjust the number of lines as needed + register: cassandra_system_log + - name: Print Cassandra system log + debug: - msg: "Cassandra System Log: {{ cassandra_system_log.stdout }}" -has context menu + msg: "Cassandra System Log: {{ cassandra_system_log.stdout }}" From 4f4585b68fc32f93b0f4619868568ba4eb955924 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Sat, 16 Mar 2024 13:07:14 +0530 Subject: [PATCH 347/616] Update Jenkinsfile Adding email address to get notify --- pipelines/cassandra-monitoring/Jenkinsfile | 31 +++++++++++++++++++--- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/pipelines/cassandra-monitoring/Jenkinsfile b/pipelines/cassandra-monitoring/Jenkinsfile index 8b2241f1e2..c43a9f378b 100644 --- a/pipelines/cassandra-monitoring/Jenkinsfile +++ b/pipelines/cassandra-monitoring/Jenkinsfile @@ -1,4 +1,5 @@ @Library('deploy-conf') _ + node() { try { 
String ANSI_GREEN = "\u001B[32m" @@ -9,10 +10,9 @@ node() { stage('checkout public repo') { folder = new File("$WORKSPACE/.git") - if (folder.exists()) - { - println "Found .git folder. Clearing it.." - sh'git clean -fxd' + if (folder.exists()) { + println "Found .git folder. Clearing it.." + sh'git clean -fxd' } checkout scm } @@ -48,3 +48,26 @@ node() { email_notify() } } + +def email_notify() { + emailext ( + subject: 'Jenkins Pipeline Status', + body: "Pipeline ${currentBuild.result}: ${currentBuild.description}", + to: 'alertsdiksha@trigyn.com' + ) +} + +pipeline { + agent any + stages { + stage('Email Jenkins Pipeline') { + steps { + emailext ( + subject: 'Email Jenkins Pipeline', + body: 'Hello, This is an email from Jenkins pipeline.', + to: 'alertsdiksha@trigyn.com' + ) + } + } + } +} From 7e642de653c375757b4b3c327465e13a8b6f30ab Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Sat, 16 Mar 2024 13:21:19 +0530 Subject: [PATCH 348/616] Update Jenkinsfile modify email notify code --- pipelines/cassandra-monitoring/Jenkinsfile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pipelines/cassandra-monitoring/Jenkinsfile b/pipelines/cassandra-monitoring/Jenkinsfile index c43a9f378b..2cb7cd606e 100644 --- a/pipelines/cassandra-monitoring/Jenkinsfile +++ b/pipelines/cassandra-monitoring/Jenkinsfile @@ -50,7 +50,8 @@ node() { } def email_notify() { - emailext ( + // Send email notification using the built-in 'emaile' step + emaile( subject: 'Jenkins Pipeline Status', body: "Pipeline ${currentBuild.result}: ${currentBuild.description}", to: 'alertsdiksha@trigyn.com' @@ -62,7 +63,8 @@ pipeline { stages { stage('Email Jenkins Pipeline') { steps { - emailext ( + // Send email notification using the built-in 'emaile' step + emaile( subject: 'Email Jenkins Pipeline', body: 'Hello, This is an email from Jenkins pipeline.', to: 'alertsdiksha@trigyn.com' From 83e812449cfa69416abb20b49eea21f0ac8c1af2 
Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Sat, 16 Mar 2024 13:27:44 +0530 Subject: [PATCH 349/616] Update Jenkinsfile Updates email notify code --- pipelines/cassandra-monitoring/Jenkinsfile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pipelines/cassandra-monitoring/Jenkinsfile b/pipelines/cassandra-monitoring/Jenkinsfile index 2cb7cd606e..4ce949d414 100644 --- a/pipelines/cassandra-monitoring/Jenkinsfile +++ b/pipelines/cassandra-monitoring/Jenkinsfile @@ -50,8 +50,8 @@ node() { } def email_notify() { - // Send email notification using the built-in 'emaile' step - emaile( + // Send email notification using the built-in 'mail' step + mail ( subject: 'Jenkins Pipeline Status', body: "Pipeline ${currentBuild.result}: ${currentBuild.description}", to: 'alertsdiksha@trigyn.com' @@ -63,8 +63,8 @@ pipeline { stages { stage('Email Jenkins Pipeline') { steps { - // Send email notification using the built-in 'emaile' step - emaile( + // Send email notification using the built-in 'mail' step + mail ( subject: 'Email Jenkins Pipeline', body: 'Hello, This is an email from Jenkins pipeline.', to: 'alertsdiksha@trigyn.com' From a906e3a67e2251c7516d4dd38e3034f30ed01fb6 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Sat, 16 Mar 2024 13:53:49 +0530 Subject: [PATCH 350/616] Update Jenkinsfile modified code for proper described notification --- pipelines/cassandra-monitoring/Jenkinsfile | 53 ++++++++++++++-------- 1 file changed, 34 insertions(+), 19 deletions(-) diff --git a/pipelines/cassandra-monitoring/Jenkinsfile b/pipelines/cassandra-monitoring/Jenkinsfile index 4ce949d414..f309b1a41e 100644 --- a/pipelines/cassandra-monitoring/Jenkinsfile +++ b/pipelines/cassandra-monitoring/Jenkinsfile @@ -49,27 +49,42 @@ node() { } } -def email_notify() { - // Send email notification using the built-in 'mail' step - mail ( - subject: 'Jenkins 
Pipeline Status', - body: "Pipeline ${currentBuild.result}: ${currentBuild.description}", - to: 'alertsdiksha@trigyn.com' - ) -} +def call(String email_list = "alertsdiksha@gov.in") { + try { + ansiColor('xterm') { + String ANSI_GREEN = "\u001B[32m" + String ANSI_NORMAL = "\u001B[0m" + String ANSI_BOLD = "\u001B[1m" + String ANSI_RED = "\u001B[31m" + String ANSI_YELLOW = "\u001B[33m" -pipeline { - agent any - stages { - stage('Email Jenkins Pipeline') { - steps { - // Send email notification using the built-in 'mail' step - mail ( - subject: 'Email Jenkins Pipeline', - body: 'Hello, This is an email from Jenkins pipeline.', - to: 'alertsdiksha@trigyn.com' - ) + if(email_list.length() > 0){ + emailext body: '''$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS: Check console output at $BUILD_URL to view the results.''', subject: '$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!', to: email_list, attachLog: true, compressLog: true + return + } + stage('email_notify') { + try { + envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() + email_group_name = envDir.toUpperCase() + "_EMAIL_GROUP" + email_group = evaluate "$email_group_name" + emailext body: '''$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS: Check console output at $BUILD_URL to view the results.''', subject: '$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!', to: email_group + return + } + catch (MissingPropertyException ex) { + println ANSI_YELLOW + ANSI_BOLD + "Could not find env specific email group. Check for global email group.." + ANSI_NORMAL + } + catch (ArrayIndexOutOfBoundsException ex) { + println ANSI_YELLOW + ANSI_BOLD + "Could not find env specific email group. Check for global email group.." 
+ ANSI_NORMAL + } + + if(env.GLOBAL_EMAIL_GROUP != null) + emailext body: '''$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS: Check console output at $BUILD_URL to view the results.''', subject: '$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!', to: env.GLOBAL_EMAIL_GROUP + else + println ANSI_YELLOW + ANSI_BOLD + "Could not find global email group variable. Skipping email notification.." + ANSI_NORMAL } } } + catch (err){ + throw err + } } From f7e287215735acf898d903beb7f68601a08c84ae Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Sat, 16 Mar 2024 14:04:12 +0530 Subject: [PATCH 351/616] Update Jenkinsfile Updated codes for notify --- pipelines/cassandra-monitoring/Jenkinsfile | 139 +++++++++++---------- 1 file changed, 73 insertions(+), 66 deletions(-) diff --git a/pipelines/cassandra-monitoring/Jenkinsfile b/pipelines/cassandra-monitoring/Jenkinsfile index f309b1a41e..381f639710 100644 --- a/pipelines/cassandra-monitoring/Jenkinsfile +++ b/pipelines/cassandra-monitoring/Jenkinsfile @@ -1,54 +1,5 @@ @Library('deploy-conf') _ - -node() { - try { - String ANSI_GREEN = "\u001B[32m" - String ANSI_NORMAL = "\u001B[0m" - String ANSI_BOLD = "\u001B[1m" - String ANSI_RED = "\u001B[31m" - String ANSI_YELLOW = "\u001B[33m" - - stage('checkout public repo') { - folder = new File("$WORKSPACE/.git") - if (folder.exists()) { - println "Found .git folder. Clearing it.." 
- sh'git clean -fxd' - } - checkout scm - } - - ansiColor('xterm') { - stage('deploy'){ - values = [:] - envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() - module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() - jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() - currentWs = sh(returnStdout: true, script: 'pwd').trim() - ansiblePlaybook = "${currentWs}/ansible/cassandra_monitoring.yml" - ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass" - values.put('currentWs', currentWs) - values.put('env', envDir) - values.put('module', module) - values.put('jobName', jobName) - values.put('ansiblePlaybook', ansiblePlaybook) - values.put('ansibleExtraArgs', ansibleExtraArgs) - println values - ansible_playbook_run(values) - currentBuild.result = 'SUCCESS' - currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" - } - } - } - catch (err) { - currentBuild.result = 'FAILURE' - throw err - } - finally { - slack_notify(currentBuild.result) - email_notify() - } -} - + def call(String email_list = "alertsdiksha@gov.in") { try { ansiColor('xterm') { @@ -57,34 +8,90 @@ def call(String email_list = "alertsdiksha@gov.in") { String ANSI_BOLD = "\u001B[1m" String ANSI_RED = "\u001B[31m" String ANSI_YELLOW = "\u001B[33m" - - if(email_list.length() > 0){ - emailext body: '''$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS: Check console output at $BUILD_URL to view the results.''', subject: '$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!', to: email_list, attachLog: true, compressLog: true - return - } + + if (email_list.length() > 0) { + // Send email using the built-in 'mail' step + mail ( + to: email_list, + subject: "$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!", + body: "Pipeline ${currentBuild.result}: Check console output at $BUILD_URL to view the results." 
+ ) + return + } + stage('email_notify') { try { envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() email_group_name = envDir.toUpperCase() + "_EMAIL_GROUP" email_group = evaluate "$email_group_name" - emailext body: '''$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS: Check console output at $BUILD_URL to view the results.''', subject: '$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!', to: email_group + // Send email using the built-in 'mail' step + mail ( + to: email_group, + subject: "$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!", + body: "Pipeline ${currentBuild.result}: Check console output at $BUILD_URL to view the results." + ) return - } - catch (MissingPropertyException ex) { + } catch (MissingPropertyException ex) { println ANSI_YELLOW + ANSI_BOLD + "Could not find env specific email group. Check for global email group.." + ANSI_NORMAL - } - catch (ArrayIndexOutOfBoundsException ex) { + } catch (ArrayIndexOutOfBoundsException ex) { println ANSI_YELLOW + ANSI_BOLD + "Could not find env specific email group. Check for global email group.." + ANSI_NORMAL } - - if(env.GLOBAL_EMAIL_GROUP != null) - emailext body: '''$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS: Check console output at $BUILD_URL to view the results.''', subject: '$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!', to: env.GLOBAL_EMAIL_GROUP - else + + if (env.GLOBAL_EMAIL_GROUP != null) { + // Send email using the built-in 'mail' step + mail ( + to: env.GLOBAL_EMAIL_GROUP, + subject: "$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!", + body: "Pipeline ${currentBuild.result}: Check console output at $BUILD_URL to view the results." + ) + } else { println ANSI_YELLOW + ANSI_BOLD + "Could not find global email group variable. Skipping email notification.." 
+ ANSI_NORMAL + } } } - } - catch (err){ + } catch (err) { throw err } } + +pipeline { + agent any + stages { + stage('checkout public repo') { + steps { + script { + folder = new File("$WORKSPACE/.git") + if (folder.exists()) { + println "Found .git folder. Clearing it.." + sh 'git clean -fxd' + } + checkout scm + } + } + } + + stage('deploy') { + steps { + script { + values = [:] + envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() + module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() + jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() + currentWs = sh(returnStdout: true, script: 'pwd').trim() + ansiblePlaybook = "${currentWs}/ansible/cassandra_monitoring.yml" + ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass" + values.put('currentWs', currentWs) + values.put('env', envDir) + values.put('module', module) + values.put('jobName', jobName) + values.put('ansiblePlaybook', ansiblePlaybook) + values.put('ansibleExtraArgs', ansibleExtraArgs) + println values + ansible_playbook_run(values) + currentBuild.result = 'SUCCESS' + currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" + } + } + } + } +} From f458f5b644ab4a58b953160c40ffe1cdfebb373c Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Sat, 16 Mar 2024 14:21:46 +0530 Subject: [PATCH 352/616] Update Jenkinsfile Updates code for Notification with full description --- pipelines/cassandra-monitoring/Jenkinsfile | 140 +++++++++------------ 1 file changed, 59 insertions(+), 81 deletions(-) diff --git a/pipelines/cassandra-monitoring/Jenkinsfile b/pipelines/cassandra-monitoring/Jenkinsfile index 381f639710..b513bdf882 100644 --- a/pipelines/cassandra-monitoring/Jenkinsfile +++ b/pipelines/cassandra-monitoring/Jenkinsfile @@ -1,96 +1,74 @@ @Library('deploy-conf') _ - -def call(String email_list = 
"alertsdiksha@gov.in") { + +node() { try { - ansiColor('xterm') { - String ANSI_GREEN = "\u001B[32m" - String ANSI_NORMAL = "\u001B[0m" - String ANSI_BOLD = "\u001B[1m" - String ANSI_RED = "\u001B[31m" - String ANSI_YELLOW = "\u001B[33m" - - if (email_list.length() > 0) { - // Send email using the built-in 'mail' step - mail ( - to: email_list, - subject: "$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!", - body: "Pipeline ${currentBuild.result}: Check console output at $BUILD_URL to view the results." - ) - return + String ANSI_GREEN = "\u001B[32m" + String ANSI_NORMAL = "\u001B[0m" + String ANSI_BOLD = "\u001B[1m" + String ANSI_RED = "\u001B[31m" + String ANSI_YELLOW = "\u001B[33m" + + stage('checkout public repo') { + folder = new File("$WORKSPACE/.git") + if (folder.exists()) { + println "Found .git folder. Clearing it.." + sh'git clean -fxd' } - - stage('email_notify') { - try { - envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() - email_group_name = envDir.toUpperCase() + "_EMAIL_GROUP" - email_group = evaluate "$email_group_name" - // Send email using the built-in 'mail' step - mail ( - to: email_group, - subject: "$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!", - body: "Pipeline ${currentBuild.result}: Check console output at $BUILD_URL to view the results." - ) - return - } catch (MissingPropertyException ex) { - println ANSI_YELLOW + ANSI_BOLD + "Could not find env specific email group. Check for global email group.." + ANSI_NORMAL - } catch (ArrayIndexOutOfBoundsException ex) { - println ANSI_YELLOW + ANSI_BOLD + "Could not find env specific email group. Check for global email group.." + ANSI_NORMAL - } - - if (env.GLOBAL_EMAIL_GROUP != null) { - // Send email using the built-in 'mail' step - mail ( - to: env.GLOBAL_EMAIL_GROUP, - subject: "$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!", - body: "Pipeline ${currentBuild.result}: Check console output at $BUILD_URL to view the results." 
- ) - } else { - println ANSI_YELLOW + ANSI_BOLD + "Could not find global email group variable. Skipping email notification.." + ANSI_NORMAL - } + checkout scm + } + + ansiColor('xterm') { + stage('deploy'){ + values = [:] + envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() + module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() + jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() + currentWs = sh(returnStdout: true, script: 'pwd').trim() + ansiblePlaybook = "${currentWs}/ansible/cassandra_monitoring.yml" + ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass" + values.put('currentWs', currentWs) + values.put('env', envDir) + values.put('module', module) + values.put('jobName', jobName) + values.put('ansiblePlaybook', ansiblePlaybook) + values.put('ansibleExtraArgs', ansibleExtraArgs) + println values + ansible_playbook_run(values) + currentBuild.result = 'SUCCESS' + currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" } } - } catch (err) { + } + catch (err) { + currentBuild.result = 'FAILURE' throw err + } + finally { + slack_notify(currentBuild.result) + email_notify() } } - + +def email_notify() { + // Send email notification using the built-in 'mail' step + mail ( + subject: 'Jenkins Pipeline Status', + body: "'''$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS: Check console output at $BUILD_URL to view the results.''', subject: '$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!', to: email_list, attachLog: true, compressLog: true}", + to: 'alertsdiksha@trigyn.com' + ) +} + pipeline { agent any stages { - stage('checkout public repo') { + stage('Email Jenkins Pipeline') { steps { - script { - folder = new File("$WORKSPACE/.git") - if (folder.exists()) { - println "Found .git folder. Clearing it.." 
- sh 'git clean -fxd' - } - checkout scm - } - } - } - - stage('deploy') { - steps { - script { - values = [:] - envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() - module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() - jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() - currentWs = sh(returnStdout: true, script: 'pwd').trim() - ansiblePlaybook = "${currentWs}/ansible/cassandra_monitoring.yml" - ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass" - values.put('currentWs', currentWs) - values.put('env', envDir) - values.put('module', module) - values.put('jobName', jobName) - values.put('ansiblePlaybook', ansiblePlaybook) - values.put('ansibleExtraArgs', ansibleExtraArgs) - println values - ansible_playbook_run(values) - currentBuild.result = 'SUCCESS' - currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" - } + // Send email notification using the built-in 'mail' step + mail ( + subject: 'Email Jenkins Pipeline', + body: 'Hello, This is an email from Dev Jenkins pipeline.', + to: 'alertsdiksha@trigyn.com' + ) } } } From 7f05463fe82e686f854e5f9ffbf9d786e48f148c Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Sat, 16 Mar 2024 14:24:32 +0530 Subject: [PATCH 353/616] Update Jenkinsfile --- pipelines/cassandra-monitoring/Jenkinsfile | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/pipelines/cassandra-monitoring/Jenkinsfile b/pipelines/cassandra-monitoring/Jenkinsfile index b513bdf882..0becf49697 100644 --- a/pipelines/cassandra-monitoring/Jenkinsfile +++ b/pipelines/cassandra-monitoring/Jenkinsfile @@ -57,19 +57,3 @@ def email_notify() { to: 'alertsdiksha@trigyn.com' ) } - -pipeline { - agent any - stages { - stage('Email Jenkins Pipeline') { - steps { - // Send email notification using the built-in 'mail' step - mail ( - subject: 'Email 
Jenkins Pipeline', - body: 'Hello, This is an email from Dev Jenkins pipeline.', - to: 'alertsdiksha@trigyn.com' - ) - } - } - } -} From b41eed524b630ab674aa299089febf003c9fb366 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Sat, 16 Mar 2024 14:30:01 +0530 Subject: [PATCH 354/616] Update Jenkinsfile Updates code for notification testing --- pipelines/cassandra-monitoring/Jenkinsfile | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/pipelines/cassandra-monitoring/Jenkinsfile b/pipelines/cassandra-monitoring/Jenkinsfile index 0becf49697..9b19bbb7ce 100644 --- a/pipelines/cassandra-monitoring/Jenkinsfile +++ b/pipelines/cassandra-monitoring/Jenkinsfile @@ -49,11 +49,22 @@ node() { } } -def email_notify() { +def email_notify(email_list = "alertsdiksha@trigyn.com") { + // Send email notification using the built-in 'mail' step + mail ( - subject: 'Jenkins Pipeline Status', - body: "'''$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS: Check console output at $BUILD_URL to view the results.''', subject: '$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!', to: email_list, attachLog: true, compressLog: true}", - to: 'alertsdiksha@trigyn.com' + + subject: "$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!", + + body: """$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS: Check console output at $BUILD_URL to view the results.""", + + to: email_list, + + attachLog: true, + + compressLog: true + ) + } From b51880093b5dfe4814b123d278b3e8e75fa993ae Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Sat, 16 Mar 2024 14:37:45 +0530 Subject: [PATCH 355/616] Update Jenkinsfile updated proper job name in code --- pipelines/cassandra-monitoring/Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pipelines/cassandra-monitoring/Jenkinsfile b/pipelines/cassandra-monitoring/Jenkinsfile index 
9b19bbb7ce..fb879b6958 100644 --- a/pipelines/cassandra-monitoring/Jenkinsfile +++ b/pipelines/cassandra-monitoring/Jenkinsfile @@ -55,9 +55,9 @@ def email_notify(email_list = "alertsdiksha@trigyn.com") { mail ( - subject: "$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!", + subject: "$JOB_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!", - body: """$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS: Check console output at $BUILD_URL to view the results.""", + body: """$JOB_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS: Check console output at $BUILD_URL to view the results.""", to: email_list, From 58cc4ddf1ee977a59282f60caea8a9a9068e2462 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Sun, 17 Mar 2024 11:59:20 +0530 Subject: [PATCH 356/616] Update Jenkinsfile --- pipelines/cassandra-monitoring/Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pipelines/cassandra-monitoring/Jenkinsfile b/pipelines/cassandra-monitoring/Jenkinsfile index fb879b6958..de51f259d7 100644 --- a/pipelines/cassandra-monitoring/Jenkinsfile +++ b/pipelines/cassandra-monitoring/Jenkinsfile @@ -55,9 +55,9 @@ def email_notify(email_list = "alertsdiksha@trigyn.com") { mail ( - subject: "$JOB_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS!", + subject: "$JOB_NAME!", - body: """$JOB_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS: Check console output at $BUILD_URL to view the results.""", + body: """$JOB_NAME""", to: email_list, From d105a8e85ba89ae7fcad3d3f198c30d6cc7f3e1e Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Sun, 17 Mar 2024 12:10:18 +0530 Subject: [PATCH 357/616] Update Jenkinsfile Modified notify code for notification. 
--- pipelines/cassandra-monitoring/Jenkinsfile | 33 +++++++++++++--------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/pipelines/cassandra-monitoring/Jenkinsfile b/pipelines/cassandra-monitoring/Jenkinsfile index de51f259d7..210a50b638 100644 --- a/pipelines/cassandra-monitoring/Jenkinsfile +++ b/pipelines/cassandra-monitoring/Jenkinsfile @@ -49,22 +49,27 @@ node() { } } -def email_notify(email_list = "alertsdiksha@trigyn.com") { - +def email_notify() { // Send email notification using the built-in 'mail' step - mail ( - - subject: "$JOB_NAME!", - - body: """$JOB_NAME""", - - to: email_list, - - attachLog: true, - - compressLog: true - + subject: 'Jenkins Pipeline Status', + body: "Pipeline ${currentBuild.result}: ${currentBuild.description}", + to: 'alertsdiksha@trigyn.com' ) +} +pipeline { + agent any + stages { + stage('Email Jenkins Pipeline') { + steps { + // Send email notification using the built-in 'mail' step + mail ( + subject: 'Email Jenkins Pipeline', + body: 'Hello, This is an email from Dev Jenkins pipeline.', + to: 'alertsdiksha@trigyn.com' + ) + } + } + } } From 4e98c538f1d00d0a85a9a96c4cef483677573076 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Sun, 17 Mar 2024 12:30:59 +0530 Subject: [PATCH 358/616] Update Jenkinsfile --- pipelines/cassandra-monitoring/Jenkinsfile | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/pipelines/cassandra-monitoring/Jenkinsfile b/pipelines/cassandra-monitoring/Jenkinsfile index 210a50b638..76b21e02c9 100644 --- a/pipelines/cassandra-monitoring/Jenkinsfile +++ b/pipelines/cassandra-monitoring/Jenkinsfile @@ -53,23 +53,7 @@ def email_notify() { // Send email notification using the built-in 'mail' step mail ( subject: 'Jenkins Pipeline Status', - body: "Pipeline ${currentBuild.result}: ${currentBuild.description}", + body: """$JOB_NAME - Pipeline ${currentBuild.result}. 
Hello, This is an email from Dev Jenkins pipeline.""", to: 'alertsdiksha@trigyn.com' ) } - -pipeline { - agent any - stages { - stage('Email Jenkins Pipeline') { - steps { - // Send email notification using the built-in 'mail' step - mail ( - subject: 'Email Jenkins Pipeline', - body: 'Hello, This is an email from Dev Jenkins pipeline.', - to: 'alertsdiksha@trigyn.com' - ) - } - } - } -} From 86e5b6aff31105b34260a3fef624f0317013bc01 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Sun, 17 Mar 2024 12:35:50 +0530 Subject: [PATCH 359/616] Update Jenkinsfile --- pipelines/cassandra-monitoring/Jenkinsfile | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/pipelines/cassandra-monitoring/Jenkinsfile b/pipelines/cassandra-monitoring/Jenkinsfile index 76b21e02c9..a901644a32 100644 --- a/pipelines/cassandra-monitoring/Jenkinsfile +++ b/pipelines/cassandra-monitoring/Jenkinsfile @@ -50,10 +50,27 @@ node() { } def email_notify() { + + def buildNumber = env.BUILD_NUMBER + + def jobUrl = env.JOB_URL + // Send email notification using the built-in 'mail' step + mail ( + subject: 'Jenkins Pipeline Status', - body: """$JOB_NAME - Pipeline ${currentBuild.result}. Hello, This is an email from Dev Jenkins pipeline.""", + + body: """$JOB_NAME - Pipeline ${currentBuild.result}. 
+ + Build Number: $buildNumber + + Job URL: $jobUrl + + Hello, This is an email from Dev Jenkins pipeline.""", + to: 'alertsdiksha@trigyn.com' + ) + } From e88a9b66d0367580b376f85107b639eb914cdd18 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Mon, 18 Mar 2024 18:27:45 +0530 Subject: [PATCH 360/616] Update cassandra_monitoring.yml Updated code for cassandra syslog backup --- ansible/cassandra_monitoring.yml | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/ansible/cassandra_monitoring.yml b/ansible/cassandra_monitoring.yml index 655e0cc7be..fccf3ddf12 100644 --- a/ansible/cassandra_monitoring.yml +++ b/ansible/cassandra_monitoring.yml @@ -1,17 +1,12 @@ -- hosts: cassandra - +--- +- name: Retrieve Cassandra system log + hosts: cassandra + gather_facts: no tasks: - - name: Check Cassandra system log - shell: - cmd: tail -n 5 /var/log/cassandra/system.log # Adjust the number of lines as needed - register: cassandra_system_log - - name: Print Cassandra system log - debug: - msg: "Cassandra System Log: {{ cassandra_system_log.stdout }}" From 6e8c7d775874a8bf9b6595125edb79f7d39152f0 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 20 Mar 2024 13:27:16 +0530 Subject: [PATCH 361/616] Create Jenkinsfile --- pipelines/kafka-lags-monitoring/Jenkinsfile | 52 +++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 pipelines/kafka-lags-monitoring/Jenkinsfile diff --git a/pipelines/kafka-lags-monitoring/Jenkinsfile b/pipelines/kafka-lags-monitoring/Jenkinsfile new file mode 100644 index 0000000000..6df655e2e3 --- /dev/null +++ b/pipelines/kafka-lags-monitoring/Jenkinsfile @@ -0,0 +1,52 @@ +@Library('deploy-conf') _ +node() { + try { + timestamps { + String ANSI_GREEN = "\u001B[32m" + String ANSI_NORMAL = "\u001B[0m" + String ANSI_BOLD = "\u001B[1m" + String ANSI_RED = "\u001B[31m" + String ANSI_YELLOW = "\u001B[33m" + 
+ stage('checkout public repo') { + folder = new File("$WORKSPACE/.git") + if (folder.exists()) + { + println "Found .git folder. Clearing it.." + sh'git clean -fxd' + } + checkout scm + } + + ansiColor('xterm') { + stage('deploy'){ + values = [:] + currentWs = sh(returnStdout: true, script: 'pwd').trim() + envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() + module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() + jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() + ansiblePlaybook = "${currentWs}/ansible/kafka-lags-monitoring.yml" + ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass -vv" + values.put('currentWs', currentWs) + values.put('env', envDir) + values.put('module', module) + values.put('jobName', jobName) + values.put('ansiblePlaybook', ansiblePlaybook) + values.put('ansibleExtraArgs', ansibleExtraArgs) + println values + ansible_playbook_run(values) + currentBuild.result = 'SUCCESS' + currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" + } + } + } + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + finally { + slack_notify(currentBuild.result) + email_notify() + } +} From 187ab7f77e9074f4607eb3298b6bf28d768c55c9 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 20 Mar 2024 13:49:08 +0530 Subject: [PATCH 362/616] Create kafka-lags-monitoring.yml --- ansible/kafka-lags-monitoring.yml | 42 +++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 ansible/kafka-lags-monitoring.yml diff --git a/ansible/kafka-lags-monitoring.yml b/ansible/kafka-lags-monitoring.yml new file mode 100644 index 0000000000..8525b28b6b --- /dev/null +++ b/ansible/kafka-lags-monitoring.yml @@ -0,0 +1,42 @@ +- name: Display Kafka consumer group status + hosts: processing-cluster-kafka-1 + gather_facts: no + tasks: + - name: 
Loop through Kafka consumer groups and check lag status + command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" + register: consumer_group_output + loop: + - "dev-audit-event-generator-group" + - "telemetry-group" + - "prometheus-metrics-consumer" + - "dev-post-publish-processor-group" + - "ml-project-service" + - "dev-audit-history-indexer-group" + - "learning-127.0.1.1" + - "dev-search-indexer-group" + - "outbound" + - "dev-enrolment-reconciliation-group" + - "devsamiksha" + - "dev-relation-cache-updater-group" + - "dev-content-publish-group" + - "dev-qrcode-image-generator-group" + loop_control: + label: "{{ item }}" + + - name: Print Kafka lag status for each group + debug: + msg: | + Consumer group '{{ item.item }}' has no active members. + + GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID + {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} + + {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} + {% if lag <= 9 %} + Kafka lag for group '{{ item.item }}' is normal (0 to 9) + {% else %} + Kafka lag for group '{{ item.item }}' is high + {% endif %} + loop: "{{ consumer_group_output.results }}" + loop_control: + label: "{{ item.item }}" From ea1fe973f0dc9bbbd580b563a3ea9dfeaf5bca7f Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 20 Mar 2024 14:09:29 +0530 Subject: [PATCH 363/616] Update kafka-lags-monitoring.yml --- ansible/kafka-lags-monitoring.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ansible/kafka-lags-monitoring.yml b/ansible/kafka-lags-monitoring.yml index 8525b28b6b..f0f5b919ef 100644 --- a/ansible/kafka-lags-monitoring.yml +++ b/ansible/kafka-lags-monitoring.yml @@ -1,5 +1,5 @@ - name: Display Kafka consumer group status - 
hosts: processing-cluster-kafka-1 + hosts: ingestion-cluster-kafka gather_facts: no tasks: - name: Loop through Kafka consumer groups and check lag status @@ -26,6 +26,7 @@ - name: Print Kafka lag status for each group debug: msg: | + ---------------------------------------- Consumer group '{{ item.item }}' has no active members. GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID From 842d59255277b11b3684845ea208d2bedcefae5e Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 20 Mar 2024 16:54:36 +0530 Subject: [PATCH 364/616] Rename kafka-lags-monitoring.yml to kafka_lags_monitoring.yml --- ansible/{kafka-lags-monitoring.yml => kafka_lags_monitoring.yml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename ansible/{kafka-lags-monitoring.yml => kafka_lags_monitoring.yml} (100%) diff --git a/ansible/kafka-lags-monitoring.yml b/ansible/kafka_lags_monitoring.yml similarity index 100% rename from ansible/kafka-lags-monitoring.yml rename to ansible/kafka_lags_monitoring.yml From 3235c52a4f43d4ec33bf28e079fd1f4d16d1b5e7 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 20 Mar 2024 16:55:16 +0530 Subject: [PATCH 365/616] Update Jenkinsfile --- pipelines/kafka-lags-monitoring/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/kafka-lags-monitoring/Jenkinsfile b/pipelines/kafka-lags-monitoring/Jenkinsfile index 6df655e2e3..d8c06cf7e1 100644 --- a/pipelines/kafka-lags-monitoring/Jenkinsfile +++ b/pipelines/kafka-lags-monitoring/Jenkinsfile @@ -25,7 +25,7 @@ node() { envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() - ansiblePlaybook = "${currentWs}/ansible/kafka-lags-monitoring.yml" + 
ansiblePlaybook = "${currentWs}/ansible/kafka_lags_monitoring.yml" ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass -vv" values.put('currentWs', currentWs) values.put('env', envDir) From 5734006de2f00c07594c6cfeb5ac4d27cecfb626 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 20 Mar 2024 18:08:06 +0530 Subject: [PATCH 366/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index f0f5b919ef..54f3aaa181 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -38,6 +38,7 @@ {% else %} Kafka lag for group '{{ item.item }}' is high {% endif %} + ---------------------------------------- loop: "{{ consumer_group_output.results }}" loop_control: label: "{{ item.item }}" From 8b139890fa47bd414abe1f4fa30b2cd96daa9f6d Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 20 Mar 2024 18:10:18 +0530 Subject: [PATCH 367/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 54f3aaa181..fb5ccc5971 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,7 +26,6 @@ - name: Print Kafka lag status for each group debug: msg: | - ---------------------------------------- Consumer group '{{ item.item }}' has no active members. 
GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID From 94d5212a40332f63cfb7398a5ae3e1074b1e8257 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 20 Mar 2024 18:14:19 +0530 Subject: [PATCH 368/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index fb5ccc5971..0442356ec3 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -35,7 +35,7 @@ {% if lag <= 9 %} Kafka lag for group '{{ item.item }}' is normal (0 to 9) {% else %} - Kafka lag for group '{{ item.item }}' is high + \033[0;31mKafka lag for group '{{ item.item }}' is high\033[0m {% endif %} ---------------------------------------- loop: "{{ consumer_group_output.results }}" From b3832b1e286005e7dd713b1948b084017a078e98 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 20 Mar 2024 18:22:56 +0530 Subject: [PATCH 369/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 0442356ec3..fb5ccc5971 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -35,7 +35,7 @@ {% if lag <= 9 %} Kafka lag for group '{{ item.item }}' is normal (0 to 9) {% else %} - \033[0;31mKafka lag for group '{{ item.item }}' is high\033[0m + Kafka lag for group '{{ item.item }}' is high {% endif %} ---------------------------------------- loop: "{{ consumer_group_output.results }}" From 624e0e4dcf262c21df9c5379ebd9688f6103ba1f Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 09:10:53 +0530 Subject: [PATCH 370/616] Update 
Jenkinsfile --- pipelines/kafka-lags-monitoring/Jenkinsfile | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/pipelines/kafka-lags-monitoring/Jenkinsfile b/pipelines/kafka-lags-monitoring/Jenkinsfile index d8c06cf7e1..d2ce1c4750 100644 --- a/pipelines/kafka-lags-monitoring/Jenkinsfile +++ b/pipelines/kafka-lags-monitoring/Jenkinsfile @@ -50,3 +50,24 @@ node() { email_notify() } } + +def email_notify() { + def buildNumber = env.BUILD_NUMBER + def jobUrl = env.JOB_URL + def pipelineName = "Your Pipeline Name" // Replace "Your Pipeline Name" with the actual name of your pipeline + + Send email notification using the 'mail' step + mail ( + to: 'alertsdiksha@trigyn.com', + subject: "Jenkins Pipeline Status - ${currentBuild.result}", + body: """ + Job Name: ${JOB_NAME} + Build Number: ${buildNumber} + Job URL: ${jobUrl} + Pipeline Name: ${pipelineName} + Pipeline Status: ${currentBuild.result} + + Hello, This is an email from ${pipelineName} pipeline. + """ + ) +} From d74c3ae8163544e947414c434a81fb0a58052b16 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 09:19:28 +0530 Subject: [PATCH 371/616] Update Jenkinsfile --- pipelines/kafka-lags-monitoring/Jenkinsfile | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/pipelines/kafka-lags-monitoring/Jenkinsfile b/pipelines/kafka-lags-monitoring/Jenkinsfile index d2ce1c4750..d8c06cf7e1 100644 --- a/pipelines/kafka-lags-monitoring/Jenkinsfile +++ b/pipelines/kafka-lags-monitoring/Jenkinsfile @@ -50,24 +50,3 @@ node() { email_notify() } } - -def email_notify() { - def buildNumber = env.BUILD_NUMBER - def jobUrl = env.JOB_URL - def pipelineName = "Your Pipeline Name" // Replace "Your Pipeline Name" with the actual name of your pipeline - - Send email notification using the 'mail' step - mail ( - to: 'alertsdiksha@trigyn.com', - subject: "Jenkins Pipeline Status - ${currentBuild.result}", - body: """ - Job Name: 
${JOB_NAME} - Build Number: ${buildNumber} - Job URL: ${jobUrl} - Pipeline Name: ${pipelineName} - Pipeline Status: ${currentBuild.result} - - Hello, This is an email from ${pipelineName} pipeline. - """ - ) -} From f823c1907b5a1ae490663db69c868d2bd32ed05e Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 11:43:59 +0530 Subject: [PATCH 372/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 35 +++++++++++++------------------ 1 file changed, 14 insertions(+), 21 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index fb5ccc5971..87a5af8505 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -2,24 +2,14 @@ hosts: ingestion-cluster-kafka gather_facts: no tasks: + - name: Get Kafka consumer group list + command: /opt/kafka/bin/kafka-topics.sh --bootstrap-server localhost:9092 --list + register: kafka_group_list + - name: Loop through Kafka consumer groups and check lag status command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" register: consumer_group_output - loop: - - "dev-audit-event-generator-group" - - "telemetry-group" - - "prometheus-metrics-consumer" - - "dev-post-publish-processor-group" - - "ml-project-service" - - "dev-audit-history-indexer-group" - - "learning-127.0.1.1" - - "dev-search-indexer-group" - - "outbound" - - "dev-enrolment-reconciliation-group" - - "devsamiksha" - - "dev-relation-cache-updater-group" - - "dev-content-publish-group" - - "dev-qrcode-image-generator-group" + loop: "{{ kafka_group_list.stdout_lines }}" loop_control: label: "{{ item }}" @@ -28,14 +18,17 @@ msg: | Consumer group '{{ item.item }}' has no active members. 
- GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID - {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} + GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID + {% for line in item.stdout_lines[1:] %} + {{ line }} + {% endfor %} - {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} - {% if lag <= 9 %} - Kafka lag for group '{{ item.item }}' is normal (0 to 9) + {% set lag = item.stdout | regex_findall('LAG\\s+(\\d+)') | map('int') | list %} + {% set high_lag = lag | select('>=', 10) | list %} + {% if high_lag %} + Kafka lag for group '{{ item.item }}' is high (greater than 9) {% else %} - Kafka lag for group '{{ item.item }}' is high + Kafka lag for group '{{ item.item }}' is normal (0 to 9) {% endif %} ---------------------------------------- loop: "{{ consumer_group_output.results }}" From 765e58b0f9ee3faf861c4ac2c374cee94de90680 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 13:32:42 +0530 Subject: [PATCH 373/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 50 +++++++++++++++++++++---------- 1 file changed, 34 insertions(+), 16 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 87a5af8505..3e0b2c1770 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -2,35 +2,53 @@ hosts: ingestion-cluster-kafka gather_facts: no tasks: - - name: Get Kafka consumer group list - command: /opt/kafka/bin/kafka-topics.sh --bootstrap-server localhost:9092 --list - register: kafka_group_list - - name: Loop through Kafka consumer groups and check lag status command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" register: 
consumer_group_output - loop: "{{ kafka_group_list.stdout_lines }}" + loop: + - "dev-audit-event-generator-group" + - "telemetry-group" + - "prometheus-metrics-consumer" + - "dev-post-publish-processor-group" + - "ml-project-service" + - "dev-audit-history-indexer-group" + - "learning-127.0.1.1" + - "dev-search-indexer-group" + - "outbound" + - "dev-enrolment-reconciliation-group" + - "devsamiksha" + - "dev-relation-cache-updater-group" + - "dev-content-publish-group" + - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - - name: Print Kafka lag status for each group debug: msg: | + {% if item.stdout.find('No such consumer group') == -1 %} Consumer group '{{ item.item }}' has no active members. - - GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID - {% for line in item.stdout_lines[1:] %} - {{ line }} + +-------------------------------------------------------------------+ + | Consumer group: '{{ item.item }}' | + +------------------+------------------+----------+------------------+ + | GROUP | TOPIC | PARTITION | LAG | + +------------------+------------------+-----------+------------------+ + {% set consumer_details = item.stdout | regex_replace('.*?GROUP\\s+TOPIC\\s+PARTITION\\s+CURRENT-OFFSET\\s+LOG-END-OFFSET\\s+LAG\\s+CONSUMER-ID\\s+HOST\\s+CLIENT-ID\\s+(.*?)\\n.*', '\\1', dotall=True) %} + {% set consumer_lines = consumer_details.split('\n') %} + {% for line in consumer_lines %} + {% if line %} + {% set columns = line.split() %} + | {{ columns[0] | center(16) }} | {{ columns[1] | center(16) }} | {{ columns[2] | center(10) }} | {{ columns[5] | center(16) }} | + {% endif %} {% endfor %} - - {% set lag = item.stdout | regex_findall('LAG\\s+(\\d+)') | map('int') | list %} - {% set high_lag = lag | select('>=', 10) | list %} - {% if high_lag %} - Kafka lag for group '{{ item.item }}' is high (greater than 9) - {% else %} + +------------------+------------------+-----------+------------------+ + {% set lag = 
item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} + {% if lag <= 9 %} Kafka lag for group '{{ item.item }}' is normal (0 to 9) + {% else %} + Kafka lag for group '{{ item.item }}' is high {% endif %} ---------------------------------------- + {% endif %} loop: "{{ consumer_group_output.results }}" loop_control: label: "{{ item.item }}" From 17242635162729580306918dd4067aa5ed161e27 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 13:35:47 +0530 Subject: [PATCH 374/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 3e0b2c1770..7adf6fc9e9 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -12,7 +12,7 @@ - "dev-post-publish-processor-group" - "ml-project-service" - "dev-audit-history-indexer-group" - - "learning-127.0.1.1" + #- "learning-127.0.1.1" - "dev-search-indexer-group" - "outbound" - "dev-enrolment-reconciliation-group" @@ -32,7 +32,7 @@ +------------------+------------------+----------+------------------+ | GROUP | TOPIC | PARTITION | LAG | +------------------+------------------+-----------+------------------+ - {% set consumer_details = item.stdout | regex_replace('.*?GROUP\\s+TOPIC\\s+PARTITION\\s+CURRENT-OFFSET\\s+LOG-END-OFFSET\\s+LAG\\s+CONSUMER-ID\\s+HOST\\s+CLIENT-ID\\s+(.*?)\\n.*', '\\1', dotall=True) %} + {% set consumer_details = item.stdout | regex_replace('.*?GROUP\\s+TOPIC\\s+PARTITION\\s+CURRENT-OFFSET\\s+LOG-END-OFFSET\\s+LAG\\s+CONSUMER-ID\\s+HOST\\s+CLIENT-ID\\s+(.*?)\\n.*', '\\1') %} {% set consumer_lines = consumer_details.split('\n') %} {% for line in consumer_lines %} {% if line %} From 562c6b3ea104d76d328c1465613c1dd7be2ad63a Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> 
Date: Thu, 21 Mar 2024 13:41:44 +0530 Subject: [PATCH 375/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 7adf6fc9e9..78f4b2b251 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -27,20 +27,20 @@ msg: | {% if item.stdout.find('No such consumer group') == -1 %} Consumer group '{{ item.item }}' has no active members. - +-------------------------------------------------------------------+ - | Consumer group: '{{ item.item }}' | - +------------------+------------------+----------+------------------+ - | GROUP | TOPIC | PARTITION | LAG | - +------------------+------------------+-----------+------------------+ + +-----------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Consumer group: '{{ item.item }}' | + +------------------+------------------+-----------+------------------+------------------+------------------+------------------+------------------+------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + +------------------+------------------+-----------+------------------+------------------+------------------+------------------+------------------+------------------+ {% set consumer_details = item.stdout | regex_replace('.*?GROUP\\s+TOPIC\\s+PARTITION\\s+CURRENT-OFFSET\\s+LOG-END-OFFSET\\s+LAG\\s+CONSUMER-ID\\s+HOST\\s+CLIENT-ID\\s+(.*?)\\n.*', '\\1') %} {% set consumer_lines = consumer_details.split('\n') %} {% for line in consumer_lines %} {% if line %} {% set columns = line.split() %} - | {{ columns[0] | center(16) }} | {{ columns[1] | center(16) }} | {{ columns[2] | center(10) }} | {{ columns[5] | center(16) }} | + | {{ columns[0] | center(16) }} | {{ columns[1] | 
center(16) }} | {{ columns[2] | center(10) }} | {{ columns[3] | center(16) }} | {{ columns[4] | center(16) }} | {{ columns[5] | center(16) }} | {{ columns[6] | center(16) }} | {{ columns[7] | center(16) }} | {% endif %} {% endfor %} - +------------------+------------------+-----------+------------------+ + +------------------+------------------+-----------+------------------+------------------+------------------+------------------+------------------+------------------+ {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} {% if lag <= 9 %} Kafka lag for group '{{ item.item }}' is normal (0 to 9) From 638f813ce5f21aba25dc8e04fcc13b358ec03478 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 13:45:35 +0530 Subject: [PATCH 376/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 78f4b2b251..ac3653adbd 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -12,7 +12,7 @@ - "dev-post-publish-processor-group" - "ml-project-service" - "dev-audit-history-indexer-group" - #- "learning-127.0.1.1" + - "learning-127.0.1.1" - "dev-search-indexer-group" - "outbound" - "dev-enrolment-reconciliation-group" @@ -27,10 +27,10 @@ msg: | {% if item.stdout.find('No such consumer group') == -1 %} Consumer group '{{ item.item }}' has no active members. 
- +-----------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Consumer group: '{{ item.item }}' | + +---------------------------------------------------------------------------------------------------------------------------------------+ + | Consumer group: '{{ item.item }}' | +------------------+------------------+-----------+------------------+------------------+------------------+------------------+------------------+------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +------------------+------------------+-----------+------------------+------------------+------------------+------------------+------------------+------------------+ {% set consumer_details = item.stdout | regex_replace('.*?GROUP\\s+TOPIC\\s+PARTITION\\s+CURRENT-OFFSET\\s+LOG-END-OFFSET\\s+LAG\\s+CONSUMER-ID\\s+HOST\\s+CLIENT-ID\\s+(.*?)\\n.*', '\\1') %} {% set consumer_lines = consumer_details.split('\n') %} From b53105b15fd0632818e68c5ade5430a0cd4c583d Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 13:54:35 +0530 Subject: [PATCH 377/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index ac3653adbd..7b3130a5fb 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -27,8 +27,8 @@ msg: | {% if item.stdout.find('No such consumer group') == -1 %} Consumer group '{{ item.item }}' has no active members. 
- +---------------------------------------------------------------------------------------------------------------------------------------+ - | Consumer group: '{{ item.item }}' | + +-------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Consumer group: '{{ item.item }}' | +------------------+------------------+-----------+------------------+------------------+------------------+------------------+------------------+------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +------------------+------------------+-----------+------------------+------------------+------------------+------------------+------------------+------------------+ @@ -37,7 +37,7 @@ {% for line in consumer_lines %} {% if line %} {% set columns = line.split() %} - | {{ columns[0] | center(16) }} | {{ columns[1] | center(16) }} | {{ columns[2] | center(10) }} | {{ columns[3] | center(16) }} | {{ columns[4] | center(16) }} | {{ columns[5] | center(16) }} | {{ columns[6] | center(16) }} | {{ columns[7] | center(16) }} | + | {{ columns[0] | center(18) }} | {{ columns[1] | center(18) }} | {{ columns[2] | center(10) }} | {{ columns[3] | center(16) }} | {{ columns[4] | center(16) }} | {{ columns[5] | center(16) }} | {{ columns[6] | center(16) }} | {{ columns[7] | center(16) }} | {% endif %} {% endfor %} +------------------+------------------+-----------+------------------+------------------+------------------+------------------+------------------+------------------+ From 006b9e67677fe8083af78654ca90047b2b576b5e Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 14:00:04 +0530 Subject: [PATCH 378/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git 
a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 7b3130a5fb..a37d6985a9 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,8 +26,7 @@ debug: msg: | {% if item.stdout.find('No such consumer group') == -1 %} - Consumer group '{{ item.item }}' has no active members. - +-------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +--------------------------------------------------------------------------------------------------------------------------------------------------------+ | Consumer group: '{{ item.item }}' | +------------------+------------------+-----------+------------------+------------------+------------------+------------------+------------------+------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | @@ -36,8 +35,7 @@ {% set consumer_lines = consumer_details.split('\n') %} {% for line in consumer_lines %} {% if line %} - {% set columns = line.split() %} - | {{ columns[0] | center(18) }} | {{ columns[1] | center(18) }} | {{ columns[2] | center(10) }} | {{ columns[3] | center(16) }} | {{ columns[4] | center(16) }} | {{ columns[5] | center(16) }} | {{ columns[6] | center(16) }} | {{ columns[7] | center(16) }} | + | {{ line.center(18) }} | {% endif %} {% endfor %} +------------------+------------------+-----------+------------------+------------------+------------------+------------------+------------------+------------------+ From c8d01a7f3bf562f3d7ca53083b8b78471e70e3fd Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 14:08:45 +0530 Subject: [PATCH 379/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml 
b/ansible/kafka_lags_monitoring.yml index a37d6985a9..5aa3f7ae80 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,8 +26,7 @@ debug: msg: | {% if item.stdout.find('No such consumer group') == -1 %} - +--------------------------------------------------------------------------------------------------------------------------------------------------------+ - | Consumer group: '{{ item.item }}' | + Consumer group: '{{ item.item }}' | +------------------+------------------+-----------+------------------+------------------+------------------+------------------+------------------+------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +------------------+------------------+-----------+------------------+------------------+------------------+------------------+------------------+------------------+ @@ -35,7 +34,8 @@ {% set consumer_lines = consumer_details.split('\n') %} {% for line in consumer_lines %} {% if line %} - | {{ line.center(18) }} | + {% set columns = line.split() %} + | {{ columns[0][:14].center(14) }} | {{ columns[1][:14].center(14) }} | {{ columns[2].center(9) }} | {{ columns[3].center(18) }} | {{ columns[4].center(18) }} | {{ columns[5].center(10) }} | {{ columns[6].center(16) }} | {{ columns[7].center(16) }} | {{ columns[8].center(16) }} | {% endif %} {% endfor %} +------------------+------------------+-----------+------------------+------------------+------------------+------------------+------------------+------------------+ From 54ce808a7c0b6ec4e6f2579ab8eab9962fe7aa63 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 14:14:22 +0530 Subject: [PATCH 380/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml 
index 5aa3f7ae80..abd64cd48e 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,19 +26,19 @@ debug: msg: | {% if item.stdout.find('No such consumer group') == -1 %} - Consumer group: '{{ item.item }}' | - +------------------+------------------+-----------+------------------+------------------+------------------+------------------+------------------+------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | - +------------------+------------------+-----------+------------------+------------------+------------------+------------------+------------------+------------------+ + Consumer group: '{{ item.item }}' + +----------------------+----------------------+-----------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + +----------------------+----------------------+-----------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+ {% set consumer_details = item.stdout | regex_replace('.*?GROUP\\s+TOPIC\\s+PARTITION\\s+CURRENT-OFFSET\\s+LOG-END-OFFSET\\s+LAG\\s+CONSUMER-ID\\s+HOST\\s+CLIENT-ID\\s+(.*?)\\n.*', '\\1') %} {% set consumer_lines = consumer_details.split('\n') %} {% for line in consumer_lines %} {% if line %} {% set columns = line.split() %} - | {{ columns[0][:14].center(14) }} | {{ columns[1][:14].center(14) }} | {{ columns[2].center(9) }} | {{ columns[3].center(18) }} | {{ columns[4].center(18) }} | {{ columns[5].center(10) }} | {{ columns[6].center(16) }} | {{ columns[7].center(16) }} | {{ columns[8].center(16) }} | + | {{ columns[0][:20].center(20) }} | {{ columns[1][:20].center(20) }} | {{ columns[2].center(9) }} | {{ columns[3].center(20) }} | {{ columns[4].center(20) }} 
| {{ columns[5].center(20) }} | {{ columns[6].center(20) }} | {{ columns[7].center(20) }} | {{ columns[8].center(20) }} | {% endif %} {% endfor %} - +------------------+------------------+-----------+------------------+------------------+------------------+------------------+------------------+------------------+ + +----------------------+----------------------+-----------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+ {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} {% if lag <= 9 %} Kafka lag for group '{{ item.item }}' is normal (0 to 9) From 4532b55bf4f2d33c0b279fb95f14ae2e788ad392 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 14:28:56 +0530 Subject: [PATCH 381/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index abd64cd48e..6a15caa024 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,7 +26,7 @@ debug: msg: | {% if item.stdout.find('No such consumer group') == -1 %} - Consumer group: '{{ item.item }}' + Consumer group: '{{ item.item }}' +----------------------+----------------------+-----------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +----------------------+----------------------+-----------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+ From 33106dfbb214c49d833efef2a0f0485558a83c33 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj 
<121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 14:42:31 +0530 Subject: [PATCH 382/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 6a15caa024..95b72b23a4 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,16 +26,19 @@ debug: msg: | {% if item.stdout.find('No such consumer group') == -1 %} - Consumer group: '{{ item.item }}' - +----------------------+----------------------+-----------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | - +----------------------+----------------------+-----------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+ + {% set header_printed = false %} {% set consumer_details = item.stdout | regex_replace('.*?GROUP\\s+TOPIC\\s+PARTITION\\s+CURRENT-OFFSET\\s+LOG-END-OFFSET\\s+LAG\\s+CONSUMER-ID\\s+HOST\\s+CLIENT-ID\\s+(.*?)\\n.*', '\\1') %} {% set consumer_lines = consumer_details.split('\n') %} {% for line in consumer_lines %} {% if line %} + {% if not header_printed %} + +----------------------+----------------------+-----------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + +----------------------+----------------------+-----------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+ + {% set header_printed = true %} + {% endif %} 
{% set columns = line.split() %} - | {{ columns[0][:20].center(20) }} | {{ columns[1][:20].center(20) }} | {{ columns[2].center(9) }} | {{ columns[3].center(20) }} | {{ columns[4].center(20) }} | {{ columns[5].center(20) }} | {{ columns[6].center(20) }} | {{ columns[7].center(20) }} | {{ columns[8].center(20) }} | + | {{ columns[0][:15].center(15) }} | {{ columns[1][:15].center(15) }} | {{ columns[2].center(9) }} | {{ columns[3].center(15) }} | {{ columns[4].center(15) }} | {{ columns[5].center(15) }} | {{ columns[6].center(15) }} | {{ columns[7].center(15) }} | {{ columns[8].center(15) }} | {% endif %} {% endfor %} +----------------------+----------------------+-----------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+ From 0962d06349516ceef5c1933d262170e214a74455 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 14:50:07 +0530 Subject: [PATCH 383/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 95b72b23a4..30c83a52cc 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -3,7 +3,7 @@ gather_facts: no tasks: - name: Loop through Kafka consumer groups and check lag status - command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" + command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe register: consumer_group_output loop: - "dev-audit-event-generator-group" @@ -32,23 +32,16 @@ {% for line in consumer_lines %} {% if line %} {% if not header_printed %} - 
+----------------------+----------------------+-----------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | - +----------------------+----------------------+-----------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+ + +--------+--------+-----------+--------+--------+--------+--------+--------+--------+ + | GROUP | TOPIC | PARTITION | OFFSET | LOG | LAG | CON-ID | HOST | CLIENT | + +--------+--------+-----------+--------+--------+--------+--------+--------+--------+ {% set header_printed = true %} {% endif %} {% set columns = line.split() %} - | {{ columns[0][:15].center(15) }} | {{ columns[1][:15].center(15) }} | {{ columns[2].center(9) }} | {{ columns[3].center(15) }} | {{ columns[4].center(15) }} | {{ columns[5].center(15) }} | {{ columns[6].center(15) }} | {{ columns[7].center(15) }} | {{ columns[8].center(15) }} | + | {{ columns[0][:6].center(6) }} | {{ columns[1][:6].center(6) }} | {{ columns[2].center(9) }} | {{ columns[3][:6].center(6) }} | {{ columns[4][:6].center(6) }} | {{ columns[5].center(6) }} | {{ columns[6][:6].center(6) }} | {{ columns[7][:6].center(6) }} | {{ columns[8][:6].center(6) }} | {% endif %} {% endfor %} - +----------------------+----------------------+-----------+----------------------+----------------------+----------------------+----------------------+----------------------+----------------------+ - {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} - {% if lag <= 9 %} - Kafka lag for group '{{ item.item }}' is normal (0 to 9) - {% else %} - Kafka lag for group '{{ item.item }}' is high - {% endif %} - ---------------------------------------- + 
+--------+--------+-----------+--------+--------+--------+--------+--------+--------+ {% endif %} loop: "{{ consumer_group_output.results }}" loop_control: From 276f65b151ac2da913c9515428a87fa84d496e59 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 15:00:17 +0530 Subject: [PATCH 384/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 32 ++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 30c83a52cc..1d9d2b5c05 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -25,24 +25,26 @@ - name: Print Kafka lag status for each group debug: msg: | - {% if item.stdout.find('No such consumer group') == -1 %} {% set header_printed = false %} - {% set consumer_details = item.stdout | regex_replace('.*?GROUP\\s+TOPIC\\s+PARTITION\\s+CURRENT-OFFSET\\s+LOG-END-OFFSET\\s+LAG\\s+CONSUMER-ID\\s+HOST\\s+CLIENT-ID\\s+(.*?)\\n.*', '\\1') %} - {% set consumer_lines = consumer_details.split('\n') %} - {% for line in consumer_lines %} - {% if line %} - {% if not header_printed %} - +--------+--------+-----------+--------+--------+--------+--------+--------+--------+ - | GROUP | TOPIC | PARTITION | OFFSET | LOG | LAG | CON-ID | HOST | CLIENT | - +--------+--------+-----------+--------+--------+--------+--------+--------+--------+ - {% set header_printed = true %} - {% endif %} - {% set columns = line.split() %} - | {{ columns[0][:6].center(6) }} | {{ columns[1][:6].center(6) }} | {{ columns[2].center(9) }} | {{ columns[3][:6].center(6) }} | {{ columns[4][:6].center(6) }} | {{ columns[5].center(6) }} | {{ columns[6][:6].center(6) }} | {{ columns[7][:6].center(6) }} | {{ columns[8][:6].center(6) }} | - {% endif %} + {% for item in consumer_group_output.results %} + {% if item.stdout.find('No such consumer group') == -1 %} + {% set 
consumer_details = item.stdout | regex_replace('.*?GROUP\\s+TOPIC\\s+PARTITION\\s+CURRENT-OFFSET\\s+LOG-END-OFFSET\\s+LAG\\s+CONSUMER-ID\\s+HOST\\s+CLIENT-ID\\s+(.*?)\\n.*', '\\1') %} + {% set consumer_lines = consumer_details.split('\n') %} + {% for line in consumer_lines %} + {% if line %} + {% if not header_printed %} + +--------+--------+-----------+--------+--------+--------+--------+--------+--------+ + | GROUP | TOPIC | PARTITION | OFFSET | LOG | LAG | CON-ID | HOST | CLIENT | + +--------+--------+-----------+--------+--------+--------+--------+--------+--------+ + {% set header_printed = true %} + {% endif %} + {% set columns = line.split() %} + | {{ columns[0][:6].center(6) }} | {{ columns[1][:6].center(6) }} | {{ columns[2].center(9) }} | {{ columns[3][:6].center(6) }} | {{ columns[4][:6].center(6) }} | {{ columns[5].center(6) }} | {{ columns[6][:6].center(6) }} | {{ columns[7][:6].center(6) }} | {{ columns[8][:6].center(6) }} | + {% endif %} + {% endfor %} + {% endif %} {% endfor %} +--------+--------+-----------+--------+--------+--------+--------+--------+--------+ - {% endif %} loop: "{{ consumer_group_output.results }}" loop_control: label: "{{ item.item }}" From 41367c74310c7cbbde0638496184bf1a015fdb38 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 15:04:47 +0530 Subject: [PATCH 385/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 1d9d2b5c05..eade37aef7 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -3,7 +3,7 @@ gather_facts: no tasks: - name: Loop through Kafka consumer groups and check lag status - command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe + command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server 
"localhost:9092" --group "{{ item }}" --describe register: consumer_group_output loop: - "dev-audit-event-generator-group" From 7787377c1175dd8b23b5c1fc07d44e661598e582 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 15:14:45 +0530 Subject: [PATCH 386/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index eade37aef7..90a6b3b2a1 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,9 +1,10 @@ +--- - name: Display Kafka consumer group status - hosts: ingestion-cluster-kafka + hosts: localhost gather_facts: no tasks: - name: Loop through Kafka consumer groups and check lag status - command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --group "{{ item }}" --describe + command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" register: consumer_group_output loop: - "dev-audit-event-generator-group" @@ -33,18 +34,18 @@ {% for line in consumer_lines %} {% if line %} {% if not header_printed %} - +--------+--------+-----------+--------+--------+--------+--------+--------+--------+ - | GROUP | TOPIC | PARTITION | OFFSET | LOG | LAG | CON-ID | HOST | CLIENT | - +--------+--------+-----------+--------+--------+--------+--------+--------+--------+ + +---------------------+------------------+-----------+----------------+----------------+-----+------------+------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | + +---------------------+------------------+-----------+----------------+----------------+-----+------------+------------+ {% set header_printed = true %} {% endif %} {% set columns = line.split() %} - | {{ columns[0][:6].center(6) }} | {{ 
columns[1][:6].center(6) }} | {{ columns[2].center(9) }} | {{ columns[3][:6].center(6) }} | {{ columns[4][:6].center(6) }} | {{ columns[5].center(6) }} | {{ columns[6][:6].center(6) }} | {{ columns[7][:6].center(6) }} | {{ columns[8][:6].center(6) }} | + | {{ columns[0][:20].center(20) }} | {{ columns[1][:16].center(16) }} | {{ columns[2].center(9) }} | {{ columns[3].center(16) }} | {{ columns[4].center(16) }} | {{ columns[5].center(4) }} | {{ columns[6][:12].center(12) }} | {{ columns[7][:12].center(12) }} | {% endif %} {% endfor %} {% endif %} {% endfor %} - +--------+--------+-----------+--------+--------+--------+--------+--------+--------+ + +---------------------+------------------+-----------+----------------+----------------+-----+------------+------------+ loop: "{{ consumer_group_output.results }}" loop_control: label: "{{ item.item }}" From 476fe9e117b603a0bf6f1687cdf751b1bdfd1e4c Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 15:22:10 +0530 Subject: [PATCH 387/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 32 ++++++++++--------------------- 1 file changed, 10 insertions(+), 22 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 90a6b3b2a1..473fab3c09 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,6 +1,5 @@ ---- - name: Display Kafka consumer group status - hosts: localhost + hosts: ingestion-cluster-kafka gather_facts: no tasks: - name: Loop through Kafka consumer groups and check lag status @@ -23,29 +22,18 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - - name: Print Kafka lag status for each group + + - name: Generate Kafka group status table debug: msg: | - {% set header_printed = false %} - {% for item in consumer_group_output.results %} - {% if item.stdout.find('No such consumer group') == -1 %} - {% set consumer_details 
= item.stdout | regex_replace('.*?GROUP\\s+TOPIC\\s+PARTITION\\s+CURRENT-OFFSET\\s+LOG-END-OFFSET\\s+LAG\\s+CONSUMER-ID\\s+HOST\\s+CLIENT-ID\\s+(.*?)\\n.*', '\\1') %} - {% set consumer_lines = consumer_details.split('\n') %} - {% for line in consumer_lines %} - {% if line %} - {% if not header_printed %} - +---------------------+------------------+-----------+----------------+----------------+-----+------------+------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | - +---------------------+------------------+-----------+----------------+----------------+-----+------------+------------+ - {% set header_printed = true %} - {% endif %} - {% set columns = line.split() %} - | {{ columns[0][:20].center(20) }} | {{ columns[1][:16].center(16) }} | {{ columns[2].center(9) }} | {{ columns[3].center(16) }} | {{ columns[4].center(16) }} | {{ columns[5].center(4) }} | {{ columns[6][:12].center(12) }} | {{ columns[7][:12].center(12) }} | - {% endif %} - {% endfor %} - {% endif %} + +---------------------------------------+---------------------------------------+------------+----------------+----------------+---------------+---------------+---------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + +---------------------------------------+---------------------------------------+------------+----------------+----------------+---------------+---------------+---------------+ + {% for result in consumer_group_output.results %} + {% set lag = result.stdout | regex_replace('.*LAG\\s+(\\d+).*', '\\1') | int %} + | {{ result.item | string | pad(39) }} | {{ result.stdout | regex_replace('.*TOPIC\\s+(\\S+).*', '\\1') | string | pad(39) }} | {{ result.stdout | regex_replace('.*PARTITION\\s+(\\d+).*', '\\1') | string | pad(11) }} | {{ result.stdout | regex_replace('.*CURRENT-OFFSET\\s+(\\d+).*', '\\1') | string | pad(15) }} | {{ result.stdout | 
regex_replace('.*LOG-END-OFFSET\\s+(\\d+).*', '\\1') | string | pad(15) }} | {{ lag | string | pad(12) }} | {{ result.stdout | regex_replace('.*CONSUMER-ID\\s+(\\S+).*', '\\1') | string | pad(15) }} | {{ result.stdout | regex_replace('.*HOST\\s+(\\S+).*', '\\1') | string | pad(15) }} | {{ result.stdout | regex_replace('.*CLIENT-ID\\s+(\\S+).*', '\\1') | string | pad(15) }} | {% endfor %} - +---------------------+------------------+-----------+----------------+----------------+-----+------------+------------+ + +---------------------------------------+---------------------------------------+------------+----------------+----------------+---------------+---------------+---------------+---------------+ loop: "{{ consumer_group_output.results }}" loop_control: label: "{{ item.item }}" From a0b3f25b77ba466fd6e76a061861d6836c0b5d5d Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 15:24:27 +0530 Subject: [PATCH 388/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 473fab3c09..7745509c2e 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,12 +26,12 @@ - name: Generate Kafka group status table debug: msg: | - +---------------------------------------+---------------------------------------+------------+----------------+----------------+---------------+---------------+---------------+ + +---------------------------------------+---------------------------------------+------------+----------------+----------------+---------------+---------------+---------------+---------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | - 
+---------------------------------------+---------------------------------------+------------+----------------+----------------+---------------+---------------+---------------+ + +---------------------------------------+---------------------------------------+------------+----------------+----------------+---------------+---------------+---------------+---------------+ {% for result in consumer_group_output.results %} {% set lag = result.stdout | regex_replace('.*LAG\\s+(\\d+).*', '\\1') | int %} - | {{ result.item | string | pad(39) }} | {{ result.stdout | regex_replace('.*TOPIC\\s+(\\S+).*', '\\1') | string | pad(39) }} | {{ result.stdout | regex_replace('.*PARTITION\\s+(\\d+).*', '\\1') | string | pad(11) }} | {{ result.stdout | regex_replace('.*CURRENT-OFFSET\\s+(\\d+).*', '\\1') | string | pad(15) }} | {{ result.stdout | regex_replace('.*LOG-END-OFFSET\\s+(\\d+).*', '\\1') | string | pad(15) }} | {{ lag | string | pad(12) }} | {{ result.stdout | regex_replace('.*CONSUMER-ID\\s+(\\S+).*', '\\1') | string | pad(15) }} | {{ result.stdout | regex_replace('.*HOST\\s+(\\S+).*', '\\1') | string | pad(15) }} | {{ result.stdout | regex_replace('.*CLIENT-ID\\s+(\\S+).*', '\\1') | string | pad(15) }} | + | {{ result.item | string | regex_replace('^', ' ') }} | {{ result.stdout | regex_replace('.*TOPIC\\s+(\\S+).*', '\\1') | string | regex_replace('^', ' ') }} | {{ result.stdout | regex_replace('.*PARTITION\\s+(\\d+).*', '\\1') | string | regex_replace('^', ' ') }} | {{ result.stdout | regex_replace('.*CURRENT-OFFSET\\s+(\\d+).*', '\\1') | string | regex_replace('^', ' ') }} | {{ result.stdout | regex_replace('.*LOG-END-OFFSET\\s+(\\d+).*', '\\1') | string | regex_replace('^', ' ') }} | {{ lag | string | regex_replace('^', ' ') }} | {{ result.stdout | regex_replace('.*CONSUMER-ID\\s+(\\S+).*', '\\1') | string | regex_replace('^', ' ') }} | {{ result.stdout | regex_replace('.*HOST\\s+(\\S+).*', '\\1') | string | regex_replace('^', ' ') }} | {{ result.stdout | 
regex_replace('.*CLIENT-ID\\s+(\\S+).*', '\\1') | string | regex_replace('^', ' ') }} | {% endfor %} +---------------------------------------+---------------------------------------+------------+----------------+----------------+---------------+---------------+---------------+---------------+ loop: "{{ consumer_group_output.results }}" From 488b44fe549ecdd4ebebe5c9f5fc42d082b249fd Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 16:22:09 +0530 Subject: [PATCH 389/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 7745509c2e..ddde6bc191 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -22,18 +22,22 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - - - name: Generate Kafka group status table + - name: Print Kafka lag status for each group debug: msg: | - +---------------------------------------+---------------------------------------+------------+----------------+----------------+---------------+---------------+---------------+---------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | - +---------------------------------------+---------------------------------------+------------+----------------+----------------+---------------+---------------+---------------+---------------+ - {% for result in consumer_group_output.results %} - {% set lag = result.stdout | regex_replace('.*LAG\\s+(\\d+).*', '\\1') | int %} - | {{ result.item | string | regex_replace('^', ' ') }} | {{ result.stdout | regex_replace('.*TOPIC\\s+(\\S+).*', '\\1') | string | regex_replace('^', ' ') }} | {{ result.stdout | regex_replace('.*PARTITION\\s+(\\d+).*', '\\1') | string | regex_replace('^', ' ') }} | {{ 
result.stdout | regex_replace('.*CURRENT-OFFSET\\s+(\\d+).*', '\\1') | string | regex_replace('^', ' ') }} | {{ result.stdout | regex_replace('.*LOG-END-OFFSET\\s+(\\d+).*', '\\1') | string | regex_replace('^', ' ') }} | {{ lag | string | regex_replace('^', ' ') }} | {{ result.stdout | regex_replace('.*CONSUMER-ID\\s+(\\S+).*', '\\1') | string | regex_replace('^', ' ') }} | {{ result.stdout | regex_replace('.*HOST\\s+(\\S+).*', '\\1') | string | regex_replace('^', ' ') }} | {{ result.stdout | regex_replace('.*CLIENT-ID\\s+(\\S+).*', '\\1') | string | regex_replace('^', ' ') }} | - {% endfor %} - +---------------------------------------+---------------------------------------+------------+----------------+----------------+---------------+---------------+---------------+---------------+ + Kafka lag status for group '{{ item.item }}': + +-------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET| LAG | CONSUMER-ID| CLIENT-ID | + +-------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ + {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} + {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} + +-------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ + {% if lag <= 9 %} + Kafka lag for group '{{ item.item }}' is normal (0 to 9) + {% else %} + Kafka lag for group '{{ item.item }}' is high + {% endif %} + ---------------------------------------- loop: "{{ consumer_group_output.results }}" loop_control: label: "{{ item.item }}" From aeb9a1c71062c0c12094b9da96102ea528127578 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj 
<121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 16:39:10 +0530 Subject: [PATCH 390/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 32 ++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index ddde6bc191..f392c1bfbe 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -25,19 +25,25 @@ - name: Print Kafka lag status for each group debug: msg: | - Kafka lag status for group '{{ item.item }}': - +-------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET| LAG | CONSUMER-ID| CLIENT-ID | - +-------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ - {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} - {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} - +-------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ - {% if lag <= 9 %} - Kafka lag for group '{{ item.item }}' is normal (0 to 9) - {% else %} - Kafka lag for group '{{ item.item }}' is high - {% endif %} - ---------------------------------------- + {% set table_data = [] %} + {% for result in consumer_group_output.results %} + {% set group_data = {} %} + {% set group_data['group'] = result.item %} + {% set group_data['topic'] = result.stdout | regex_replace('^[\\s\\S]*TOPIC[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') %} + {% set group_data['partition'] = result.stdout | regex_replace('^[\\s\\S]*PARTITION[^\\n]*\\n([\\s\\S]*)$', '\\1') 
| regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') %} + {% set group_data['current_offset'] = result.stdout | regex_replace('^[\\s\\S]*CURRENT-OFFSET[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') %} + {% set group_data['log_end_offset'] = result.stdout | regex_replace('^[\\s\\S]*LOG-END-OFFSET[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') %} + {% set group_data['lag'] = result.stdout | regex_replace('^[\\s\\S]*LAG[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') | int %} + {% set group_data['consumer_id'] = result.stdout | regex_replace('^[\\s\\S]*CONSUMER-ID[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') %} + {% set group_data['host'] = result.stdout | regex_replace('^[\\s\\S]*HOST[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') %} + {% set group_data['client_id'] = result.stdout | regex_replace('^[\\s\\S]*CLIENT-ID[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') %} + {% set table_data = table_data + [group_data] %} + {% endfor %} + +--------------+--------------+------------+----------------+----------------+-----+-------------+------+------------+ + {% for data in table_data %} + | {{ data['group'] | default('N/A') }} | {{ data['topic'] | default('N/A') }} | {{ data['partition'] | default('N/A') }} | {{ data['current_offset'] | default('N/A') }} | {{ data['log_end_offset'] | default('N/A') }} | {{ data['lag'] | default('N/A') }} | {{ data['consumer_id'] | default('N/A') }} | {{ data['host'] | default('N/A') }} | {{ data['client_id'] | default('N/A') }} | + {% endfor %} + +--------------+--------------+------------+----------------+----------------+-----+-------------+------+------------+ loop: "{{ consumer_group_output.results }}" loop_control: label: "{{ 
item.item }}" From 52c52a7ce4f432fa5e166b48eb592a33cc6f038d Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 16:41:47 +0530 Subject: [PATCH 391/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index f392c1bfbe..8bc6d4961d 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -28,15 +28,15 @@ {% set table_data = [] %} {% for result in consumer_group_output.results %} {% set group_data = {} %} - {% set group_data['group'] = result.item %} - {% set group_data['topic'] = result.stdout | regex_replace('^[\\s\\S]*TOPIC[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') %} - {% set group_data['partition'] = result.stdout | regex_replace('^[\\s\\S]*PARTITION[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') %} - {% set group_data['current_offset'] = result.stdout | regex_replace('^[\\s\\S]*CURRENT-OFFSET[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') %} - {% set group_data['log_end_offset'] = result.stdout | regex_replace('^[\\s\\S]*LOG-END-OFFSET[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') %} - {% set group_data['lag'] = result.stdout | regex_replace('^[\\s\\S]*LAG[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') | int %} - {% set group_data['consumer_id'] = result.stdout | regex_replace('^[\\s\\S]*CONSUMER-ID[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') %} - {% set group_data['host'] = result.stdout | regex_replace('^[\\s\\S]*HOST[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') 
| regex_replace('^\\n|\\n$', '') %} - {% set group_data['client_id'] = result.stdout | regex_replace('^[\\s\\S]*CLIENT-ID[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') %} + {% set _ = group_data.update({'group': result.item}) %} + {% set _ = group_data.update({'topic': result.stdout | regex_replace('^[\\s\\S]*TOPIC[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }) %} + {% set _ = group_data.update({'partition': result.stdout | regex_replace('^[\\s\\S]*PARTITION[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }) %} + {% set _ = group_data.update({'current_offset': result.stdout | regex_replace('^[\\s\\S]*CURRENT-OFFSET[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }) %} + {% set _ = group_data.update({'log_end_offset': result.stdout | regex_replace('^[\\s\\S]*LOG-END-OFFSET[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }) %} + {% set _ = group_data.update({'lag': result.stdout | regex_replace('^[\\s\\S]*LAG[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') | int }) %} + {% set _ = group_data.update({'consumer_id': result.stdout | regex_replace('^[\\s\\S]*CONSUMER-ID[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }) %} + {% set _ = group_data.update({'host': result.stdout | regex_replace('^[\\s\\S]*HOST[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }) %} + {% set _ = group_data.update({'client_id': result.stdout | regex_replace('^[\\s\\S]*CLIENT-ID[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }) %} {% set table_data = table_data + [group_data] %} {% endfor %} 
+--------------+--------------+------------+----------------+----------------+-----+-------------+------+------------+ From 24f666624529d643480d56031007af355c56e342 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 16:52:15 +0530 Subject: [PATCH 392/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 32 +++++++++++++------------------ 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 8bc6d4961d..ad7a821343 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -22,28 +22,22 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" + - name: Print Kafka lag status for each group debug: msg: | - {% set table_data = [] %} - {% for result in consumer_group_output.results %} - {% set group_data = {} %} - {% set _ = group_data.update({'group': result.item}) %} - {% set _ = group_data.update({'topic': result.stdout | regex_replace('^[\\s\\S]*TOPIC[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }) %} - {% set _ = group_data.update({'partition': result.stdout | regex_replace('^[\\s\\S]*PARTITION[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }) %} - {% set _ = group_data.update({'current_offset': result.stdout | regex_replace('^[\\s\\S]*CURRENT-OFFSET[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }) %} - {% set _ = group_data.update({'log_end_offset': result.stdout | regex_replace('^[\\s\\S]*LOG-END-OFFSET[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }) %} - {% set _ = group_data.update({'lag': result.stdout | regex_replace('^[\\s\\S]*LAG[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') | int 
}) %} - {% set _ = group_data.update({'consumer_id': result.stdout | regex_replace('^[\\s\\S]*CONSUMER-ID[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }) %} - {% set _ = group_data.update({'host': result.stdout | regex_replace('^[\\s\\S]*HOST[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }) %} - {% set _ = group_data.update({'client_id': result.stdout | regex_replace('^[\\s\\S]*CLIENT-ID[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }) %} - {% set table_data = table_data + [group_data] %} - {% endfor %} - +--------------+--------------+------------+----------------+----------------+-----+-------------+------+------------+ - {% for data in table_data %} - | {{ data['group'] | default('N/A') }} | {{ data['topic'] | default('N/A') }} | {{ data['partition'] | default('N/A') }} | {{ data['current_offset'] | default('N/A') }} | {{ data['log_end_offset'] | default('N/A') }} | {{ data['lag'] | default('N/A') }} | {{ data['consumer_id'] | default('N/A') }} | {{ data['host'] | default('N/A') }} | {{ data['client_id'] | default('N/A') }} | - {% endfor %} - +--------------+--------------+------------+----------------+----------------+-----+-------------+------+------------+ + Kafka lag status for group '{{ item.item }}': + +--------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | CLIENT-ID | + +--------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ + | {{ item.stdout | regex_replace('.*GROUP\\s+([^\\s]+).*', '\\1') | default('N/A') | string.center(14) }} | {{ item.stdout | regex_replace('.*TOPIC\\s+([^\\s]+).*', '\\1') | default('N/A') | string.center(30) }} | {{ item.stdout | 
regex_replace('.*PARTITION\\s+([^\\s]+).*', '\\1') | default('N/A') | string.center(12) }} | {{ item.stdout | regex_replace('.*CURRENT-OFFSET\\s+([^\\s]+).*', '\\1') | default('N/A') | string.center(15) }} | {{ item.stdout | regex_replace('.*LOG-END-OFFSET\\s+([^\\s]+).*', '\\1') | default('N/A') | string.center(15) }} | {{ item.stdout | regex_replace('.*LAG\\s+([^\\s]+).*', '\\1') | default('N/A') | string.center(4) }} | {{ item.stdout | regex_replace('.*CONSUMER-ID\\s+([^\\s]+).*', '\\1') | default('N/A') | string.center(11) }} | {{ item.stdout | regex_replace('.*HOST\\s+([^\\s]+).*', '\\1') | default('N/A') | string.center(6) }} | {{ item.stdout | regex_replace('.*CLIENT-ID\\s+([^\\s]+).*', '\\1') | default('N/A') | string.center(9) }} | + +--------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ + {% set lag = item.stdout | regex_replace('.*LAG\\s+([^\\s]+).*', '\\1') | int %} + {% if lag <= 9 %} + Kafka lag for group '{{ item.item }}' is normal (0 to 9) + {% else %} + Kafka lag for group '{{ item.item }}' is high + {% endif %} loop: "{{ consumer_group_output.results }}" loop_control: label: "{{ item.item }}" From a59feea25e50e109e517ae6e18f6818150d03ba7 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 16:55:28 +0530 Subject: [PATCH 393/616] Update kafka_lags_monitoring.yml From 13d1f0f8e23ccfe26ef7d2efb28470bfa8b4bad7 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 16:58:12 +0530 Subject: [PATCH 394/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index ad7a821343..5233425a17 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -28,9 +28,9 
@@ msg: | Kafka lag status for group '{{ item.item }}': +--------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | CLIENT-ID | + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID| CLIENT-ID | +--------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ - | {{ item.stdout | regex_replace('.*GROUP\\s+([^\\s]+).*', '\\1') | default('N/A') | string.center(14) }} | {{ item.stdout | regex_replace('.*TOPIC\\s+([^\\s]+).*', '\\1') | default('N/A') | string.center(30) }} | {{ item.stdout | regex_replace('.*PARTITION\\s+([^\\s]+).*', '\\1') | default('N/A') | string.center(12) }} | {{ item.stdout | regex_replace('.*CURRENT-OFFSET\\s+([^\\s]+).*', '\\1') | default('N/A') | string.center(15) }} | {{ item.stdout | regex_replace('.*LOG-END-OFFSET\\s+([^\\s]+).*', '\\1') | default('N/A') | string.center(15) }} | {{ item.stdout | regex_replace('.*LAG\\s+([^\\s]+).*', '\\1') | default('N/A') | string.center(4) }} | {{ item.stdout | regex_replace('.*CONSUMER-ID\\s+([^\\s]+).*', '\\1') | default('N/A') | string.center(11) }} | {{ item.stdout | regex_replace('.*HOST\\s+([^\\s]+).*', '\\1') | default('N/A') | string.center(6) }} | {{ item.stdout | regex_replace('.*CLIENT-ID\\s+([^\\s]+).*', '\\1') | default('N/A') | string.center(9) }} | + | {{ item.stdout | regex_replace('.*GROUP\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*TOPIC\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*PARTITION\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*CURRENT-OFFSET\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*LOG-END-OFFSET\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | 
regex_replace('.*LAG\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*CONSUMER-ID\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*HOST\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*CLIENT-ID\\s+([^\\s]+).*', '\\1') | default('N/A') }} | +--------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ {% set lag = item.stdout | regex_replace('.*LAG\\s+([^\\s]+).*', '\\1') | int %} {% if lag <= 9 %} From c7c3170a1df362f233b52af622705f0fb658692b Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 17:11:37 +0530 Subject: [PATCH 395/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 5233425a17..4e5c0a920e 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -22,17 +22,15 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - - name: Print Kafka lag status for each group debug: msg: | - Kafka lag status for group '{{ item.item }}': +--------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID| CLIENT-ID | - +--------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ - | {{ item.stdout | regex_replace('.*GROUP\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*TOPIC\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*PARTITION\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | 
regex_replace('.*CURRENT-OFFSET\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*LOG-END-OFFSET\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*LAG\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*CONSUMER-ID\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*HOST\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*CLIENT-ID\\s+([^\\s]+).*', '\\1') | default('N/A') }} | + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET| LAG | CONSUMER-ID| CLIENT-ID | +--------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ {% set lag = item.stdout | regex_replace('.*LAG\\s+([^\\s]+).*', '\\1') | int %} + {{ item.stdout | regex_replace('.*GROUP\\\\s+([^\\\\s]+).*', '\\\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*TOPIC\\\\s+([^\\\\s]+).*', '\\\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*PARTITION\\\\s+([^\\\\s]+).*', '\\\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*CURRENT-OFFSET\\\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*LOG-END-OFFSET\\\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*LAG\\\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*CONSUMER-ID\\\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*HOST\\\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*CLIENT-ID\\\\s+([^\\s]+).*', '\\1') | default('N/A') }} | + +--------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ {% if lag <= 9 %} Kafka lag for group '{{ item.item }}' is normal (0 to 9) {% else %} From cdfa1150b160e44db56922f37f305189c073f307 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj 
<121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 17:15:26 +0530 Subject: [PATCH 396/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 4e5c0a920e..ce2cbaefa2 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -25,12 +25,7 @@ - name: Print Kafka lag status for each group debug: msg: | - +--------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET| LAG | CONSUMER-ID| CLIENT-ID | - +--------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ {% set lag = item.stdout | regex_replace('.*LAG\\s+([^\\s]+).*', '\\1') | int %} - {{ item.stdout | regex_replace('.*GROUP\\\\s+([^\\\\s]+).*', '\\\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*TOPIC\\\\s+([^\\\\s]+).*', '\\\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*PARTITION\\\\s+([^\\\\s]+).*', '\\\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*CURRENT-OFFSET\\\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*LOG-END-OFFSET\\\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*LAG\\\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*CONSUMER-ID\\\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*HOST\\\\s+([^\\s]+).*', '\\1') | default('N/A') }} | {{ item.stdout | regex_replace('.*CLIENT-ID\\\\s+([^\\s]+).*', '\\1') | default('N/A') }} | - +--------------+-------------------------------+-----------+----------------+---------------+------------+------------+-------------+ {% if lag <= 9 %} Kafka lag for group '{{ item.item }}' is normal (0 to 
9) {% else %} From 0189ad6ff7e72646f2be3a4e021771cf8866f7de Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 20:30:24 +0530 Subject: [PATCH 397/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index ce2cbaefa2..3f193163ab 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,4 +1,5 @@ -- name: Display Kafka consumer group status +--- +- name: Display Kafka consumer group status with structured table format hosts: ingestion-cluster-kafka gather_facts: no tasks: @@ -22,15 +23,19 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - - name: Print Kafka lag status for each group + + - name: Print Kafka lag status for each group in table format debug: msg: | - {% set lag = item.stdout | regex_replace('.*LAG\\s+([^\\s]+).*', '\\1') | int %} - {% if lag <= 9 %} - Kafka lag for group '{{ item.item }}' is normal (0 to 9) + Consumer group '{{ item.item }}' has no active members. 
+ {% for line in item.stdout_lines %} + {% if loop.first %} + {{ line }} {# This prints the header line as is #} + |---------------------------------|------------------|-----------|----------------|-----------------|--------|---------------|---------|-----------------------| {% else %} - Kafka lag for group '{{ item.item }}' is high + | {{ '%-33s' | format(line.split()[0] | default('', true)) }} | {{ '%-18s' | format(line.split()[1] | default('', true)) }} | {{ '%-9s' | format(line.split()[2] | default('', true)) }} | {{ '%-14s' | format(line.split()[3] | default('', true)) }} | {{ '%-15s' | format(line.split()[4] | default('', true)) }} | {{ '%-6s' | format(line.split()[5] | default('', true)) }} | {{ '%-13s' | format(line.split()[6] | default('', true)) }} | {{ '%-7s' | format(line.split()[7] | default('', true)) }} | {{ '%-21s' | format(line.split()[8] | default('', true)) }} | {% endif %} + {% endfor %} loop: "{{ consumer_group_output.results }}" loop_control: label: "{{ item.item }}" From 979225a016f1029fe48f2f3f7ce385ce5b9c5a54 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 20:46:57 +0530 Subject: [PATCH 398/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 80 +++++++++++++++---------------- 1 file changed, 39 insertions(+), 41 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 3f193163ab..3cdd589a7b 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,41 +1,39 @@ ---- -- name: Display Kafka consumer group status with structured table format - hosts: ingestion-cluster-kafka - gather_facts: no - tasks: - - name: Loop through Kafka consumer groups and check lag status - command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" - register: consumer_group_output - loop: - - "dev-audit-event-generator-group" - - 
"telemetry-group" - - "prometheus-metrics-consumer" - - "dev-post-publish-processor-group" - - "ml-project-service" - - "dev-audit-history-indexer-group" - - "learning-127.0.1.1" - - "dev-search-indexer-group" - - "outbound" - - "dev-enrolment-reconciliation-group" - - "devsamiksha" - - "dev-relation-cache-updater-group" - - "dev-content-publish-group" - - "dev-qrcode-image-generator-group" - loop_control: - label: "{{ item }}" - - - name: Print Kafka lag status for each group in table format - debug: - msg: | - Consumer group '{{ item.item }}' has no active members. - {% for line in item.stdout_lines %} - {% if loop.first %} - {{ line }} {# This prints the header line as is #} - |---------------------------------|------------------|-----------|----------------|-----------------|--------|---------------|---------|-----------------------| - {% else %} - | {{ '%-33s' | format(line.split()[0] | default('', true)) }} | {{ '%-18s' | format(line.split()[1] | default('', true)) }} | {{ '%-9s' | format(line.split()[2] | default('', true)) }} | {{ '%-14s' | format(line.split()[3] | default('', true)) }} | {{ '%-15s' | format(line.split()[4] | default('', true)) }} | {{ '%-6s' | format(line.split()[5] | default('', true)) }} | {{ '%-13s' | format(line.split()[6] | default('', true)) }} | {{ '%-7s' | format(line.split()[7] | default('', true)) }} | {{ '%-21s' | format(line.split()[8] | default('', true)) }} | - {% endif %} - {% endfor %} - loop: "{{ consumer_group_output.results }}" - loop_control: - label: "{{ item.item }}" +- name: Loop through Kafka consumer groups and check lag status + command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" + register: consumer_group_output + loop: + - "dev-audit-event-generator-group" + - "telemetry-group" + - "prometheus-metrics-consumer" + - "dev-post-publish-processor-group" + - "ml-project-service" + - "dev-audit-history-indexer-group" + - "learning-127.0.1.1" + - 
"dev-search-indexer-group" + - "outbound" + - "dev-enrolment-reconciliation-group" + - "devsamiksha" + - "dev-relation-cache-updater-group" + - "dev-content-publish-group" + - "dev-qrcode-image-generator-group" + loop_control: + label: "{{ item }}" + +- name: Install tabulate Python library + pip: + name: tabulate + +- name: Format Kafka consumer group status into table format + run_once: true + script: | + from tabulate import tabulate + output_data = [] + for item in consumer_group_output.results: + group_name = item.item + status = item.stdout + lag = int(status.split('LAG')[1].split()[0]) if 'LAG' in status else None + lag_status = 'Normal' if lag is not None and lag <= 9 else 'High' if lag is not None and lag > 9 else 'Unknown' + output_data.append([group_name, lag, lag_status]) + + table = tabulate(output_data, headers=["Consumer Group", "Kafka Lag", "Lag Status"], tablefmt="pipe") + print(table) From 65d34fd312bacba22d5281e69787e071fa97cd3f Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 20:48:38 +0530 Subject: [PATCH 399/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 81 ++++++++++++++++--------------- 1 file changed, 43 insertions(+), 38 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 3cdd589a7b..631b29905f 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,39 +1,44 @@ -- name: Loop through Kafka consumer groups and check lag status - command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" - register: consumer_group_output - loop: - - "dev-audit-event-generator-group" - - "telemetry-group" - - "prometheus-metrics-consumer" - - "dev-post-publish-processor-group" - - "ml-project-service" - - "dev-audit-history-indexer-group" - - "learning-127.0.1.1" - - "dev-search-indexer-group" - - "outbound" - - 
"dev-enrolment-reconciliation-group" - - "devsamiksha" - - "dev-relation-cache-updater-group" - - "dev-content-publish-group" - - "dev-qrcode-image-generator-group" - loop_control: - label: "{{ item }}" +--- +- name: Display Kafka consumer group status in table format + hosts: localhost + gather_facts: no + tasks: + - name: Loop through Kafka consumer groups and check lag status + shell: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" + register: consumer_group_output + loop: + - "dev-audit-event-generator-group" + - "telemetry-group" + - "prometheus-metrics-consumer" + - "dev-post-publish-processor-group" + - "ml-project-service" + - "dev-audit-history-indexer-group" + - "learning-127.0.1.1" + - "dev-search-indexer-group" + - "outbound" + - "dev-enrolment-reconciliation-group" + - "devsamiksha" + - "dev-relation-cache-updater-group" + - "dev-content-publish-group" + - "dev-qrcode-image-generator-group" + loop_control: + label: "{{ item }}" -- name: Install tabulate Python library - pip: - name: tabulate - -- name: Format Kafka consumer group status into table format - run_once: true - script: | - from tabulate import tabulate - output_data = [] - for item in consumer_group_output.results: - group_name = item.item - status = item.stdout - lag = int(status.split('LAG')[1].split()[0]) if 'LAG' in status else None - lag_status = 'Normal' if lag is not None and lag <= 9 else 'High' if lag is not None and lag > 9 else 'Unknown' - output_data.append([group_name, lag, lag_status]) - - table = tabulate(output_data, headers=["Consumer Group", "Kafka Lag", "Lag Status"], tablefmt="pipe") - print(table) + - name: Install tabulate Python library + pip: + name: tabulate + + - name: Format Kafka consumer group status into table format + run_once: true + script: | + from tabulate import tabulate + output_data = [] + for item in consumer_group_output.results: + group_name = item.item + status = item.stdout + lag = 
int(status.split('LAG')[1].split()[0]) if 'LAG' in status else None + lag_status = 'Normal' if lag is not None and lag <= 9 else 'High' if lag is not None and lag > 9 else 'Unknown' + output_data.append([group_name, lag, lag_status]) + + table = tabulate(output_data, headers=["Consumer Group", "Kafka Lag", "Lag Status"], tablefmt="pipe") + print(table) From d098a9b8675b4a9dd7e98f81eef370baf038ffca Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 20:53:33 +0530 Subject: [PATCH 400/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 631b29905f..c6320280aa 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,6 +1,6 @@ --- - name: Display Kafka consumer group status in table format - hosts: localhost + hosts: ingestion-cluster-kafka gather_facts: no tasks: - name: Loop through Kafka consumer groups and check lag status From e8d249b1e86d1ccbfd65d3896d58307fddd93240 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 21:05:12 +0530 Subject: [PATCH 401/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index c6320280aa..99dcabf652 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,10 +1,13 @@ ---- -- name: Display Kafka consumer group status in table format +- name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no tasks: + - name: Install tabulate Python library + pip: + name: tabulate + - name: Loop through Kafka consumer groups and check lag status - shell: 
/opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" + command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" register: consumer_group_output loop: - "dev-audit-event-generator-group" @@ -23,22 +26,16 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - - - name: Install tabulate Python library - pip: - name: tabulate - + - name: Format Kafka consumer group status into table format run_once: true script: | from tabulate import tabulate output_data = [] - for item in consumer_group_output.results: - group_name = item.item - status = item.stdout - lag = int(status.split('LAG')[1].split()[0]) if 'LAG' in status else None - lag_status = 'Normal' if lag is not None and lag <= 9 else 'High' if lag is not None and lag > 9 else 'Unknown' - output_data.append([group_name, lag, lag_status]) + headers = ["GROUP", "TOPIC", "PARTITION", "CURRENT-OFFSET", "LOG-END-OFFSET", "LAG", "CONSUMER-ID", "HOST", "CLIENT-ID"] + for line in consumer_group_output.stdout_lines[2:]: # Skip the header and separator lines + row_data = line.split() + output_data.append(row_data[:9]) # Extract only the relevant columns - table = tabulate(output_data, headers=["Consumer Group", "Kafka Lag", "Lag Status"], tablefmt="pipe") + table = tabulate(output_data, headers=headers, tablefmt="pipe") print(table) From 7284fd2bf7d8624411e2d0024db63430e76be4e8 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 21:25:41 +0530 Subject: [PATCH 402/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 99dcabf652..8fe85b773d 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -32,10 +32,14 @@ script: 
| from tabulate import tabulate output_data = [] + headers_printed = False headers = ["GROUP", "TOPIC", "PARTITION", "CURRENT-OFFSET", "LOG-END-OFFSET", "LAG", "CONSUMER-ID", "HOST", "CLIENT-ID"] - for line in consumer_group_output.stdout_lines[2:]: # Skip the header and separator lines + for line in consumer_group_output.stdout_lines[2:]: row_data = line.split() - output_data.append(row_data[:9]) # Extract only the relevant columns + output_data.append(row_data[:9]) - table = tabulate(output_data, headers=headers, tablefmt="pipe") - print(table) + for data in output_data: + if not headers_printed: + print("| " + " | ".join(headers) + " |") + headers_printed = True + print("| " + " | ".join(data) + " |") From b8235e00a590e280a314aebc95959c97101e351d Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 21:28:40 +0530 Subject: [PATCH 403/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 8fe85b773d..b97d7f4bd8 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -30,16 +30,12 @@ - name: Format Kafka consumer group status into table format run_once: true script: | - from tabulate import tabulate output_data = [] - headers_printed = False headers = ["GROUP", "TOPIC", "PARTITION", "CURRENT-OFFSET", "LOG-END-OFFSET", "LAG", "CONSUMER-ID", "HOST", "CLIENT-ID"] - for line in consumer_group_output.stdout_lines[2:]: - row_data = line.split() - output_data.append(row_data[:9]) + for line in consumer_group_output.results | map(attribute='stdout_lines[2:]') | list: + for item in line: + row_data = item.split() + output_data.append(row_data[:9]) - for data in output_data: - if not headers_printed: - print("| " + " | ".join(headers) + " |") - headers_printed = True - print("| " + " | 
".join(data) + " |") + table = tabulate(output_data, headers=headers, tablefmt="pipe") + print(table) From cb6b928ed4d9d63e00ca872638e29735829ab420 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 21:33:01 +0530 Subject: [PATCH 404/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index b97d7f4bd8..ea0fd21c18 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -32,10 +32,11 @@ script: | output_data = [] headers = ["GROUP", "TOPIC", "PARTITION", "CURRENT-OFFSET", "LOG-END-OFFSET", "LAG", "CONSUMER-ID", "HOST", "CLIENT-ID"] - for line in consumer_group_output.results | map(attribute='stdout_lines[2:]') | list: - for item in line: - row_data = item.split() - output_data.append(row_data[:9]) - + for result in consumer_group_output.results: + for line in result.stdout_lines[2:]: + row_data = line.split() + output_data.append(row_data[:9]) + + from tabulate import tabulate table = tabulate(output_data, headers=headers, tablefmt="pipe") print(table) From 9e9aa89ed759a8a6ed16a3789d6f804adf07541d Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 21:44:38 +0530 Subject: [PATCH 405/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index ea0fd21c18..498a9edbd8 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -7,7 +7,7 @@ name: tabulate - name: Loop through Kafka consumer groups and check lag status - command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item 
}}" + raw: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" register: consumer_group_output loop: - "dev-audit-event-generator-group" @@ -30,13 +30,15 @@ - name: Format Kafka consumer group status into table format run_once: true script: | - output_data = [] + from tabulate import tabulate + + output_data = [] # Define the output_data list here + headers = ["GROUP", "TOPIC", "PARTITION", "CURRENT-OFFSET", "LOG-END-OFFSET", "LAG", "CONSUMER-ID", "HOST", "CLIENT-ID"] + for result in consumer_group_output.results: for line in result.stdout_lines[2:]: row_data = line.split() output_data.append(row_data[:9]) - from tabulate import tabulate - table = tabulate(output_data, headers=headers, tablefmt="pipe") - print(table) + print(tabulate(output_data, headers=headers, tablefmt="pipe")) From 30ccd3136802d6474cb94d31251e50f65406f0e6 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 21:48:12 +0530 Subject: [PATCH 406/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 498a9edbd8..2e3bf65fcf 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -30,8 +30,6 @@ - name: Format Kafka consumer group status into table format run_once: true script: | - from tabulate import tabulate - output_data = [] # Define the output_data list here headers = ["GROUP", "TOPIC", "PARTITION", "CURRENT-OFFSET", "LOG-END-OFFSET", "LAG", "CONSUMER-ID", "HOST", "CLIENT-ID"] @@ -41,4 +39,6 @@ row_data = line.split() output_data.append(row_data[:9]) + from tabulate import tabulate # Import tabulate here + print(tabulate(output_data, headers=headers, tablefmt="pipe")) From 111a1a7a4655d6fad42caa9b74f04eacb7c8c295 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj 
<121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 21 Mar 2024 21:49:34 +0530 Subject: [PATCH 407/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 38 ++++++++++++++----------------- 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 2e3bf65fcf..8d393c9cb4 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -2,12 +2,8 @@ hosts: ingestion-cluster-kafka gather_facts: no tasks: - - name: Install tabulate Python library - pip: - name: tabulate - - name: Loop through Kafka consumer groups and check lag status - raw: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" + command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" register: consumer_group_output loop: - "dev-audit-event-generator-group" @@ -26,19 +22,19 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - - - name: Format Kafka consumer group status into table format - run_once: true - script: | - output_data = [] # Define the output_data list here - - headers = ["GROUP", "TOPIC", "PARTITION", "CURRENT-OFFSET", "LOG-END-OFFSET", "LAG", "CONSUMER-ID", "HOST", "CLIENT-ID"] - - for result in consumer_group_output.results: - for line in result.stdout_lines[2:]: - row_data = line.split() - output_data.append(row_data[:9]) - - from tabulate import tabulate # Import tabulate here - - print(tabulate(output_data, headers=headers, tablefmt="pipe")) + - name: Print Kafka lag status for each group + debug: + msg: | + Consumer group '{{ item.item }}' has no active members. 
+ GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID + {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} + {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} + {% if lag <= 9 %} + Kafka lag for group '{{ item.item }}' is normal (0 to 9) + {% else %} + Kafka lag for group '{{ item.item }}' is high + {% endif %} + ---------------------------------------- + loop: "{{ consumer_group_output.results }}" + loop_control: + label: "{{ item.item }}" From 0b55645b14346a75c89b9324be60093f4bdac46c Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 11:49:58 +0530 Subject: [PATCH 408/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 59 ++++++++++++++++++++++--------- 1 file changed, 43 insertions(+), 16 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 8d393c9cb4..28515f8295 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,8 +1,9 @@ +--- - name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no tasks: - - name: Loop through Kafka consumer groups and check lag status + - name: Check lag status for specified Kafka consumer groups command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" register: consumer_group_output loop: @@ -22,19 +23,45 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - - name: Print Kafka lag status for each group + ignore_errors: yes # Handle errors if the group is not found or command fails + + - name: Prepare formatted Kafka consumer group output + set_fact: + kafka_groups: "{{ consumer_group_output.results | map(extract_group_info) | table_format }}" + vars: + 
extract_group_info: >- + def extract_group_info(result): + group_name = result.item + data = result.stdout_lines + # Skip the header line (assuming it's the first line) + data.pop(0) + members = [] + for line in data: + data = line.split() + if data: + members.append({ + 'topic': data[0], + 'partition': data[1], + 'current_offset': data[2], + 'log_end_offset': data[3], + 'lag': data[4], + 'consumer_id': data[5] if len(data) > 5 else '', + 'host': data[6] if len(data) > 6 else '', + 'client_id': data[7] if len(data) > 7 else '', + }) + return { 'group_name': group_name, 'members': members } + + table_format: >- + def table_format(groups): + headers = list(groups[0]['members'][0].keys()) if groups and groups[0]['members'] else [] + max_lengths = {key: len(key) for key in headers} + format_string = '|' + '|'.join(['{{:<{}}}'.format(max_lengths[key]) for key in headers]) + '|' + output = [format_string.format(**{key: key for key in headers})] # Group header row + for group in groups: + for member in group['members']: + output.append(format_string.format(**member)) + return '\n'.join(output) + + - name: Print formatted Kafka consumer group output debug: - msg: | - Consumer group '{{ item.item }}' has no active members. 
- GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID - {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} - {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} - {% if lag <= 9 %} - Kafka lag for group '{{ item.item }}' is normal (0 to 9) - {% else %} - Kafka lag for group '{{ item.item }}' is high - {% endif %} - ---------------------------------------- - loop: "{{ consumer_group_output.results }}" - loop_control: - label: "{{ item.item }}" + msg: "{{ kafka_groups }}" From fb2c6ec72a5adb558013f7a502a7b654c78e1717 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 12:20:16 +0530 Subject: [PATCH 409/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 53 ++++++++++--------------------- 1 file changed, 16 insertions(+), 37 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 28515f8295..6b1ca5cd67 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -3,7 +3,7 @@ hosts: ingestion-cluster-kafka gather_facts: no tasks: - - name: Check lag status for specified Kafka consumer groups + - name: Loop through Kafka consumer groups and check lag status command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" register: consumer_group_output loop: @@ -23,45 +23,24 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - ignore_errors: yes # Handle errors if the group is not found or command fails - - - name: Prepare formatted Kafka consumer group output + - name: Prepare Kafka consumer group output set_fact: - kafka_groups: "{{ consumer_group_output.results | map(extract_group_info) | table_format }}" + kafka_groups: "{{ 
consumer_group_output.results | map(extract_group_info) | list }}" vars: extract_group_info: >- - def extract_group_info(result): - group_name = result.item - data = result.stdout_lines - # Skip the header line (assuming it's the first line) - data.pop(0) - members = [] - for line in data: - data = line.split() - if data: - members.append({ - 'topic': data[0], - 'partition': data[1], - 'current_offset': data[2], - 'log_end_offset': data[3], - 'lag': data[4], - 'consumer_id': data[5] if len(data) > 5 else '', - 'host': data[6] if len(data) > 6 else '', - 'client_id': data[7] if len(data) > 7 else '', - }) - return { 'group_name': group_name, 'members': members } + lambda result: { + 'group_name': result.item, + 'members': [ + dict(zip(['group', 'topic', 'partition', 'current_offset', 'log_end_offset', 'lag', 'consumer_id', 'host', 'client_id'], line.split())) + for line in result.stdout_lines[1:] if line.strip() + ] + } - table_format: >- - def table_format(groups): - headers = list(groups[0]['members'][0].keys()) if groups and groups[0]['members'] else [] - max_lengths = {key: len(key) for key in headers} - format_string = '|' + '|'.join(['{{:<{}}}'.format(max_lengths[key]) for key in headers]) + '|' - output = [format_string.format(**{key: key for key in headers})] # Group header row - for group in groups: - for member in group['members']: - output.append(format_string.format(**member)) - return '\n'.join(output) + - name: Render Kafka consumer group output template + template: + src: kafka_output_template.j2 + dest: /path/to/kafka_consumer_group_output.txt - - name: Print formatted Kafka consumer group output + - name: Print Kafka consumer group output file location debug: - msg: "{{ kafka_groups }}" + msg: "Kafka consumer group output saved to /path/to/kafka_consumer_group_output.txt" From 45997c2516ba35570023b435f45a668a2ddfc661 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 12:24:23 
+0530 Subject: [PATCH 410/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 6b1ca5cd67..3b299aa090 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -39,8 +39,8 @@ - name: Render Kafka consumer group output template template: src: kafka_output_template.j2 - dest: /path/to/kafka_consumer_group_output.txt + dest: /tmp/kafka_consumer_group_output.txt - name: Print Kafka consumer group output file location debug: - msg: "Kafka consumer group output saved to /path/to/kafka_consumer_group_output.txt" + msg: "Kafka consumer group output saved to /tmp/kafka_consumer_group_output.txt" From 918142ada426c563be06c7035971a843efb1398a Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 12:29:25 +0530 Subject: [PATCH 411/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 3b299aa090..fdcabbd64c 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -25,17 +25,24 @@ label: "{{ item }}" - name: Prepare Kafka consumer group output set_fact: - kafka_groups: "{{ consumer_group_output.results | map(extract_group_info) | list }}" + kafka_groups: "{{ consumer_group_output.results | map('extract_group_info') | list }}" vars: extract_group_info: >- - lambda result: { - 'group_name': result.item, - 'members': [ - dict(zip(['group', 'topic', 'partition', 'current_offset', 'log_end_offset', 'lag', 'consumer_id', 'host', 'client_id'], line.split())) - for line in result.stdout_lines[1:] if line.strip() - ] - } - + {% set results = [] %} + {% for result in consumer_group_output.results %} + {% 
set group_info = {'group_name': result.item, 'members': []} %} + {% for line in result.stdout_lines[1:] %} + {% if line.strip() %} + {% set member_info = {'group': group_info['group_name']} %} + {% for key, value in dict(zip(['topic', 'partition', 'current_offset', 'log_end_offset', 'lag', 'consumer_id', 'host', 'client_id'], line.split())).items() %} + {% set _ = member_info.update({key: value}) %} + {% endfor %} + {% set _ = group_info['members'].append(member_info) %} + {% endif %} + {% endfor %} + {% set _ = results.append(group_info) %} + {% endfor %} + {{ results }} - name: Render Kafka consumer group output template template: src: kafka_output_template.j2 From 3ed8d61f436e3c1d1c7ecb25ff300e6a9438fa26 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 12:31:28 +0530 Subject: [PATCH 412/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index fdcabbd64c..10477b0572 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -25,9 +25,7 @@ label: "{{ item }}" - name: Prepare Kafka consumer group output set_fact: - kafka_groups: "{{ consumer_group_output.results | map('extract_group_info') | list }}" - vars: - extract_group_info: >- + kafka_groups: | {% set results = [] %} {% for result in consumer_group_output.results %} {% set group_info = {'group_name': result.item, 'members': []} %} @@ -43,6 +41,7 @@ {% set _ = results.append(group_info) %} {% endfor %} {{ results }} + - name: Render Kafka consumer group output template template: src: kafka_output_template.j2 From c4136570c8bcd9e54bfc9833cff195fba0a6a78b Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 12:34:28 +0530 Subject: [PATCH 413/616] Update 
kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 10477b0572..f9d39b31ef 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -32,9 +32,7 @@ {% for line in result.stdout_lines[1:] %} {% if line.strip() %} {% set member_info = {'group': group_info['group_name']} %} - {% for key, value in dict(zip(['topic', 'partition', 'current_offset', 'log_end_offset', 'lag', 'consumer_id', 'host', 'client_id'], line.split())).items() %} - {% set _ = member_info.update({key: value}) %} - {% endfor %} + {% set _ = member_info.update(dict(line.split() | items2dict(keys=['topic', 'partition', 'current_offset', 'log_end_offset', 'lag', 'consumer_id', 'host', 'client_id']))) %} {% set _ = group_info['members'].append(member_info) %} {% endif %} {% endfor %} From ca99bff86b3a809f82023e45f8b816f5d44850b9 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 12:36:36 +0530 Subject: [PATCH 414/616] Update kafka_lags_monitoring.yml From 4d1d7777d504db83f5d2b551c51a9aa4d7e16ecf Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 12:37:25 +0530 Subject: [PATCH 415/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index f9d39b31ef..35da18a8f6 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -32,7 +32,16 @@ {% for line in result.stdout_lines[1:] %} {% if line.strip() %} {% set member_info = {'group': group_info['group_name']} %} - {% set _ = member_info.update(dict(line.split() | items2dict(keys=['topic', 'partition', 'current_offset', 'log_end_offset', 
'lag', 'consumer_id', 'host', 'client_id']))) %} + {% set _ = member_info.update({ + 'topic': line.split()[0], + 'partition': line.split()[1], + 'current_offset': line.split()[2], + 'log_end_offset': line.split()[3], + 'lag': line.split()[4], + 'consumer_id': line.split()[5] if len(line.split()) > 5 else '', + 'host': line.split()[6] if len(line.split()) > 6 else '', + 'client_id': line.split()[7] if len(line.split()) > 7 else '', + }) %} {% set _ = group_info['members'].append(member_info) %} {% endif %} {% endfor %} From 63ef7b48cfdad2ba3bbb799e9a45b9b120f3c156 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 12:40:03 +0530 Subject: [PATCH 416/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 35da18a8f6..f8474801b9 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -38,9 +38,9 @@ 'current_offset': line.split()[2], 'log_end_offset': line.split()[3], 'lag': line.split()[4], - 'consumer_id': line.split()[5] if len(line.split()) > 5 else '', - 'host': line.split()[6] if len(line.split()) > 6 else '', - 'client_id': line.split()[7] if len(line.split()) > 7 else '', + 'consumer_id': line.split()[5] if line.split()|length > 5 else '', + 'host': line.split()[6] if line.split()|length > 6 else '', + 'client_id': line.split()[7] if line.split()|length > 7 else '', }) %} {% set _ = group_info['members'].append(member_info) %} {% endif %} From bd9b79cff77702505653c92285fe78bbfdced936 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 12:43:48 +0530 Subject: [PATCH 417/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 56 ++++++++++++------------------- 1 file changed, 22 insertions(+), 34 
deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index f8474801b9..dadd91c401 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,4 +1,3 @@ ---- - name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no @@ -23,37 +22,26 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - - name: Prepare Kafka consumer group output - set_fact: - kafka_groups: | - {% set results = [] %} - {% for result in consumer_group_output.results %} - {% set group_info = {'group_name': result.item, 'members': []} %} - {% for line in result.stdout_lines[1:] %} - {% if line.strip() %} - {% set member_info = {'group': group_info['group_name']} %} - {% set _ = member_info.update({ - 'topic': line.split()[0], - 'partition': line.split()[1], - 'current_offset': line.split()[2], - 'log_end_offset': line.split()[3], - 'lag': line.split()[4], - 'consumer_id': line.split()[5] if line.split()|length > 5 else '', - 'host': line.split()[6] if line.split()|length > 6 else '', - 'client_id': line.split()[7] if line.split()|length > 7 else '', - }) %} - {% set _ = group_info['members'].append(member_info) %} - {% endif %} - {% endfor %} - {% set _ = results.append(group_info) %} + - name: Save Kafka lag status to file + copy: + content: | + {% for item in consumer_group_output.results %} + Consumer group '{{ item.item }}' has no active members. 
+ {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} + {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} + {% if lag <= 9 %} + Kafka lag for group '{{ item.item }}' is normal (0 to 9) + {% else %} + Kafka lag for group '{{ item.item }}' is high + {% endif %} {% endfor %} - {{ results }} - - - name: Render Kafka consumer group output template - template: - src: kafka_output_template.j2 - dest: /tmp/kafka_consumer_group_output.txt - - - name: Print Kafka consumer group output file location - debug: - msg: "Kafka consumer group output saved to /tmp/kafka_consumer_group_output.txt" + dest: /path/to/output/file.txt + delegate_to: localhost + - name: Print Kafka lag status for each group + command: cat /path/to/output/file.txt + register: printed_output + changed_when: false + ignore_errors: true + delegate_to: localhost + - debug: + msg: "{{ printed_output.stdout_lines }}" From e39a6c5d6d2ac3d6c7960fcc6f2bee5c006d9620 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 12:46:00 +0530 Subject: [PATCH 418/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index dadd91c401..8f1f70e22e 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -2,6 +2,13 @@ hosts: ingestion-cluster-kafka gather_facts: no tasks: + - name: Create directory for output file + file: + path: /path/to/output + state: directory + delegate_to: localhost + become: yes + - name: Loop through Kafka consumer groups and check lag status command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" register: consumer_group_output @@ -22,6 +29,7 @@ - 
"dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" + - name: Save Kafka lag status to file copy: content: | @@ -37,11 +45,14 @@ {% endfor %} dest: /path/to/output/file.txt delegate_to: localhost + become: yes + - name: Print Kafka lag status for each group command: cat /path/to/output/file.txt register: printed_output changed_when: false ignore_errors: true delegate_to: localhost + - debug: msg: "{{ printed_output.stdout_lines }}" From c5b22b45a37fee68a458110dc3722358ab8b2ce6 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 13:20:03 +0530 Subject: [PATCH 419/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 8f1f70e22e..e93ed93f19 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -9,6 +9,12 @@ delegate_to: localhost become: yes + - name: Initialize the content with table headers + set_fact: + output_content: | + | Group | Partition | Current Offset | Log End Offset | Lag | Consumer ID | Host | Client ID | + |---------------------------------|-----------|----------------|----------------|-----|-------------|------|-----------| + - name: Loop through Kafka consumer groups and check lag status command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" register: consumer_group_output @@ -30,19 +36,17 @@ loop_control: label: "{{ item }}" + - name: Append data to output content + set_fact: + output_content: "{{ output_content }}\n{{ consumer_group_output.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }}" + + - name: Check lag status and update output content + set_fact: + output_content: "{{ 
output_content }}\n{% set lag = consumer_group_output.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %}{% if lag <= 9 %}Kafka lag for group '{{ consumer_group_output.item }}' is normal (0 to 9){% else %}Kafka lag for group '{{ consumer_group_output.item }}' is high{% endif %}" + - name: Save Kafka lag status to file copy: - content: | - {% for item in consumer_group_output.results %} - Consumer group '{{ item.item }}' has no active members. - {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} - {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} - {% if lag <= 9 %} - Kafka lag for group '{{ item.item }}' is normal (0 to 9) - {% else %} - Kafka lag for group '{{ item.item }}' is high - {% endif %} - {% endfor %} + content: "{{ output_content }}" dest: /path/to/output/file.txt delegate_to: localhost become: yes @@ -55,4 +59,4 @@ delegate_to: localhost - debug: - msg: "{{ printed_output.stdout_lines }}" + msg: "{{ printed_output.stdout }}" From a92837270ad4bd3c18a19416d8876ef14138d5f6 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 13:22:45 +0530 Subject: [PATCH 420/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index e93ed93f19..196787da05 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -16,7 +16,7 @@ |---------------------------------|-----------|----------------|----------------|-----|-------------|------|-----------| - name: Loop through Kafka consumer groups and check lag status - command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" + shell: 
/opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" register: consumer_group_output loop: - "dev-audit-event-generator-group" @@ -42,7 +42,7 @@ - name: Check lag status and update output content set_fact: - output_content: "{{ output_content }}\n{% set lag = consumer_group_output.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %}{% if lag <= 9 %}Kafka lag for group '{{ consumer_group_output.item }}' is normal (0 to 9){% else %}Kafka lag for group '{{ consumer_group_output.item }}' is high{% endif %}" + output_content: "{{ output_content }}\n{% set lag = consumer_group_output.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %}{% if lag <= 9 %}Kafka lag for group '{{ item }}' is normal (0 to 9){% else %}Kafka lag for group '{{ item }}' is high{% endif %}" - name: Save Kafka lag status to file copy: From 97ed7bf181e0f6e73b8de9ec612e9f36c76553ed Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 13:24:34 +0530 Subject: [PATCH 421/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 196787da05..b4dd322361 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -38,11 +38,11 @@ - name: Append data to output content set_fact: - output_content: "{{ output_content }}\n{{ consumer_group_output.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }}" + output_content: "{{ output_content }}\n{{ item.stdout_lines | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }}" - name: Check lag status and update output content set_fact: - 
output_content: "{{ output_content }}\n{% set lag = consumer_group_output.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %}{% if lag <= 9 %}Kafka lag for group '{{ item }}' is normal (0 to 9){% else %}Kafka lag for group '{{ item }}' is high{% endif %}" + output_content: "{{ output_content }}\n{% set lag = item.stdout_lines | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %}{% if lag <= 9 %}Kafka lag for group '{{ item.item }}' is normal (0 to 9){% else %}Kafka lag for group '{{ item.item }}' is high{% endif %}" - name: Save Kafka lag status to file copy: From bc3b2a6a7550dd7b4b39219db257207bbfa8a23c Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 13:28:00 +0530 Subject: [PATCH 422/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index b4dd322361..4d03494187 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -38,11 +38,11 @@ - name: Append data to output content set_fact: - output_content: "{{ output_content }}\n{{ item.stdout_lines | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }}" + output_content: "{{ output_content }}\n{% for line in consumer_group_output.stdout_lines %}{{ line | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }}{% endfor %}" - name: Check lag status and update output content set_fact: - output_content: "{{ output_content }}\n{% set lag = item.stdout_lines | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %}{% if lag <= 9 %}Kafka lag for group '{{ item.item }}' is normal (0 to 9){% else %}Kafka lag for group '{{ item.item }}' is high{% 
endif %}" + output_content: "{{ output_content }}\n{% set lag = consumer_group_output.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %}{% if lag <= 9 %}Kafka lag for group '{{ item }}' is normal (0 to 9){% else %}Kafka lag for group '{{ item }}' is high{% endif %}" - name: Save Kafka lag status to file copy: From 73cc51967a4803d5ca97cf24f07a4df630712cc3 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 13:39:31 +0530 Subject: [PATCH 423/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 4d03494187..195ddd1b51 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -38,11 +38,11 @@ - name: Append data to output content set_fact: - output_content: "{{ output_content }}\n{% for line in consumer_group_output.stdout_lines %}{{ line | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }}{% endfor %}" + output_content: "{{ output_content }}\n{% for line in consumer_group_output.results[item]['stdout_lines'] %}{{ line | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }}{% endfor %}" - name: Check lag status and update output content set_fact: - output_content: "{{ output_content }}\n{% set lag = consumer_group_output.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %}{% if lag <= 9 %}Kafka lag for group '{{ item }}' is normal (0 to 9){% else %}Kafka lag for group '{{ item }}' is high{% endif %}" + output_content: "{{ output_content }}\n{% set lag = consumer_group_output.results[item]['stdout'] | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %}{% if lag <= 9 %}Kafka 
lag for group '{{ item }}' is normal (0 to 9){% else %}Kafka lag for group '{{ item }}' is high{% endif %}" - name: Save Kafka lag status to file copy: From 5cf73c839cd58ca9808171db75fc61fcf086a541 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 13:46:29 +0530 Subject: [PATCH 424/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 81 ++++++++++++------------------- 1 file changed, 31 insertions(+), 50 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 195ddd1b51..cb38ea9ef3 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -2,61 +2,42 @@ hosts: ingestion-cluster-kafka gather_facts: no tasks: - - name: Create directory for output file - file: - path: /path/to/output - state: directory - delegate_to: localhost - become: yes - - - name: Initialize the content with table headers - set_fact: - output_content: | - | Group | Partition | Current Offset | Log End Offset | Lag | Consumer ID | Host | Client ID | - |---------------------------------|-----------|----------------|----------------|-----|-------------|------|-----------| + - name: Get Kafka consumer group list + command: /opt/kafka/bin/kafka-topics.sh --bootstrap-server localhost:9092 --list + register: kafka_group_list + failed_when: "kafka_group_list.stderr and 'Error: Consumer group' in kafka_group_list.stderr" - name: Loop through Kafka consumer groups and check lag status - shell: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" + command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" register: consumer_group_output - loop: - - "dev-audit-event-generator-group" - - "telemetry-group" - - "prometheus-metrics-consumer" - - "dev-post-publish-processor-group" - - "ml-project-service" - - 
"dev-audit-history-indexer-group" - - "learning-127.0.1.1" - - "dev-search-indexer-group" - - "outbound" - - "dev-enrolment-reconciliation-group" - - "devsamiksha" - - "dev-relation-cache-updater-group" - - "dev-content-publish-group" - - "dev-qrcode-image-generator-group" + loop: "{{ kafka_group_list.stdout_lines }}" loop_control: label: "{{ item }}" - - - name: Append data to output content - set_fact: - output_content: "{{ output_content }}\n{% for line in consumer_group_output.results[item]['stdout_lines'] %}{{ line | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }}{% endfor %}" - - - name: Check lag status and update output content - set_fact: - output_content: "{{ output_content }}\n{% set lag = consumer_group_output.results[item]['stdout'] | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %}{% if lag <= 9 %}Kafka lag for group '{{ item }}' is normal (0 to 9){% else %}Kafka lag for group '{{ item }}' is high{% endif %}" - - - name: Save Kafka lag status to file - copy: - content: "{{ output_content }}" - dest: /path/to/output/file.txt - delegate_to: localhost - become: yes + failed_when: "consumer_group_output.stderr and 'Error: Consumer group' in consumer_group_output.stderr" - name: Print Kafka lag status for each group - command: cat /path/to/output/file.txt - register: printed_output - changed_when: false - ignore_errors: true - delegate_to: localhost + debug: + msg: | + Checking Kafka Consumer Group: {{ item.item }} + Consumer group '{{ item.item }}' has no active members. 
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +| GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +| +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + {% for line in item.stdout_lines[1:] %} + {{ line }} + {% endfor %} + + {% set lag = item.stdout | regex_findall('LAG\\s+(\\d+)') | map('int') | list %} + {% set high_lag = lag | select('>=', 10) | list %} + {% if high_lag %} + Kafka lag for group '{{ item.item }}' is high (greater than 9) + {% else %} + Kafka lag for group '{{ item.item }}' is normal (0 to 9) + {% endif %} + ---------------------------------------- + loop: "{{ consumer_group_output.results }}" + loop_control: + label: "{{ item.item }}" + when: not item.failed # Skip printing for failed items - - debug: - msg: "{{ printed_output.stdout }}" From 01d9651c60a3f152d0daca6084089be3ce6cc123 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 13:48:24 +0530 Subject: [PATCH 425/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 45 +++++++++++++++---------------- 1 file changed, 21 insertions(+), 24 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index cb38ea9ef3..8d393c9cb4 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -2,42 +2,39 @@ hosts: ingestion-cluster-kafka gather_facts: no tasks: - - name: Get Kafka consumer group list - command: /opt/kafka/bin/kafka-topics.sh --bootstrap-server localhost:9092 --list - register: kafka_group_list - failed_when: "kafka_group_list.stderr and 'Error: Consumer group' in kafka_group_list.stderr" - - 
name: Loop through Kafka consumer groups and check lag status command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" register: consumer_group_output - loop: "{{ kafka_group_list.stdout_lines }}" + loop: + - "dev-audit-event-generator-group" + - "telemetry-group" + - "prometheus-metrics-consumer" + - "dev-post-publish-processor-group" + - "ml-project-service" + - "dev-audit-history-indexer-group" + - "learning-127.0.1.1" + - "dev-search-indexer-group" + - "outbound" + - "dev-enrolment-reconciliation-group" + - "devsamiksha" + - "dev-relation-cache-updater-group" + - "dev-content-publish-group" + - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - failed_when: "consumer_group_output.stderr and 'Error: Consumer group' in consumer_group_output.stderr" - - name: Print Kafka lag status for each group debug: msg: | - Checking Kafka Consumer Group: {{ item.item }} Consumer group '{{ item.item }}' has no active members. 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -| GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - {% for line in item.stdout_lines[1:] %} - {{ line }} - {% endfor %} - - {% set lag = item.stdout | regex_findall('LAG\\s+(\\d+)') | map('int') | list %} - {% set high_lag = lag | select('>=', 10) | list %} - {% if high_lag %} - Kafka lag for group '{{ item.item }}' is high (greater than 9) - {% else %} + GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID + {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} + {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} + {% if lag <= 9 %} Kafka lag for group '{{ item.item }}' is normal (0 to 9) + {% else %} + Kafka lag for group '{{ item.item }}' is high {% endif %} ---------------------------------------- loop: "{{ consumer_group_output.results }}" loop_control: label: "{{ item.item }}" - when: not item.failed # Skip printing for failed items - From e7e5c0b064f74d73d7eb62defbe65b2282a6b977 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 13:49:57 +0530 Subject: [PATCH 426/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 8d393c9cb4..c4e5f9516e 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ 
-26,7 +26,7 @@ debug: msg: | Consumer group '{{ item.item }}' has no active members. - GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID + // GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} {% if lag <= 9 %} From 17b2d6e15ec938487efc4adab84dcffc15d0e2e8 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 13:51:20 +0530 Subject: [PATCH 427/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index c4e5f9516e..db2cb64472 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,7 +26,7 @@ debug: msg: | Consumer group '{{ item.item }}' has no active members. 
- // GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID + # GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} {% if lag <= 9 %} From 217b1ef8def08bd8ff48817890c906e0f87c25ac Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 14:09:32 +0530 Subject: [PATCH 428/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index db2cb64472..07cf0b18c7 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -22,19 +22,16 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" + - name: Print Kafka lag status for each group debug: msg: | - Consumer group '{{ item.item }}' has no active members. 
- # GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID - {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} - {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} + {% set lag = item.stdout | regex_replace('.*LAG\\s+(\\d+)', '\\1') | int %} {% if lag <= 9 %} Kafka lag for group '{{ item.item }}' is normal (0 to 9) {% else %} Kafka lag for group '{{ item.item }}' is high {% endif %} - ---------------------------------------- loop: "{{ consumer_group_output.results }}" loop_control: label: "{{ item.item }}" From 757fd5ff534d5135fea7a5234b0038d946faa720 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 14:14:17 +0530 Subject: [PATCH 429/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 07cf0b18c7..28cd1b6f71 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -32,6 +32,14 @@ {% else %} Kafka lag for group '{{ item.item }}' is high {% endif %} + ---------------------------------------- + {% if lag > 0 %} + GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID + {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} + {% else %} + No active consumers for this group + {% endif %} + ---------------------------------------- loop: "{{ consumer_group_output.results }}" loop_control: label: "{{ item.item }}" From a00de990dbfa81c212286036fa35aebbe11391f6 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 14:23:40 +0530 Subject: [PATCH 430/616] 
Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 28cd1b6f71..2268745300 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -33,9 +33,9 @@ Kafka lag for group '{{ item.item }}' is high {% endif %} ---------------------------------------- - {% if lag > 0 %} + {% if 'No active consumers' not in item.stdout %} GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID - {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} + {{ item.stdout | regex_replace('.*GROUP\\s+(.*?)\\s+TOPIC', '\\1 TOPIC') | regex_replace('(\\n)+', '\\n') }} {% else %} No active consumers for this group {% endif %} From 9c2b323d1ff72d9042e4193dcbe9eedf98676c46 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 14:41:55 +0530 Subject: [PATCH 431/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 2268745300..c447ac058b 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -32,9 +32,7 @@ {% else %} Kafka lag for group '{{ item.item }}' is high {% endif %} - ---------------------------------------- {% if 'No active consumers' not in item.stdout %} - GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID {{ item.stdout | regex_replace('.*GROUP\\s+(.*?)\\s+TOPIC', '\\1 TOPIC') | regex_replace('(\\n)+', '\\n') }} {% else %} No active consumers for this group From 0a9862f9867c1a6f339ff4eb0a551e9833c34807 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> 
Date: Fri, 22 Mar 2024 14:51:47 +0530 Subject: [PATCH 432/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index c447ac058b..857f6d3c29 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -23,21 +23,17 @@ loop_control: label: "{{ item }}" - - name: Print Kafka lag status for each group + - name: Print Kafka lag status and detailed information for each group debug: msg: | - {% set lag = item.stdout | regex_replace('.*LAG\\s+(\\d+)', '\\1') | int %} - {% if lag <= 9 %} - Kafka lag for group '{{ item.item }}' is normal (0 to 9) - {% else %} - Kafka lag for group '{{ item.item }}' is high - {% endif %} {% if 'No active consumers' not in item.stdout %} + Kafka lag for group '{{ item.item }}' is {% set lag = item.stdout | regex_replace('.*LAG\\s+(\\d+)', '\\1') | int %}{% if lag <= 9 %}normal (0 to 9){% else %}high{% endif %} {{ item.stdout | regex_replace('.*GROUP\\s+(.*?)\\s+TOPIC', '\\1 TOPIC') | regex_replace('(\\n)+', '\\n') }} + ---------------------------------------- {% else %} No active consumers for this group - {% endif %} ---------------------------------------- + {% endif %} loop: "{{ consumer_group_output.results }}" loop_control: label: "{{ item.item }}" From 58d5cc1d14216bf98e130b6b8d603e643a17abc9 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 14:54:20 +0530 Subject: [PATCH 433/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 857f6d3c29..47145b5d89 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -23,13 +23,15 @@ loop_control: label: "{{ item }}" - - name: 
Print Kafka lag status and detailed information for each group + - name: Print Kafka group status and lag status for each group debug: msg: | {% if 'No active consumers' not in item.stdout %} - Kafka lag for group '{{ item.item }}' is {% set lag = item.stdout | regex_replace('.*LAG\\s+(\\d+)', '\\1') | int %}{% if lag <= 9 %}normal (0 to 9){% else %}high{% endif %} + GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID {{ item.stdout | regex_replace('.*GROUP\\s+(.*?)\\s+TOPIC', '\\1 TOPIC') | regex_replace('(\\n)+', '\\n') }} ---------------------------------------- + Kafka lag for group '{{ item.item }}' is {% set lag = item.stdout | regex_replace('.*LAG\\s+(\\d+)', '\\1') | int %}{% if lag <= 9 %}normal (0 to 9){% else %}high{% endif %} + ---------------------------------------- {% else %} No active consumers for this group ---------------------------------------- From 5a4b493d2e9b4a4119fa87cdeb3f1c632b688d2c Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 15:03:40 +0530 Subject: [PATCH 434/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 47145b5d89..821ac4f5d5 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -27,7 +27,9 @@ debug: msg: | {% if 'No active consumers' not in item.stdout %} + {% if loop.index == 1 %} GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID + {% endif %} {{ item.stdout | regex_replace('.*GROUP\\s+(.*?)\\s+TOPIC', '\\1 TOPIC') | regex_replace('(\\n)+', '\\n') }} ---------------------------------------- Kafka lag for group '{{ item.item }}' is {% set lag = item.stdout | regex_replace('.*LAG\\s+(\\d+)', '\\1') | int %}{% if lag <= 9 %}normal (0 to 9){% else %}high{% endif %} From 
808dbdfe49eb10e2cb5767ad70923a008983ccf2 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 15:19:24 +0530 Subject: [PATCH 435/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 821ac4f5d5..06c01fd3e6 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -21,23 +21,12 @@ - "dev-content-publish-group" - "dev-qrcode-image-generator-group" loop_control: - label: "{{ item }}" + loop_var: item - name: Print Kafka group status and lag status for each group debug: msg: | - {% if 'No active consumers' not in item.stdout %} - {% if loop.index == 1 %} - GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID - {% endif %} {{ item.stdout | regex_replace('.*GROUP\\s+(.*?)\\s+TOPIC', '\\1 TOPIC') | regex_replace('(\\n)+', '\\n') }} - ---------------------------------------- Kafka lag for group '{{ item.item }}' is {% set lag = item.stdout | regex_replace('.*LAG\\s+(\\d+)', '\\1') | int %}{% if lag <= 9 %}normal (0 to 9){% else %}high{% endif %} ---------------------------------------- - {% else %} - No active consumers for this group - ---------------------------------------- - {% endif %} loop: "{{ consumer_group_output.results }}" - loop_control: - label: "{{ item.item }}" From 20d8f285b69a997a3145920888ffcd7302966152 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 15:23:09 +0530 Subject: [PATCH 436/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 06c01fd3e6..6a96e4e05a 100644 --- a/ansible/kafka_lags_monitoring.yml +++ 
b/ansible/kafka_lags_monitoring.yml @@ -23,10 +23,9 @@ loop_control: loop_var: item - - name: Print Kafka group status and lag status for each group + - name: Print Kafka lag status for each group debug: msg: | - {{ item.stdout | regex_replace('.*GROUP\\s+(.*?)\\s+TOPIC', '\\1 TOPIC') | regex_replace('(\\n)+', '\\n') }} Kafka lag for group '{{ item.item }}' is {% set lag = item.stdout | regex_replace('.*LAG\\s+(\\d+)', '\\1') | int %}{% if lag <= 9 %}normal (0 to 9){% else %}high{% endif %} ---------------------------------------- loop: "{{ consumer_group_output.results }}" From 61ac99ce9d9352787c23c58b4d9a784b239687c0 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 15:27:07 +0530 Subject: [PATCH 437/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 6a96e4e05a..20a301b695 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -23,9 +23,15 @@ loop_control: loop_var: item - - name: Print Kafka lag status for each group + - name: Convert lag status into table format + set_fact: + lag_table: | + {% for result in consumer_group_output.results %} + {% set lag = result.stdout | regex_replace('.*LAG\\s+(\\d+)', '\\1') | int %} + {% set lag_status = "normal (0 to 9)" if lag <= 9 else "high" %} + {{ result.item + "," + lag_status }} + {% endfor %} + + - name: Print Kafka lag status table debug: - msg: | - Kafka lag for group '{{ item.item }}' is {% set lag = item.stdout | regex_replace('.*LAG\\s+(\\d+)', '\\1') | int %}{% if lag <= 9 %}normal (0 to 9){% else %}high{% endif %} - ---------------------------------------- - loop: "{{ consumer_group_output.results }}" + msg: "{{ lag_table | from_yaml }}" From ea40d02c4553fb096cc502d3f1bb33aaf2571dfe Mon Sep 17 00:00:00 2001 From: 
Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Fri, 22 Mar 2024 15:31:11 +0530 Subject: [PATCH 438/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 20a301b695..8d393c9cb4 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -21,17 +21,20 @@ - "dev-content-publish-group" - "dev-qrcode-image-generator-group" loop_control: - loop_var: item - - - name: Convert lag status into table format - set_fact: - lag_table: | - {% for result in consumer_group_output.results %} - {% set lag = result.stdout | regex_replace('.*LAG\\s+(\\d+)', '\\1') | int %} - {% set lag_status = "normal (0 to 9)" if lag <= 9 else "high" %} - {{ result.item + "," + lag_status }} - {% endfor %} - - - name: Print Kafka lag status table + label: "{{ item }}" + - name: Print Kafka lag status for each group debug: - msg: "{{ lag_table | from_yaml }}" + msg: | + Consumer group '{{ item.item }}' has no active members. 
+ GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID + {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} + {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} + {% if lag <= 9 %} + Kafka lag for group '{{ item.item }}' is normal (0 to 9) + {% else %} + Kafka lag for group '{{ item.item }}' is high + {% endif %} + ---------------------------------------- + loop: "{{ consumer_group_output.results }}" + loop_control: + label: "{{ item.item }}" From 16893b0f4150d27bcbf8325d32745822d92f8906 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 06:59:14 +0530 Subject: [PATCH 439/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 8d393c9cb4..9e92bf1039 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,7 +1,12 @@ +--- - name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no tasks: + - name: Initialize processed groups list + set_fact: + processed_groups: [] + - name: Loop through Kafka consumer groups and check lag status command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" register: consumer_group_output @@ -22,19 +27,33 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - - name: Print Kafka lag status for each group + when: "'{{ item }}' not in processed_groups" + + - name: Generate formatted table for Kafka lag status debug: msg: | Consumer group '{{ item.item }}' has no active members. 
- GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID + +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} + +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} {% if lag <= 9 %} Kafka lag for group '{{ item.item }}' is normal (0 to 9) {% else %} Kafka lag for group '{{ item.item }}' is high {% endif %} - ---------------------------------------- + +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ loop: "{{ consumer_group_output.results }}" loop_control: label: "{{ item.item }}" + when: "'{{ item.item }}' not in processed_groups" + + - name: Add processed group to the list + set_fact: + processed_groups: "{{ processed_groups + [item.item] }}" + loop: "{{ consumer_group_output.results }}" + loop_control: + label: "{{ item.item }}" + when: "'{{ item.item }}' not in processed_groups" From f9c81a8ebfc57ad64f1efd6b0822b9a974d6c4c7 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 07:10:24 +0530 Subject: [PATCH 440/616] Update kafka_lags_monitoring.yml --- 
ansible/kafka_lags_monitoring.yml | 27 ++++++++------------------- 1 file changed, 8 insertions(+), 19 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 9e92bf1039..08f3530c67 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,12 +1,7 @@ ---- - name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no tasks: - - name: Initialize processed groups list - set_fact: - processed_groups: [] - - name: Loop through Kafka consumer groups and check lag status command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" register: consumer_group_output @@ -27,33 +22,27 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - when: "'{{ item }}' not in processed_groups" - - name: Generate formatted table for Kafka lag status debug: msg: | + {% set lag_output = [] %} + {% for item in consumer_group_output.results %} + {% if "no active members" in item.stdout %} Consumer group '{{ item.item }}' has no active members. 
+ {% else %} +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} - +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} + {{ item.stdout | regex_replace('.*GROUP[^\\n]*\\n([\\s\\S]*)LAG', '\\1') | regex_replace('^\\n|\\n$', '') | regex_replace('(\\s{2,})', ' | ') | regex_replace('(\\s{2,})', ' ') | trim }} + {% set lag = item.stdout | regex_replace('.*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} {% if lag <= 9 %} Kafka lag for group '{{ item.item }}' is normal (0 to 9) {% else %} Kafka lag for group '{{ item.item }}' is high {% endif %} +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + {% endif %} + {% endfor %} loop: "{{ consumer_group_output.results }}" loop_control: label: "{{ item.item }}" - when: "'{{ item.item }}' not in processed_groups" - - - name: Add processed group to the list - set_fact: - processed_groups: "{{ processed_groups + [item.item] }}" - loop: "{{ consumer_group_output.results }}" - loop_control: - label: "{{ item.item }}" - when: "'{{ item.item }}' not in processed_groups" From 9a1bd2711c5fdeb3d7e2c51977601c03a27f6c39 Mon Sep 17 00:00:00 2001 From: 
Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 07:11:49 +0530 Subject: [PATCH 441/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 08f3530c67..95874568a0 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -33,7 +33,7 @@ +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - {{ item.stdout | regex_replace('.*GROUP[^\\n]*\\n([\\s\\S]*)LAG', '\\1') | regex_replace('^\\n|\\n$', '') | regex_replace('(\\s{2,})', ' | ') | regex_replace('(\\s{2,})', ' ') | trim }} + {{ item.stdout | regex_replace('.*GROUP[^\\n]*\\n([\\s\\S]*)LAG.*', '\\1') | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n[\\s\\S]*PARTITION.*\\n', '') | regex_replace('^\\n|\\n$', '') | regex_replace('(\\s{2,})', ' | ') | regex_replace('(\\s{2,})', ' ') | trim }} {% set lag = item.stdout | regex_replace('.*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} {% if lag <= 9 %} Kafka lag for group '{{ item.item }}' is normal (0 to 9) From 8f8a8d17d1ee4726fb51e100337a202c60d961d7 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 07:27:59 +0530 Subject: [PATCH 442/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 
95874568a0..d5ee2c4dab 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -22,27 +22,20 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - - name: Generate formatted table for Kafka lag status + - name: Print Kafka lag status for each group debug: msg: | - {% set lag_output = [] %} - {% for item in consumer_group_output.results %} - {% if "no active members" in item.stdout %} - Consumer group '{{ item.item }}' has no active members. - {% else %} - +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +-----------------------------------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | - +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - {{ item.stdout | regex_replace('.*GROUP[^\\n]*\\n([\\s\\S]*)LAG.*', '\\1') | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n[\\s\\S]*PARTITION.*\\n', '') | regex_replace('^\\n|\\n$', '') | regex_replace('(\\s{2,})', ' | ') | regex_replace('(\\s{2,})', ' ') | trim }} - {% set lag = item.stdout | regex_replace('.*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} + +-----------------------------------------------------------------------------------------------------------------------------------------------------+ + {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} + {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} {% if lag <= 9 %} Kafka lag for group '{{ item.item }}' is normal (0 to 9) {% else %} Kafka lag for group '{{ 
item.item }}' is high {% endif %} - +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - {% endif %} - {% endfor %} + +-----------------------------------------------------------------------------------------------------------------------------------------------------+ loop: "{{ consumer_group_output.results }}" loop_control: label: "{{ item.item }}" From c96149e91272e99fe52425e9bf7cc130a9b51dff Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 07:40:07 +0530 Subject: [PATCH 443/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index d5ee2c4dab..2a4a6f14a3 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -25,17 +25,8 @@ - name: Print Kafka lag status for each group debug: msg: | - +-----------------------------------------------------------------------------------------------------------------------------------------------------+ + +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | - +-----------------------------------------------------------------------------------------------------------------------------------------------------+ - {{ item.stdout | regex_replace('^[\\s\\S]*GROUP[^\\n]*\\n([\\s\\S]*)$', '\\1') | regex_replace('(\\n)+', '\\n') | regex_replace('^\\n|\\n$', '') }} - {% set lag = item.stdout | regex_replace('^[\\s\\S]*LAG[^0-9]*([0-9]+)[\\s\\S]*$', '\\1') | int %} - {% if lag <= 9 %} - Kafka lag for group '{{ 
item.item }}' is normal (0 to 9) - {% else %} - Kafka lag for group '{{ item.item }}' is high - {% endif %} - +-----------------------------------------------------------------------------------------------------------------------------------------------------+ - loop: "{{ consumer_group_output.results }}" - loop_control: - label: "{{ item.item }}" + +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + {{ consumer_group_output.stdout | regex_replace('^\\s*GROUP', 'GROUP') }} + +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ From bfb66171f1edfe4eab067139e443cf7e07b0b710 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 07:43:11 +0530 Subject: [PATCH 444/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 2a4a6f14a3..65b070ff54 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -28,5 +28,12 @@ +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - {{ consumer_group_output.stdout | regex_replace('^\\s*GROUP', 'GROUP') }} + {%- if consumer_group_output is defined and consumer_group_output.stdout is defined %} + {% for line in 
consumer_group_output.stdout_lines[2:] %} + {%- set values = line.split() %} + | {{ values[0] | regex_replace('^\\s*GROUP', 'GROUP') }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | + {% endfor %} +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + {%- else %} + Error: Kafka consumer group output not available or invalid. + {%- endif %} From c0a321235bf06aa559f9332749d55411eabd326f Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 07:51:03 +0530 Subject: [PATCH 445/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 65b070ff54..71e635c92c 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -25,15 +25,12 @@ - name: Print Kafka lag status for each group debug: msg: | - +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | - +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - {%- if consumer_group_output is defined and consumer_group_output.stdout is defined %} - {% for line in consumer_group_output.stdout_lines[2:] %} + +------------------------------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | 
CONSUMER-ID | HOST | CLIENT-ID | + +------------------------------------------------------------------------------------------------------------------------+ + {% set output_lines = consumer_group_output.results[0].stdout_lines %} + {%- for line in output_lines[2:] %} {%- set values = line.split() %} | {{ values[0] | regex_replace('^\\s*GROUP', 'GROUP') }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {% endfor %} - +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - {%- else %} - Error: Kafka consumer group output not available or invalid. - {%- endif %} + +------------------------------------------------------------------------------------------------------------------------+ From 4fb0da4721147161ec5dd423990a872d9323a2bd Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 07:56:50 +0530 Subject: [PATCH 446/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 71e635c92c..5cfae89829 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,7 +26,7 @@ debug: msg: | +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +------------------------------------------------------------------------------------------------------------------------+ {% set output_lines = 
consumer_group_output.results[0].stdout_lines %} {%- for line in output_lines[2:] %} From 0efaac45a05aadb4d87bc31773ab4ef8c70f1dc3 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 12:25:39 +0530 Subject: [PATCH 447/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 5cfae89829..27edcbf8c8 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,11 +26,13 @@ debug: msg: | +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +------------------------------------------------------------------------------------------------------------------------+ - {% set output_lines = consumer_group_output.results[0].stdout_lines %} + {% for result in consumer_group_output.results %} + {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} | {{ values[0] | regex_replace('^\\s*GROUP', 'GROUP') }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {% endfor %} +------------------------------------------------------------------------------------------------------------------------+ + {% endfor %} From e5e2dbe10ceea491ce717af5b0e35da0ecce4720 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 12:29:29 +0530 Subject: [PATCH 448/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 27edcbf8c8..549b867ec5 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,7 +26,7 @@ debug: msg: | +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +------------------------------------------------------------------------------------------------------------------------+ {% for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} From f91a7c9807f3ab62d8b437a8588d31810bb281b3 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 12:35:42 +0530 Subject: [PATCH 449/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 549b867ec5..0de2c08329 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -28,11 +28,14 @@ +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +------------------------------------------------------------------------------------------------------------------------+ - {% for result in consumer_group_output.results %} - {% set output_lines = result.stdout_lines %} - {%- for line in output_lines[2:] %} - {%- set values = line.split() %} - | {{ values[0] | regex_replace('^\\s*GROUP', 'GROUP') }} | {{ values[1] }} | {{ 
values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | + {%- for result in consumer_group_output.results %} + {%- set output_lines = result.stdout_lines %} + {%- set group_name = result.item %} + +------------------------------------------------------------------------------------------------------------------------+ + | {{ group_name }} | + +------------------------------------------------------------------------------------------------------------------------+ + {% for line in output_lines[2:] %} + | {{ line.split()[0] }} | {{ line.split()[1] }} | {{ line.split()[2] }} | {{ line.split()[3] }} | {{ line.split()[4] }} | {{ line.split()[5] }} | {{ line.split()[6] }} | {{ line.split()[7] }} | {{ line.split()[8] }} | {% endfor %} +------------------------------------------------------------------------------------------------------------------------+ {% endfor %} From f88dcdbe57e61277e7c9676d665b295018710520 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 12:42:49 +0530 Subject: [PATCH 450/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 0de2c08329..3db7d47f79 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,16 +26,16 @@ debug: msg: | +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +------------------------------------------------------------------------------------------------------------------------+ {%- for result in 
consumer_group_output.results %} - {%- set output_lines = result.stdout_lines %} - {%- set group_name = result.item %} + {% set output_lines = result.stdout_lines %} + {% if output_lines | length > 2 %} +------------------------------------------------------------------------------------------------------------------------+ - | {{ group_name }} | - +------------------------------------------------------------------------------------------------------------------------+ - {% for line in output_lines[2:] %} + {%- for line in output_lines[2:] %} | {{ line.split()[0] }} | {{ line.split()[1] }} | {{ line.split()[2] }} | {{ line.split()[3] }} | {{ line.split()[4] }} | {{ line.split()[5] }} | {{ line.split()[6] }} | {{ line.split()[7] }} | {{ line.split()[8] }} | - {% endfor %} + {%- endfor %} +------------------------------------------------------------------------------------------------------------------------+ + {% endif %} {% endfor %} + +------------------------------------------------------------------------------------------------------------------------+ From dd249fa5b28bae47fda2fa08e15454d98e06690d Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 12:52:47 +0530 Subject: [PATCH 451/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 3db7d47f79..0448f7f3f5 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,16 +26,11 @@ debug: msg: | +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | - 
+------------------------------------------------------------------------------------------------------------------------+ - {%- for result in consumer_group_output.results %} - {% set output_lines = result.stdout_lines %} - {% if output_lines | length > 2 %} + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +------------------------------------------------------------------------------------------------------------------------+ + {% set output_lines = item.stdout_lines %} {%- for line in output_lines[2:] %} - | {{ line.split()[0] }} | {{ line.split()[1] }} | {{ line.split()[2] }} | {{ line.split()[3] }} | {{ line.split()[4] }} | {{ line.split()[5] }} | {{ line.split()[6] }} | {{ line.split()[7] }} | {{ line.split()[8] }} | - {%- endfor %} - +------------------------------------------------------------------------------------------------------------------------+ - {% endif %} + {%- set values = line.split() %} + | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {% endfor %} +------------------------------------------------------------------------------------------------------------------------+ From ee48a6acb6a76a9ec66f8934b937153056982b3b Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 12:54:26 +0530 Subject: [PATCH 452/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 0448f7f3f5..4a66e209f3 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -22,14 +22,14 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" + loop_var: group_name # Define the loop variable - name: Print Kafka lag status for each 
group debug: msg: | +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +------------------------------------------------------------------------------------------------------------------------+ - {% set output_lines = item.stdout_lines %} - {%- for line in output_lines[2:] %} + {%- for line in consumer_group_output.results[0].stdout_lines[2:] %} {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {% endfor %} From a32283a656196307aadb9da06f4720b00ebe94ff Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 12:56:12 +0530 Subject: [PATCH 453/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 4a66e209f3..d81a30ffd4 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -22,15 +22,18 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - loop_var: group_name # Define the loop variable + register: consumer_group_results - name: Print Kafka lag status for each group debug: msg: | +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +------------------------------------------------------------------------------------------------------------------------+ - {%- for line in consumer_group_output.results[0].stdout_lines[2:] %} + {% for result in consumer_group_results.results %} + {% set 
output_lines = result.stdout_lines %} + {%- for line in output_lines[2:] %} {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {% endfor %} +------------------------------------------------------------------------------------------------------------------------+ + {% endfor %} From 1582dd10942d16618d5fcbf3c69c68ef3ceb4120 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 13:03:07 +0530 Subject: [PATCH 454/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index d81a30ffd4..46e549e59a 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,14 +26,14 @@ - name: Print Kafka lag status for each group debug: msg: | - +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | - +------------------------------------------------------------------------------------------------------------------------+ + +----------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + +----------------------------------------------------------------------------------------------------------------------------------------------------------------+ {% for result in consumer_group_results.results %} {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} 
| {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {% endfor %} - +------------------------------------------------------------------------------------------------------------------------+ {% endfor %} + +----------------------------------------------------------------------------------------------------------------------------------------------------------------+ From 9d2127003c169bb7538d0932db5243d8d6743671 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 13:12:20 +0530 Subject: [PATCH 455/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 46e549e59a..b9462d80e0 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,4 +1,4 @@ -- name: Display Kafka consumer group status + - name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no tasks: @@ -26,14 +26,15 @@ - name: Print Kafka lag status for each group debug: msg: | - +----------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | - +----------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +------------------------------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + 
+------------------------------------------------------------------------------------------------------------------------+ {% for result in consumer_group_results.results %} - {% set output_lines = result.stdout_lines %} + {%- set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} - {%- set values = line.split() %} - | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | + +------------------------------------------------------------------------------------------------------------------------+ + | {{ line.split()[0] }} | {{ line.split()[1] }} | {{ line.split()[2] }} | {{ line.split()[3] }} | {{ line.split()[4] }} | {{ line.split()[5] }} | {{ line.split()[6] }} | {{ line.split()[7] }} | {{ line.split()[8] }} | + +------------------------------------------------------------------------------------------------------------------------+ {% endfor %} {% endfor %} - +----------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +------------------------------------------------------------------------------------------------------------------------+ From 928ea43788014365eb0b07ea463ba96e05ff3cfe Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 13:14:55 +0530 Subject: [PATCH 456/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index b9462d80e0..7155c133d2 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,4 +1,5 @@ - - name: Display Kafka consumer group status +--- +- name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no 
tasks: @@ -22,19 +23,15 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - register: consumer_group_results - name: Print Kafka lag status for each group debug: msg: | +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ - {% for result in consumer_group_results.results %} - {%- set output_lines = result.stdout_lines %} + {% set output_lines = item.stdout_lines %} {%- for line in output_lines[2:] %} - +------------------------------------------------------------------------------------------------------------------------+ - | {{ line.split()[0] }} | {{ line.split()[1] }} | {{ line.split()[2] }} | {{ line.split()[3] }} | {{ line.split()[4] }} | {{ line.split()[5] }} | {{ line.split()[6] }} | {{ line.split()[7] }} | {{ line.split()[8] }} | - +------------------------------------------------------------------------------------------------------------------------+ - {% endfor %} + {%- set values = line.split() %} + | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | | {% endfor %} +------------------------------------------------------------------------------------------------------------------------+ From d78b8a47e01863a991180a1a828c38757758521b Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 14:15:21 +0530 Subject: [PATCH 457/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) 
diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 7155c133d2..9b46525a75 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -27,11 +27,11 @@ debug: msg: | +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +------------------------------------------------------------------------------------------------------------------------+ - {% set output_lines = item.stdout_lines %} + {% set output_lines = consumer_group_output.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} - | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | | + | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {% endfor %} +------------------------------------------------------------------------------------------------------------------------+ From 6a45be0a1dccec354e9d98c0269a3a92b46c911d Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 14:21:09 +0530 Subject: [PATCH 458/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 9b46525a75..3fbaa314c8 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -29,9 +29,11 @@ 
+------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +------------------------------------------------------------------------------------------------------------------------+ - {% set output_lines = consumer_group_output.stdout_lines %} + {% for result in consumer_group_output.results %} + {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {% endfor %} +------------------------------------------------------------------------------------------------------------------------+ + {% endfor %} From 6772895773234446c2f5434e0465f8f45b880354 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 14:25:39 +0530 Subject: [PATCH 459/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 3fbaa314c8..828e2ac3c9 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -27,7 +27,7 @@ debug: msg: | +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ {% for result in consumer_group_output.results %} {% set output_lines = 
result.stdout_lines %} From fb4f16f194ae21e94b15b65be689c57affe5675b Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 14:35:36 +0530 Subject: [PATCH 460/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 828e2ac3c9..38a5254512 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,4 +1,3 @@ ---- - name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no @@ -10,17 +9,6 @@ - "dev-audit-event-generator-group" - "telemetry-group" - "prometheus-metrics-consumer" - - "dev-post-publish-processor-group" - - "ml-project-service" - - "dev-audit-history-indexer-group" - - "learning-127.0.1.1" - - "dev-search-indexer-group" - - "outbound" - - "dev-enrolment-reconciliation-group" - - "devsamiksha" - - "dev-relation-cache-updater-group" - - "dev-content-publish-group" - - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - name: Print Kafka lag status for each group @@ -29,11 +17,9 @@ +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ - {% for result in consumer_group_output.results %} - {% set output_lines = result.stdout_lines %} + {% set output_lines = consumer_group_output.results[0].stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {% endfor 
%} +------------------------------------------------------------------------------------------------------------------------+ - {% endfor %} From aa0094ca53bd8c830e028e366c375964209fb622 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 14:38:18 +0530 Subject: [PATCH 461/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 38a5254512..eb3ea7750c 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,3 +1,4 @@ +--- - name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no @@ -9,17 +10,20 @@ - "dev-audit-event-generator-group" - "telemetry-group" - "prometheus-metrics-consumer" + # Add other consumer groups here as needed loop_control: label: "{{ item }}" - name: Print Kafka lag status for each group debug: msg: | +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +------------------------------------------------------------------------------------------------------------------------+ - {% set output_lines = consumer_group_output.results[0].stdout_lines %} + {% for result in consumer_group_output.results %} + {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {% endfor %} 
+------------------------------------------------------------------------------------------------------------------------+ + {% endfor %} From 8d66517a7d9aeec03524ac6aa18fa7c3a8021613 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 14:46:59 +0530 Subject: [PATCH 462/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index eb3ea7750c..850aef55fb 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -10,7 +10,6 @@ - "dev-audit-event-generator-group" - "telemetry-group" - "prometheus-metrics-consumer" - # Add other consumer groups here as needed loop_control: label: "{{ item }}" - name: Print Kafka lag status for each group @@ -19,11 +18,11 @@ +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +------------------------------------------------------------------------------------------------------------------------+ - {% for result in consumer_group_output.results %} + {%- for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | - {% endfor %} +------------------------------------------------------------------------------------------------------------------------+ {% endfor %} + {% endfor %} From 598e2775c88db732c19b4d3e2a9c92aec17627ee Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 
14:52:26 +0530 Subject: [PATCH 463/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 850aef55fb..5cb69c5acc 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -10,19 +10,37 @@ - "dev-audit-event-generator-group" - "telemetry-group" - "prometheus-metrics-consumer" + - "dev-post-publish-processor-group" + - "ml-project-service" + - "dev-audit-history-indexer-group" + - "learning-127.0.1.1" + - "dev-search-indexer-group" + - "outbound" + - "dev-enrolment-reconciliation-group" + - "devsamiksha" + - "dev-relation-cache-updater-group" + - "dev-content-publish-group" + - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - name: Print Kafka lag status for each group debug: msg: | +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ {%- for result in consumer_group_output.results %} - {% set output_lines = result.stdout_lines %} + {%- set output_lines = result.stdout_lines %} + {%- if output_lines is not none and output_lines | length > 2 %} + {%- set header_printed = false %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} + {%- if not header_printed %} + | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | + {%- set header_printed = true %} + {%- endif %} | {{ values[0] }} | {{ values[1] }} | {{ 
values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | +------------------------------------------------------------------------------------------------------------------------+ - {% endfor %} + {%- endfor %} + {%- endif %} {% endfor %} From 619e9f2d4e741a3011afac0b70b91fa989040d2f Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 14:58:27 +0530 Subject: [PATCH 464/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 5cb69c5acc..843551e268 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -29,18 +29,16 @@ +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ - {%- for result in consumer_group_output.results %} - {%- set output_lines = result.stdout_lines %} - {%- if output_lines is not none and output_lines | length > 2 %} - {%- set header_printed = false %} + {% for result in consumer_group_output.results %} + {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} - {%- if not header_printed %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | - {%- set header_printed = true %} - {%- endif %} - | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ 
values[8] }} | - +------------------------------------------------------------------------------------------------------------------------+ {%- endfor %} - {%- endif %} + +------------------------------------------------------------------------------------------------------------------------+ + + # Add a blank line after each group's output + {% if not loop.last %} + | + {% endif %} {% endfor %} From 76ae3af2b2729591e3483eb6333de291f7d8d86c Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 15:06:02 +0530 Subject: [PATCH 465/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 843551e268..498d730819 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -37,7 +37,9 @@ {%- endfor %} +------------------------------------------------------------------------------------------------------------------------+ - # Add a blank line after each group's output + +------------------------------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | + +------------------------------------------------------------------------------------------------------------------------+ {% if not loop.last %} | {% endif %} From 02bb0fdd003876468968c7be79dd60aeb0ed6ab6 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 15:15:39 +0530 Subject: [PATCH 466/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 498d730819..1888b1ba00 100644 --- 
a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -36,11 +36,7 @@ | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {%- endfor %} +------------------------------------------------------------------------------------------------------------------------+ - - +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | - +------------------------------------------------------------------------------------------------------------------------+ - {% if not loop.last %} + {%- if not loop.last %} | - {% endif %} - {% endfor %} + {%- endif %} + {%- endfor %} From b05e90d90034f5811e75118afeb3c9ff2f091107 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 15:25:20 +0530 Subject: [PATCH 467/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 1888b1ba00..1b28eb0a35 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -38,5 +38,8 @@ +------------------------------------------------------------------------------------------------------------------------+ {%- if not loop.last %} | + +------------------------------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | + +------------------------------------------------------------------------------------------------------------------------+ {%- endif %} - {%- endfor %} + {% endfor %} From 
6db5a6e61791e57118792fb59d97e1627fe4b549 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 15:36:18 +0530 Subject: [PATCH 468/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 1b28eb0a35..604d7f1f7a 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,16 +26,19 @@ - name: Print Kafka lag status for each group debug: msg: | + {%- set separator = "+------------------------------------------------------------------------------------------------------------------------+" %} +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ {% for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} - {%- for line in output_lines[2:] %} - {%- set values = line.split() %} + {% for line in output_lines[2:] %} + {% set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | + {%- if not loop.last %} + |{% endif %} {%- endfor %} - +------------------------------------------------------------------------------------------------------------------------+ + {{ separator[:values | map('length') | max + 1] }} {%- if not loop.last %} | +------------------------------------------------------------------------------------------------------------------------+ From 51837d2474df859c7ac5746c54f34e2c6175673a Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj 
<121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 15:50:06 +0530 Subject: [PATCH 469/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 604d7f1f7a..776e3bd730 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,19 +26,16 @@ - name: Print Kafka lag status for each group debug: msg: | - {%- set separator = "+------------------------------------------------------------------------------------------------------------------------+" %} +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ {% for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} - {% for line in output_lines[2:] %} - {% set values = line.split() %} + {%- for line in output_lines[2:] %} + {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | - {%- if not loop.last %} - |{% endif %} {%- endfor %} - {{ separator[:values | map('length') | max + 1] }} + +----------------------------------------------------------------------+ {%- if not loop.last %} | +------------------------------------------------------------------------------------------------------------------------+ From 2a287ff2e4800c10f45557a41f38e9e27dd1610a Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 15:54:53 +0530 Subject: [PATCH 470/616] Update 
kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 776e3bd730..68d8188363 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,4 +1,4 @@ ---- +-- - name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no @@ -27,19 +27,13 @@ debug: msg: | +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +------------------------------------------------------------------------------------------------------------------------+ {% for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | - {%- endfor %} - +----------------------------------------------------------------------+ - {%- if not loop.last %} - | - +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | + {% endfor %} +------------------------------------------------------------------------------------------------------------------------+ - {%- endif %} {% endfor %} From c1cf02fa537ccd71ac62a83d73502d068c2ee56c Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 23 Mar 2024 16:01:34 +0530 Subject: [PATCH 471/616] 
Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 68d8188363..1b28eb0a35 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,4 +1,4 @@ --- +--- - name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no @@ -27,13 +27,19 @@ debug: msg: | +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ {% for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | - {% endfor %} + {%- endfor %} + +------------------------------------------------------------------------------------------------------------------------+ + {%- if not loop.last %} + | + +------------------------------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ + {%- endif %} {% endfor %} From 8ea0b8ebd032e6b0a4fc746db78471401b4b76db Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sat, 
23 Mar 2024 23:57:52 +0530 Subject: [PATCH 472/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 1b28eb0a35..24082d421a 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,20 +26,20 @@ - name: Print Kafka lag status for each group debug: msg: | - +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | - +------------------------------------------------------------------------------------------------------------------------+ + +-----------------------------------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | + +-----------------------------------------------------------------------------------------------------------------------------+ {% for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} - | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | + | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {%- endfor %} - +------------------------------------------------------------------------------------------------------------------------+ + +-----------------------------------------------------------------------------------------------------------------------------+ {%- if not loop.last %} | - 
+------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | - +------------------------------------------------------------------------------------------------------------------------+ + +-----------------------------------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | + +-----------------------------------------------------------------------------------------------------------------------------+ {%- endif %} {% endfor %} From 3828c3cb4ae584d808fa41261e385c5cb07572f1 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Sun, 24 Mar 2024 00:00:09 +0530 Subject: [PATCH 473/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 24082d421a..1b28eb0a35 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,20 +26,20 @@ - name: Print Kafka lag status for each group debug: msg: | - +-----------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | - +-----------------------------------------------------------------------------------------------------------------------------+ + +------------------------------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | + 
+------------------------------------------------------------------------------------------------------------------------+ {% for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} - | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | + | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {%- endfor %} - +-----------------------------------------------------------------------------------------------------------------------------+ + +------------------------------------------------------------------------------------------------------------------------+ {%- if not loop.last %} | - +-----------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | - +-----------------------------------------------------------------------------------------------------------------------------+ + +------------------------------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | + +------------------------------------------------------------------------------------------------------------------------+ {%- endif %} {% endfor %} From 410a08fb24f9da47fc34793a7be2ef944831d083 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 11:40:06 +0530 Subject: [PATCH 474/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml 
index 1b28eb0a35..2d310c41ce 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -39,7 +39,10 @@ {%- if not loop.last %} | +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | + {%- endif %} + {%- if loop.index == 1 %} + {%- for _ in range(25) %} + {%- endfor %} +------------------------------------------------------------------------------------------------------------------------+ {%- endif %} {% endfor %} From 31bcb951fcd601844208ab7d3ed0ca0b5109b99d Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 11:53:58 +0530 Subject: [PATCH 475/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 2d310c41ce..75d43a4707 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -35,14 +35,11 @@ {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {%- endfor %} - +------------------------------------------------------------------------------------------------------------------------+ + +------------------------------------------------------------------------------------------------------------------------+{{ ' ' * 25 }} {%- if not loop.last %} | +------------------------------------------------------------------------------------------------------------------------+ - {%- endif %} - {%- if loop.index == 1 %} - {%- for _ in range(25) %} - {%- endfor %} + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID 
| | +------------------------------------------------------------------------------------------------------------------------+ {%- endif %} {% endfor %} From f80928345ceee3407817a37b65fa693f3d3618ce Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 11:56:35 +0530 Subject: [PATCH 476/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 75d43a4707..3e026798ef 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -35,7 +35,7 @@ {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {%- endfor %} - +------------------------------------------------------------------------------------------------------------------------+{{ ' ' * 25 }} + {{ ' ' * 25 }}+------------------------------------------------------------------------------------------------------------------------+ {%- if not loop.last %} | +------------------------------------------------------------------------------------------------------------------------+ From a25a547eabd615bc299ec5350fe1f4083d44ece0 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 12:02:18 +0530 Subject: [PATCH 477/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 3e026798ef..739f600cd6 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -35,7 +35,7 @@ {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | 
{{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {%- endfor %} - {{ ' ' * 25 }}+------------------------------------------------------------------------------------------------------------------------+ + {{ ' ' * 25 }}+---------------------------------------------------------------------------------------------------------------------------------------+ {%- if not loop.last %} | +------------------------------------------------------------------------------------------------------------------------+ From 9991cc350199072da693b170050036a4b8e53991 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 12:05:14 +0530 Subject: [PATCH 478/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 739f600cd6..c8f52f4334 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -35,7 +35,7 @@ {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {%- endfor %} - {{ ' ' * 25 }}+---------------------------------------------------------------------------------------------------------------------------------------+ + {{ ' ' * 25 }}+-------------------------------------------------------------------------------------------------------------------------------------+ {%- if not loop.last %} | +------------------------------------------------------------------------------------------------------------------------+ From f0f6723109fa2c84475cfac4d2d949ac8894253a Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 12:20:59 +0530 Subject: [PATCH 479/616] Update 
kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index c8f52f4334..6d24e93ad7 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -33,9 +33,9 @@ {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} - | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | + | {{ values[0] | ljust(30) }} | {{ values[1] | ljust(40) }} | {{ values[2] | ljust(10) }} | {{ values[3] | ljust(15) }} | {{ values[4] | ljust(15) }} | {{ values[5] | ljust(5) }} | {{ values[6] | ljust(70) }} | {{ values[7] | ljust(15) }} | {{ values[8] | ljust(15) }} | {%- endfor %} - {{ ' ' * 25 }}+-------------------------------------------------------------------------------------------------------------------------------------+ + +------------------------------------------------------------------------------------------------------------------------+ {%- if not loop.last %} | +------------------------------------------------------------------------------------------------------------------------+ From 978374d6eb8594029c97d02050a08bea54c6c23d Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 12:25:10 +0530 Subject: [PATCH 480/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 6d24e93ad7..611168a826 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -33,7 +33,8 @@ {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} - 
| {{ values[0] | ljust(30) }} | {{ values[1] | ljust(40) }} | {{ values[2] | ljust(10) }} | {{ values[3] | ljust(15) }} | {{ values[4] | ljust(15) }} | {{ values[5] | ljust(5) }} | {{ values[6] | ljust(70) }} | {{ values[7] | ljust(15) }} | {{ values[8] | ljust(15) }} | + | {{ values[0] }}{{ ' ' * (30 - values[0] | length) }} | {{ values[1] }}{{ ' ' * (40 - values[1] | length) }} | {{ values[2] }}{{ ' ' * (10 - values[2] | length) }} | {{ values[3] }}{{ ' ' * (15 - values[3] | length) }} | {{ values[4] }}{{ ' ' * (15 - values[4] | length) }} | {{ values[5] }}{{ ' ' * (5 - values[5] | length) }} | {{ values[6] }}{{ ' ' * (70 - values[6] | length) }} | {{ values[7] }}{{ ' ' * (15 - values[7] | length) }} | {{ values[8] }}{{ ' ' * (15 - values[8] | length) }} | + {%- endfor %} {%- endfor %} +------------------------------------------------------------------------------------------------------------------------+ {%- if not loop.last %} From 4e13f1122c23c82985db4ef554dc00f363cea577 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 12:28:57 +0530 Subject: [PATCH 481/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 611168a826..38526a171a 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -35,7 +35,6 @@ {%- set values = line.split() %} | {{ values[0] }}{{ ' ' * (30 - values[0] | length) }} | {{ values[1] }}{{ ' ' * (40 - values[1] | length) }} | {{ values[2] }}{{ ' ' * (10 - values[2] | length) }} | {{ values[3] }}{{ ' ' * (15 - values[3] | length) }} | {{ values[4] }}{{ ' ' * (15 - values[4] | length) }} | {{ values[5] }}{{ ' ' * (5 - values[5] | length) }} | {{ values[6] }}{{ ' ' * (70 - values[6] | length) }} | {{ values[7] }}{{ ' ' * (15 - values[7] | length) }} | {{ values[8] }}{{ ' ' * (15 - values[8] | 
length) }} | {%- endfor %} - {%- endfor %} +------------------------------------------------------------------------------------------------------------------------+ {%- if not loop.last %} | From fea400bfa8b359a80e5ae9078208b91149503d0d Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 12:39:05 +0530 Subject: [PATCH 482/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 38526a171a..1b28eb0a35 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -33,7 +33,7 @@ {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} - | {{ values[0] }}{{ ' ' * (30 - values[0] | length) }} | {{ values[1] }}{{ ' ' * (40 - values[1] | length) }} | {{ values[2] }}{{ ' ' * (10 - values[2] | length) }} | {{ values[3] }}{{ ' ' * (15 - values[3] | length) }} | {{ values[4] }}{{ ' ' * (15 - values[4] | length) }} | {{ values[5] }}{{ ' ' * (5 - values[5] | length) }} | {{ values[6] }}{{ ' ' * (70 - values[6] | length) }} | {{ values[7] }}{{ ' ' * (15 - values[7] | length) }} | {{ values[8] }}{{ ' ' * (15 - values[8] | length) }} | + | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {%- endfor %} +------------------------------------------------------------------------------------------------------------------------+ {%- if not loop.last %} From 95a4cd16959799590b390489368d79511bcb5079 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 12:46:26 +0530 Subject: [PATCH 483/616] Update kafka_lags_monitoring.yml From 87ed3d6616190938af2fd80760d60a2638b3cd4a Mon Sep 17 00:00:00 2001 From: 
Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 13:01:03 +0530 Subject: [PATCH 484/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 1b28eb0a35..20e851d376 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,4 +1,3 @@ ---- - name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no @@ -26,6 +25,7 @@ - name: Print Kafka lag status for each group debug: msg: | + {%- set limit = 38 %} +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ @@ -34,12 +34,17 @@ {%- for line in output_lines[2:] %} {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | + {%- if loop.index == limit %} + +------------------------------------------------------------------------------------------------+ + | End of Lag Output | + +------------------------------------------------------------------------------------------------+ + {% break %} + {%- endif %} {%- endfor %} - +------------------------------------------------------------------------------------------------------------------------+ - {%- if not loop.last %} - | + {% if not loop.last %} +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | 
+------------------------------------------------------------------------------------------------------------------------+ - {%- endif %} + {% endif %} {% endfor %} + +------------------------------------------------------------------------------------------------+ From 2c05bfb10006b7a7d984efbe9bd1f61578fbf716 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 13:04:26 +0530 Subject: [PATCH 485/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 20e851d376..c6f078fdcc 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -29,6 +29,7 @@ +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ + {% set end_of_output = false %} {% for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} @@ -38,9 +39,12 @@ +------------------------------------------------------------------------------------------------+ | End of Lag Output | +------------------------------------------------------------------------------------------------+ - {% break %} + {% set end_of_output = true %} {%- endif %} {%- endfor %} + {% if end_of_output %} + {%- break %} + {% endif %} {% if not loop.last %} +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | From 
7d07d10ad3fd4af374957cde3ee4e59426e7ab77 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 13:08:05 +0530 Subject: [PATCH 486/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index c6f078fdcc..58779fd69c 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,29 +26,26 @@ debug: msg: | {%- set limit = 38 %} - +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | - +------------------------------------------------------------------------------------------------------------------------+ - {% set end_of_output = false %} + {%- set end_of_output = false %} + +------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + +------------------------------------------------------------------------------------------------+ {% for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {%- if loop.index == limit %} + {%- set end_of_output = true %} + {% endif %} + {%- if not end_of_output %} + |------------------------------------------------------------------------------------------------| + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | 
+------------------------------------------------------------------------------------------------+ - | End of Lag Output | - +------------------------------------------------------------------------------------------------+ - {% set end_of_output = true %} {%- endif %} {%- endfor %} - {% if end_of_output %} - {%- break %} - {% endif %} - {% if not loop.last %} - +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | - +------------------------------------------------------------------------------------------------------------------------+ - {% endif %} + {%- if end_of_output %} + {%- set output_lines = [] %} + {%- endif %} {% endfor %} +------------------------------------------------------------------------------------------------+ From 4732d0e76fd4dc845923ae8540fc50c54e1d51b3 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 13:09:47 +0530 Subject: [PATCH 487/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 58779fd69c..8e26f805b3 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -27,9 +27,9 @@ msg: | {%- set limit = 38 %} {%- set end_of_output = false %} - +------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | - +------------------------------------------------------------------------------------------------+ + +------------------------------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | 
PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | + +------------------------------------------------------------------------------------------------------------------------+ {% for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} @@ -39,9 +39,9 @@ {%- set end_of_output = true %} {% endif %} {%- if not end_of_output %} - |------------------------------------------------------------------------------------------------| - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | - +------------------------------------------------------------------------------------------------+ + +------------------------------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | + +------------------------------------------------------------------------------------------------------------------------+ {%- endif %} {%- endfor %} {%- if end_of_output %} From 7ab3d7592c1c3a6e04e39446767834a92faf80a2 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 13:15:52 +0530 Subject: [PATCH 488/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 8e26f805b3..e2989ecc0f 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -27,9 +27,9 @@ msg: | {%- set limit = 38 %} {%- set end_of_output = false %} - +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | 
CLIENT-ID | | - +------------------------------------------------------------------------------------------------------------------------+ + +----------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + +----------------------------------------------------------------------------------------------------+ {% for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} @@ -39,13 +39,16 @@ {%- set end_of_output = true %} {% endif %} {%- if not end_of_output %} - +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | - +------------------------------------------------------------------------------------------------------------------------+ + |------------------------------------------------------------------------------------------------| + | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | + +------------------------------------------------------------------------------------------------+ {%- endif %} {%- endfor %} {%- if end_of_output %} - {%- set output_lines = [] %} + +------------------------------------------------------------------------------------------------+ + | End of Lag Output | + +------------------------------------------------------------------------------------------------+ + {% set output_lines = [] %} {%- endif %} {% endfor %} +------------------------------------------------------------------------------------------------+ From 0d07e75f535a48f0d752109d03b11e79681799c2 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj 
<121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 13:29:46 +0530 Subject: [PATCH 489/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index e2989ecc0f..1b28eb0a35 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,3 +1,4 @@ +--- - name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no @@ -25,30 +26,20 @@ - name: Print Kafka lag status for each group debug: msg: | - {%- set limit = 38 %} - {%- set end_of_output = false %} - +----------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | - +----------------------------------------------------------------------------------------------------+ + +------------------------------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | + +------------------------------------------------------------------------------------------------------------------------+ {% for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | - {%- if loop.index == limit %} - {%- set end_of_output = true %} - {% endif %} - {%- if not end_of_output %} - |------------------------------------------------------------------------------------------------| - | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ 
values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | - +------------------------------------------------------------------------------------------------+ - {%- endif %} {%- endfor %} - {%- if end_of_output %} - +------------------------------------------------------------------------------------------------+ - | End of Lag Output | - +------------------------------------------------------------------------------------------------+ - {% set output_lines = [] %} + +------------------------------------------------------------------------------------------------------------------------+ + {%- if not loop.last %} + | + +------------------------------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | + +------------------------------------------------------------------------------------------------------------------------+ {%- endif %} {% endfor %} - +------------------------------------------------------------------------------------------------+ From b05a57f8c79fcd38b96fcb9b6081031a01e12dd4 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 14:17:26 +0530 Subject: [PATCH 490/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 1b28eb0a35..36fcdca865 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -35,11 +35,10 @@ {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {%- endfor %} - 
+------------------------------------------------------------------------------------------------------------------------+ {%- if not loop.last %} - | +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ {%- endif %} + +------------------------------------------------------------------------------------------------------------------------+ {% endfor %} From e769da5ce41f8d77248722dede106bab9516d6cb Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 14:21:35 +0530 Subject: [PATCH 491/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 36fcdca865..1b28eb0a35 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -35,10 +35,11 @@ {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {%- endfor %} + +------------------------------------------------------------------------------------------------------------------------+ {%- if not loop.last %} + | +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ {%- endif %} - 
+------------------------------------------------------------------------------------------------------------------------+ {% endfor %} From be84081149a9af173ce637101d1d763ad8a21d36 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 14:36:18 +0530 Subject: [PATCH 492/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 1b28eb0a35..76d2c047eb 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -33,7 +33,7 @@ {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} - | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | + | {{ values[1] | truncate(width=32, end='') }} | {{ values[2] | truncate(width=42, end='') }} | {{ values[3] | int }} | {{ values[4] | int }} | {{ values[5] | int }} | {{ values[6] | truncate(width=35, end='') }} | {{ values[7] | truncate(width=14, end='') }} | {{ values[8] | truncate(width=14, end='') }} | {%- endfor %} +------------------------------------------------------------------------------------------------------------------------+ {%- if not loop.last %} From 0442e6990a95b9e94a36e676e251951f30a30c48 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 14:38:30 +0530 Subject: [PATCH 493/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 76d2c047eb..ca51564def 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -33,7 +33,7 @@ {% set 
output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} - | {{ values[1] | truncate(width=32, end='') }} | {{ values[2] | truncate(width=42, end='') }} | {{ values[3] | int }} | {{ values[4] | int }} | {{ values[5] | int }} | {{ values[6] | truncate(width=35, end='') }} | {{ values[7] | truncate(width=14, end='') }} | {{ values[8] | truncate(width=14, end='') }} | + | {{ values[1][:32] }} | {{ values[2][:42] }} | {{ values[3] | int }} | {{ values[4] | int }} | {{ values[5] | int }} | {{ values[6][:35] }} | {{ values[7][:14] }} | {{ values[8][:14] }} | {%- endfor %} +------------------------------------------------------------------------------------------------------------------------+ {%- if not loop.last %} From 2407c2c6b7d1790aecaf968dcae25e15f07291e1 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 16:35:18 +0530 Subject: [PATCH 494/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index ca51564def..c547af208b 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,16 +26,37 @@ - name: Print Kafka lag status for each group debug: msg: | - +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | - +------------------------------------------------------------------------------------------------------------------------+ + {% set empty_lines_count = 0 %} + {% set print_group_details = false %} + {% set last_line_separator = false %} + {% set empty_space_count = 0 %} {% for result in consumer_group_output.results %} {% set output_lines = 
result.stdout_lines %} {%- for line in output_lines[2:] %} + {%- if line.strip() == '' %} + {% set empty_lines_count = empty_lines_count + 1 %} + {%- if empty_lines_count >= 10 %} + {% set print_group_details = true %} + {%- endif %} + {%- elif print_group_details %} + {%- if last_line_separator %} + {%- if empty_space_count < 10 %} + {% for i in range(10 - empty_space_count) %} + | + {%- endfor %} + {%- endif %} + | Next line after the separator | + +------------------------------------------------------------------------------------------------------------------------+ + {% set last_line_separator = false %} + {% set empty_space_count = 0 %} + {%- endif %} + {%- if line.strip() == '+' and line.strip().endswith('+') %} + {% set last_line_separator = true %} + {%- endif %} {%- set values = line.split() %} | {{ values[1][:32] }} | {{ values[2][:42] }} | {{ values[3] | int }} | {{ values[4] | int }} | {{ values[5] | int }} | {{ values[6][:35] }} | {{ values[7][:14] }} | {{ values[8][:14] }} | {%- endfor %} - +------------------------------------------------------------------------------------------------------------------------+ + {%- endfor %} {%- if not loop.last %} | +------------------------------------------------------------------------------------------------------------------------+ From cb46584017885b2e5c63dac27cbdac7b986fa02b Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 16:37:35 +0530 Subject: [PATCH 495/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index c547af208b..4dcc9cce66 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -56,11 +56,10 @@ {%- set values = line.split() %} | {{ values[1][:32] }} | {{ values[2][:42] }} | {{ values[3] | int }} | {{ values[4] | int }} | {{ 
values[5] | int }} | {{ values[6][:35] }} | {{ values[7][:14] }} | {{ values[8][:14] }} | {%- endfor %} - {%- endfor %} {%- if not loop.last %} | +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ {%- endif %} - {% endfor %} + {%- endfor %} From a279e01c3b507b281b4a625cf95dc1e74d38496f Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 16:39:47 +0530 Subject: [PATCH 496/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 4dcc9cce66..c11a9c8035 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -37,7 +37,7 @@ {% set empty_lines_count = empty_lines_count + 1 %} {%- if empty_lines_count >= 10 %} {% set print_group_details = true %} - {%- endif %} + {% endif %} {%- elif print_group_details %} {%- if last_line_separator %} {%- if empty_space_count < 10 %} From c6f1b784aa963d1fddca4cf42b644a7b00463ebc Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 16:42:16 +0530 Subject: [PATCH 497/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index c11a9c8035..c547af208b 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -37,7 +37,7 @@ {% set empty_lines_count = empty_lines_count + 1 %} {%- if empty_lines_count >= 10 %} {% set 
print_group_details = true %} - {% endif %} + {%- endif %} {%- elif print_group_details %} {%- if last_line_separator %} {%- if empty_space_count < 10 %} @@ -56,10 +56,11 @@ {%- set values = line.split() %} | {{ values[1][:32] }} | {{ values[2][:42] }} | {{ values[3] | int }} | {{ values[4] | int }} | {{ values[5] | int }} | {{ values[6][:35] }} | {{ values[7][:14] }} | {{ values[8][:14] }} | {%- endfor %} + {%- endfor %} {%- if not loop.last %} | +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ {%- endif %} - {%- endfor %} + {% endfor %} From d349b978c36e2f0a734a040ec41de839c55b815a Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 16:45:21 +0530 Subject: [PATCH 498/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index c547af208b..667291c6da 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -38,7 +38,8 @@ {%- if empty_lines_count >= 10 %} {% set print_group_details = true %} {%- endif %} - {%- elif print_group_details %} + {%- endif %} + {%- if print_group_details %} {%- if last_line_separator %} {%- if empty_space_count < 10 %} {% for i in range(10 - empty_space_count) %} From 4d35e254b5b6e3e34480c0fc2173cd5df5764dea Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 16:48:41 +0530 Subject: [PATCH 499/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 
deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 667291c6da..2b40acc24a 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -26,6 +26,9 @@ - name: Print Kafka lag status for each group debug: msg: | + +------------------------------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | + +------------------------------------------------------------------------------------------------------------------------+ {% set empty_lines_count = 0 %} {% set print_group_details = false %} {% set last_line_separator = false %} @@ -38,8 +41,7 @@ {%- if empty_lines_count >= 10 %} {% set print_group_details = true %} {%- endif %} - {%- endif %} - {%- if print_group_details %} + {%- elif print_group_details %} {%- if last_line_separator %} {%- if empty_space_count < 10 %} {% for i in range(10 - empty_space_count) %} @@ -57,7 +59,6 @@ {%- set values = line.split() %} | {{ values[1][:32] }} | {{ values[2][:42] }} | {{ values[3] | int }} | {{ values[4] | int }} | {{ values[5] | int }} | {{ values[6][:35] }} | {{ values[7][:14] }} | {{ values[8][:14] }} | {%- endfor %} - {%- endfor %} {%- if not loop.last %} | +------------------------------------------------------------------------------------------------------------------------+ From 16b969a13a5fe0349a3071b55c2cc505f27e829e Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 16:51:09 +0530 Subject: [PATCH 500/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 2b40acc24a..a2a02e8301 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml 
@@ -41,7 +41,8 @@ {%- if empty_lines_count >= 10 %} {% set print_group_details = true %} {%- endif %} - {%- elif print_group_details %} + {%- endif %} + {%- if print_group_details %} {%- if last_line_separator %} {%- if empty_space_count < 10 %} {% for i in range(10 - empty_space_count) %} From c7e708a7d449b78000b573878bb8c24a34c75200 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 16:56:17 +0530 Subject: [PATCH 501/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 28 ++-------------------------- 1 file changed, 2 insertions(+), 26 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index a2a02e8301..1b28eb0a35 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -29,37 +29,13 @@ +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ - {% set empty_lines_count = 0 %} - {% set print_group_details = false %} - {% set last_line_separator = false %} - {% set empty_space_count = 0 %} {% for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} - {%- if line.strip() == '' %} - {% set empty_lines_count = empty_lines_count + 1 %} - {%- if empty_lines_count >= 10 %} - {% set print_group_details = true %} - {%- endif %} - {%- endif %} - {%- if print_group_details %} - {%- if last_line_separator %} - {%- if empty_space_count < 10 %} - {% for i in range(10 - empty_space_count) %} - | - {%- endfor %} - {%- endif %} - | Next line after the separator | - 
+------------------------------------------------------------------------------------------------------------------------+ - {% set last_line_separator = false %} - {% set empty_space_count = 0 %} - {%- endif %} - {%- if line.strip() == '+' and line.strip().endswith('+') %} - {% set last_line_separator = true %} - {%- endif %} {%- set values = line.split() %} - | {{ values[1][:32] }} | {{ values[2][:42] }} | {{ values[3] | int }} | {{ values[4] | int }} | {{ values[5] | int }} | {{ values[6][:35] }} | {{ values[7][:14] }} | {{ values[8][:14] }} | + | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {%- endfor %} + +------------------------------------------------------------------------------------------------------------------------+ {%- if not loop.last %} | +------------------------------------------------------------------------------------------------------------------------+ From 6724688cb258c38466cad04b88f85f649fa4c886 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 26 Mar 2024 17:49:06 +0530 Subject: [PATCH 502/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 1b28eb0a35..e4f6f71a2a 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -43,3 +43,4 @@ +------------------------------------------------------------------------------------------------------------------------+ {%- endif %} {% endfor %} + +------------------------------------------------------------------------------------------------------------------------+ # Closing line added here From fca52f8e194882c8058296d23d4f180858574170 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 
26 Mar 2024 17:55:16 +0530 Subject: [PATCH 503/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index e4f6f71a2a..5a951ef187 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -43,4 +43,4 @@ +------------------------------------------------------------------------------------------------------------------------+ {%- endif %} {% endfor %} - +------------------------------------------------------------------------------------------------------------------------+ # Closing line added here + +------------------------------------------------------------------------------------------------------------------------+ From 149118e4585d2de0addef23a2b7b409da9db2c87 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 27 Mar 2024 09:17:25 +0530 Subject: [PATCH 504/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 5a951ef187..1b28eb0a35 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -43,4 +43,3 @@ +------------------------------------------------------------------------------------------------------------------------+ {%- endif %} {% endfor %} - +------------------------------------------------------------------------------------------------------------------------+ From 9ae13325f23c17b57fd6c91650d42f3045293c1c Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 27 Mar 2024 09:29:01 +0530 Subject: [PATCH 505/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 1b28eb0a35..410672699d 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -7,10 +7,11 @@ command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" register: consumer_group_output loop: + - "jobmanager" - "dev-audit-event-generator-group" - "telemetry-group" - "prometheus-metrics-consumer" - - "dev-post-publish-processor-group" + - "create-entity-consumer-group" - "ml-project-service" - "dev-audit-history-indexer-group" - "learning-127.0.1.1" From 961e7d7cdd96b1e1a31ade094adcbcd06f83df22 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 27 Mar 2024 09:44:29 +0530 Subject: [PATCH 506/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 410672699d..a0a4a4182f 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,4 +1,3 @@ ---- - name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no @@ -24,6 +23,7 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" + - name: Print Kafka lag status for each group debug: msg: | @@ -35,12 +35,18 @@ {%- for line in output_lines[2:] %} {%- set values = line.split() %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | + {%- if values[0] != 'GROUP' %} + {%- if loop.first %} + | | | | | | | | | | + {%- endif %} + {%- endif %} + {%- if values[0] != 'GROUP' %} + | | | | | | | | | | + {%- endif %} {%- endfor %} - 
+------------------------------------------------------------------------------------------------------------------------+ - {%- if not loop.last %} + +------------------------------------------------------------------------------------------------------------------------+{% if not loop.last %} | +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | - +------------------------------------------------------------------------------------------------------------------------+ - {%- endif %} + +------------------------------------------------------------------------------------------------------------------------+{% endif %} {% endfor %} From 86eed0cc2cf01173faed2dcbef3a7d71bd3666db Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 27 Mar 2024 09:50:21 +0530 Subject: [PATCH 507/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index a0a4a4182f..54f730435a 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -37,14 +37,14 @@ | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {%- if values[0] != 'GROUP' %} {%- if loop.first %} - | | | | | | | | | | + {%- endif %} {%- endif %} {%- if values[0] != 'GROUP' %} - | | | | | | | | | | + {%- endif %} {%- endfor %} - +------------------------------------------------------------------------------------------------------------------------+{% if not loop.last %} + +------------------------------------------------------------------------------------------------------------------------+{% if 
not loop.last %} | +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | From 6c94b51bbfbf893b879a2eaca8857358bc9f2645 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 27 Mar 2024 10:02:14 +0530 Subject: [PATCH 508/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 54f730435a..1901b5f195 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -27,26 +27,33 @@ - name: Print Kafka lag status for each group debug: msg: | + {%- if loop.first %} +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ + {%- endif %} + {% set table_started = False %} {% for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} + {%- if values[0] == 'GROUP' %} + {%- if not table_started %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | - {%- if values[0] != 'GROUP' %} - {%- if loop.first %} - + {%- set table_started = True %} {%- endif %} - {%- endif %} - {%- if values[0] != 'GROUP' %} - + {%- else %} + | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ 
values[7] }} | {{ values[8] }} | {%- endif %} {%- endfor %} - +------------------------------------------------------------------------------------------------------------------------+{% if not loop.last %} + {%- if table_started and not loop.last %} + +------------------------------------------------------------------------------------------------------------------------+ | +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | - +------------------------------------------------------------------------------------------------------------------------+{% endif %} + +------------------------------------------------------------------------------------------------------------------------+ + {%- endif %} {% endfor %} + {%- if table_started %} + +------------------------------------------------------------------------------------------------------------------------+ + {%- endif %} From 18505f81d3e379ea2a6de4baa70f571447f229fb Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 27 Mar 2024 10:09:41 +0530 Subject: [PATCH 509/616] Update kafka_lags_monitoring.yml From c7f30cc3ceb2db72bf1463e768869620e5375833 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 27 Mar 2024 10:15:04 +0530 Subject: [PATCH 510/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 1901b5f195..410672699d 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,3 +1,4 @@ +--- - name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no @@ -23,37 +24,23 @@ - 
"dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - - name: Print Kafka lag status for each group debug: msg: | - {%- if loop.first %} +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ - {%- endif %} - {% set table_started = False %} {% for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} - {%- if values[0] == 'GROUP' %} - {%- if not table_started %} - | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | - {%- set table_started = True %} - {%- endif %} - {%- else %} | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | - {%- endif %} {%- endfor %} - {%- if table_started and not loop.last %} +------------------------------------------------------------------------------------------------------------------------+ + {%- if not loop.last %} | +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ {%- endif %} {% endfor %} - {%- if table_started %} - +------------------------------------------------------------------------------------------------------------------------+ - {%- endif %} From f224bb66bace1bdcb201308176470371e9f21def Mon Sep 17 
00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 27 Mar 2024 11:44:23 +0530 Subject: [PATCH 511/616] Update kafka_lags_monitoring.yml From e219890f0e1d38321b525038508f57fbf7ac6976 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 27 Mar 2024 11:56:15 +0530 Subject: [PATCH 512/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 410672699d..10e7c06450 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -24,9 +24,9 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - - name: Print Kafka lag status for each group - debug: - msg: | + - name: Format Kafka lag status for each group + set_fact: + formatted_output: | +------------------------------------------------------------------------------------------------------------------------+ | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ @@ -44,3 +44,8 @@ +------------------------------------------------------------------------------------------------------------------------+ {%- endif %} {% endfor %} + loop_control: + label: "{{ item }}" + - name: Print formatted Kafka lag status + debug: + msg: "{{ formatted_output }}" From 6d1f2a0a517cdc4fbca374ec2c2fdc297bd903f6 Mon Sep 17 00:00:00 2001 From: trigyn-Himanshu <137037616+trigyn-Himanshu@users.noreply.github.com> Date: Wed, 27 Mar 2024 12:07:04 +0530 Subject: [PATCH 513/616] Update kafka_lags_monitoring.yml Modifying the YAML to give formatted output. 
--- ansible/kafka_lags_monitoring.yml | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 10e7c06450..b732ebfbd6 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,4 +1,3 @@ ---- - name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no @@ -24,28 +23,30 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" + - name: Format Kafka lag status for each group set_fact: formatted_output: | +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +------------------------------------------------------------------------------------------------------------------------+ {% for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} - | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | + | {{ values[0] | regex_replace('(^.{0,30})(.{10})(.{0,20})', '\\1\\2...\\3') | center(27) }} | {{ values[1] | regex_replace('(^.{0,30})(.{10})(.{0,20})', '\\1\\2...\\3') | center(34) }} | {{ values[2] | center(10) }} | {{ values[3] | center(14) }} | {{ values[4] | center(15) }} | {{ values[5] | center(4) }} | {{ values[6] | center(38) }} | {{ values[7] | center(14) }} | {{ values[8] | center(14) }} | {%- endfor %} +------------------------------------------------------------------------------------------------------------------------+ - {%- if not loop.last %} + {% if not loop.last %} | 
+------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | +------------------------------------------------------------------------------------------------------------------------+ - {%- endif %} + {% endif %} {% endfor %} loop_control: label: "{{ item }}" + - name: Print formatted Kafka lag status debug: msg: "{{ formatted_output }}" From bfc60216b705a80e927192d1386c1de4af497183 Mon Sep 17 00:00:00 2001 From: trigyn-Himanshu <137037616+trigyn-Himanshu@users.noreply.github.com> Date: Wed, 27 Mar 2024 12:10:33 +0530 Subject: [PATCH 514/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index b732ebfbd6..410672699d 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,3 +1,4 @@ +--- - name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no @@ -23,30 +24,23 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - - - name: Format Kafka lag status for each group - set_fact: - formatted_output: | + - name: Print Kafka lag status for each group + debug: + msg: | +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ {% for result 
in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} {%- for line in output_lines[2:] %} {%- set values = line.split() %} - | {{ values[0] | regex_replace('(^.{0,30})(.{10})(.{0,20})', '\\1\\2...\\3') | center(27) }} | {{ values[1] | regex_replace('(^.{0,30})(.{10})(.{0,20})', '\\1\\2...\\3') | center(34) }} | {{ values[2] | center(10) }} | {{ values[3] | center(14) }} | {{ values[4] | center(15) }} | {{ values[5] | center(4) }} | {{ values[6] | center(38) }} | {{ values[7] | center(14) }} | {{ values[8] | center(14) }} | + | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | {%- endfor %} +------------------------------------------------------------------------------------------------------------------------+ - {% if not loop.last %} + {%- if not loop.last %} | +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | +------------------------------------------------------------------------------------------------------------------------+ - {% endif %} + {%- endif %} {% endfor %} - loop_control: - label: "{{ item }}" - - - name: Print formatted Kafka lag status - debug: - msg: "{{ formatted_output }}" From f071cf091d6bdb7dfe7e5db36eaf21bddcf416dc Mon Sep 17 00:00:00 2001 From: trigyn-Himanshu <137037616+trigyn-Himanshu@users.noreply.github.com> Date: Wed, 27 Mar 2024 12:20:16 +0530 Subject: [PATCH 515/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 37 +++++++++++++------------------ 1 file changed, 15 insertions(+), 22 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 
410672699d..01a92b480a 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,10 +1,9 @@ ---- - name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no tasks: - name: Loop through Kafka consumer groups and check lag status - command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" + shell: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" register: consumer_group_output loop: - "jobmanager" @@ -24,23 +23,17 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - - name: Print Kafka lag status for each group - debug: - msg: | - +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | - +------------------------------------------------------------------------------------------------------------------------+ - {% for result in consumer_group_output.results %} - {% set output_lines = result.stdout_lines %} - {%- for line in output_lines[2:] %} - {%- set values = line.split() %} - | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | - {%- endfor %} - +------------------------------------------------------------------------------------------------------------------------+ - {%- if not loop.last %} - | - +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | - +------------------------------------------------------------------------------------------------------------------------+ - {%- endif %} - {% endfor %} + + - 
name: Print formatted Kafka lag status + shell: | + echo "+------------------------------------------------------------------------------------------------------------------------+" + echo "| GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID |" + echo "+------------------------------------------------------------------------------------------------------------------------+" + for result in {{ consumer_group_output.results }}; do + echo "$result" | awk 'NR>2 {printf("| %-35s | %-29s | %-10s | %-14s | %-15s | %-4s | %-38s | %-14s | %-14s |\n", $1, $2, $3, $4, $5, $6, $7, $8, $9)}' + done + echo "+------------------------------------------------------------------------------------------------------------------------+" + register: formatted_output + + - debug: + msg: "{{ formatted_output.stdout }}" From 4799cc87520b9ac75b6cd45257d7337d45886bff Mon Sep 17 00:00:00 2001 From: trigyn-Himanshu <137037616+trigyn-Himanshu@users.noreply.github.com> Date: Wed, 27 Mar 2024 12:29:19 +0530 Subject: [PATCH 516/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 01a92b480a..a2657f627f 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -25,15 +25,20 @@ label: "{{ item }}" - name: Print formatted Kafka lag status - shell: | - echo "+------------------------------------------------------------------------------------------------------------------------+" - echo "| GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID |" - echo "+------------------------------------------------------------------------------------------------------------------------+" - for result in {{ consumer_group_output.results }}; do - echo "$result" | awk 'NR>2 {printf("| %-35s | %-29s | 
%-10s | %-14s | %-15s | %-4s | %-38s | %-14s | %-14s |\n", $1, $2, $3, $4, $5, $6, $7, $8, $9)}' - done - echo "+------------------------------------------------------------------------------------------------------------------------+" - register: formatted_output - - - debug: - msg: "{{ formatted_output.stdout }}" + debug: + msg: | + {% for result in consumer_group_output.results %} + {% set output_lines = result.stdout_lines %} + {% for line in output_lines[2:] %} + {% set values = line.split() %} + | Group: {{ values[0] }} + | Topic: {{ values[1] }} + | Partition: {{ values[2] }} + | Current-OFFSET: {{ values[3] }} + | LOG-END-OFFSET: {{ values[4] }} + | LAG: {{ values[5] }} + | CONSUMER-ID: {{ values[6] }} + | HOST: {{ values[7] }} + | CLIENT-ID: {{ values[8] }} + {% endfor %} + {% endfor %} From 929e50093162349c43ee9c881de34d2ae0bc736e Mon Sep 17 00:00:00 2001 From: trigyn-Himanshu <137037616+trigyn-Himanshu@users.noreply.github.com> Date: Wed, 27 Mar 2024 12:42:52 +0530 Subject: [PATCH 517/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index a2657f627f..a69c5f976b 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -30,15 +30,15 @@ {% for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} {% for line in output_lines[2:] %} - {% set values = line.split() %} - | Group: {{ values[0] }} - | Topic: {{ values[1] }} - | Partition: {{ values[2] }} - | Current-OFFSET: {{ values[3] }} - | LOG-END-OFFSET: {{ values[4] }} - | LAG: {{ values[5] }} - | CONSUMER-ID: {{ values[6] }} - | HOST: {{ values[7] }} - | CLIENT-ID: {{ values[8] }} + {% set values = line.split('|') %} + | GROUP: {{ values[1].strip() }} + | TOPIC: {{ values[2].strip() }} + | PARTITION: {{ values[3].strip() }} + | Current-OFFSET: {{ 
values[4].strip() }} + | LOG-END-OFFSET: {{ values[5].strip() }} + | LAG: {{ values[6].strip() }} + | CONSUMER-ID: {{ values[7].strip() }} + | HOST: {{ values[8].strip() }} + | CLIENT-ID: {{ values[9].strip() }} {% endfor %} {% endfor %} From dd33183e3eb0e8f13768e2998040fb49219ef2ec Mon Sep 17 00:00:00 2001 From: trigyn-Himanshu <137037616+trigyn-Himanshu@users.noreply.github.com> Date: Wed, 27 Mar 2024 16:44:18 +0530 Subject: [PATCH 518/616] Create Jenkinsfile Jenkins code for Druid --- pipelines/Druid/Jenkinsfile | 52 +++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 pipelines/Druid/Jenkinsfile diff --git a/pipelines/Druid/Jenkinsfile b/pipelines/Druid/Jenkinsfile new file mode 100644 index 0000000000..b92b3fa62c --- /dev/null +++ b/pipelines/Druid/Jenkinsfile @@ -0,0 +1,52 @@ +@Library('deploy-conf') _ +node() { + try { + timestamps { + String ANSI_GREEN = "\u001B[32m" + String ANSI_NORMAL = "\u001B[0m" + String ANSI_BOLD = "\u001B[1m" + String ANSI_RED = "\u001B[31m" + String ANSI_YELLOW = "\u001B[33m" + + stage('checkout public repo') { + folder = new File("$WORKSPACE/.git") + if (folder.exists()) + { + println "Found .git folder. Clearing it.." 
+ sh'git clean -fxd' + } + checkout scm + } + + ansiColor('xterm') { + stage('deploy'){ + values = [:] + currentWs = sh(returnStdout: true, script: 'pwd').trim() + envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() + module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() + jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() + ansiblePlaybook = "${currentWs}/ansible/druid_monitoring.yml" + ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass -vv" + values.put('currentWs', currentWs) + values.put('env', envDir) + values.put('module', module) + values.put('jobName', jobName) + values.put('ansiblePlaybook', ansiblePlaybook) + values.put('ansibleExtraArgs', ansibleExtraArgs) + println values + ansible_playbook_run(values) + currentBuild.result = 'SUCCESS' + currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" + } + } + } + } + catch (err) { + currentBuild.result = "FAILURE" + throw err + } + finally { + slack_notify(currentBuild.result) + email_notify() + } +} From b497a73ae0e9a311532f53f5a10b7c68099695ab Mon Sep 17 00:00:00 2001 From: trigyn-Himanshu <137037616+trigyn-Himanshu@users.noreply.github.com> Date: Wed, 27 Mar 2024 16:56:07 +0530 Subject: [PATCH 519/616] Create druid_monitoring.yml --- ansible/druid_monitoring.yml | 49 ++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 ansible/druid_monitoring.yml diff --git a/ansible/druid_monitoring.yml b/ansible/druid_monitoring.yml new file mode 100644 index 0000000000..dec1b81bdf --- /dev/null +++ b/ansible/druid_monitoring.yml @@ -0,0 +1,49 @@ +- name: Fetch Druid data source information + hosts: druid + gather_facts: no + tasks: + - name: Get Druid data source details + k8s_info: + api_version: v1 + kind: Pod + namespace: your_namespace + label_selectors: + app: druid + register: druid_pods + + - name: Extract Druid data source 
information + set_fact: + druid_data_sources: [] + loop: "{{ druid_pods.resources }}" + loop_control: + loop_var: pod_info + vars: + pod_name: "{{ pod_info.metadata.name }}" + pod_namespace: "{{ pod_info.metadata.namespace }}" + tasks: + - name: Get Druid data source name, endpoint, namespace, pod, service, and availability + shell: | + druid_data_source_name=$(curl -s http://localhost:8888/druid/indexer/v1/datasources) + druid_endpoint=$(kubectl get svc -n {{ pod_namespace }} | grep 'druid-broker' | awk '{print $4}') + druid_namespace="{{ pod_namespace }}" + druid_pod="{{ pod_name }}" + druid_service=$(kubectl get svc -n {{ pod_namespace }} | grep 'druid-broker' | awk '{print $1}') + druid_availability=$(kubectl get pod -n {{ pod_namespace }} {{ pod_name }} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') + echo "Druid Data Source Name: $druid_data_source_name" + echo "Druid Endpoint: $druid_endpoint" + echo "Namespace: $druid_namespace" + echo "Pod: $druid_pod" + echo "Service: $druid_service" + echo "Availability: $druid_availability" + register: druid_info + + - name: Add Druid data source information to the list + set_fact: + druid_data_sources: "{{ druid_data_sources + [{ + 'name': druid_info.stdout_lines[0], + 'endpoint': druid_info.stdout_lines[1], + 'namespace': druid_info.stdout_lines[2], + 'pod': druid_info.stdout_lines[3], + 'service': druid_info.stdout_lines[4], + 'availability': druid_info.stdout_lines[5] + }] }}" From 21c8366efdeb1aa9a70b7c2542f7671c62e218d9 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 27 Mar 2024 17:32:57 +0530 Subject: [PATCH 520/616] Update druid_monitoring.yml --- ansible/druid_monitoring.yml | 52 +++++++++++++++--------------------- 1 file changed, 21 insertions(+), 31 deletions(-) diff --git a/ansible/druid_monitoring.yml b/ansible/druid_monitoring.yml index dec1b81bdf..6932dc6704 100644 --- a/ansible/druid_monitoring.yml +++ 
b/ansible/druid_monitoring.yml @@ -13,37 +13,27 @@ - name: Extract Druid data source information set_fact: - druid_data_sources: [] + druid_data_sources: "{{ druid_data_sources | default([]) + [{ + 'name': item.metadata.name, + 'namespace': item.metadata.namespace + }] }}" loop: "{{ druid_pods.resources }}" loop_control: - loop_var: pod_info - vars: - pod_name: "{{ pod_info.metadata.name }}" - pod_namespace: "{{ pod_info.metadata.namespace }}" - tasks: - - name: Get Druid data source name, endpoint, namespace, pod, service, and availability - shell: | - druid_data_source_name=$(curl -s http://localhost:8888/druid/indexer/v1/datasources) - druid_endpoint=$(kubectl get svc -n {{ pod_namespace }} | grep 'druid-broker' | awk '{print $4}') - druid_namespace="{{ pod_namespace }}" - druid_pod="{{ pod_name }}" - druid_service=$(kubectl get svc -n {{ pod_namespace }} | grep 'druid-broker' | awk '{print $1}') - druid_availability=$(kubectl get pod -n {{ pod_namespace }} {{ pod_name }} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') - echo "Druid Data Source Name: $druid_data_source_name" - echo "Druid Endpoint: $druid_endpoint" - echo "Namespace: $druid_namespace" - echo "Pod: $druid_pod" - echo "Service: $druid_service" - echo "Availability: $druid_availability" - register: druid_info + loop_var: item - - name: Add Druid data source information to the list - set_fact: - druid_data_sources: "{{ druid_data_sources + [{ - 'name': druid_info.stdout_lines[0], - 'endpoint': druid_info.stdout_lines[1], - 'namespace': druid_info.stdout_lines[2], - 'pod': druid_info.stdout_lines[3], - 'service': druid_info.stdout_lines[4], - 'availability': druid_info.stdout_lines[5] - }] }}" + - name: Get Druid data source name, endpoint, pod, service, and availability + shell: | + druid_data_source_name=$(curl -s http://localhost:8888/druid/indexer/v1/datasources) + druid_endpoint=$(kubectl get svc -n {{ item.metadata.namespace }} | grep 'druid-broker' | awk '{print $4}') + 
druid_pod="{{ item.metadata.name }}" + druid_service=$(kubectl get svc -n {{ item.metadata.namespace }} | grep 'druid-broker' | awk '{print $1}') + druid_availability=$(kubectl get pod -n {{ item.metadata.namespace }} {{ item.metadata.name }} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') + echo "Druid Data Source Name: $druid_data_source_name" + echo "Druid Endpoint: $druid_endpoint" + echo "Namespace: {{ item.metadata.namespace }}" + echo "Pod: {{ item.metadata.name }}" + echo "Service: $druid_service" + echo "Availability: $druid_availability" + loop: "{{ druid_pods.resources }}" + loop_control: + loop_var: item From 6c5d797b1459a3fc32a3b42ea4cb6276da8f3f68 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 27 Mar 2024 17:34:48 +0530 Subject: [PATCH 521/616] Update druid_monitoring.yml --- ansible/druid_monitoring.yml | 47 +++++++++++++++++++++--------------- 1 file changed, 28 insertions(+), 19 deletions(-) diff --git a/ansible/druid_monitoring.yml b/ansible/druid_monitoring.yml index 6932dc6704..2607e8e7d3 100644 --- a/ansible/druid_monitoring.yml +++ b/ansible/druid_monitoring.yml @@ -1,8 +1,13 @@ +--- - name: Fetch Druid data source information - hosts: druid + hosts: localhost # assuming this task doesn't require remote hosts gather_facts: no tasks: - - name: Get Druid data source details + - name: Print debug message + debug: + msg: "Fetching Druid data source details" + + - name: Fetch Druid data source details k8s_info: api_version: v1 kind: Pod @@ -13,27 +18,31 @@ - name: Extract Druid data source information set_fact: - druid_data_sources: "{{ druid_data_sources | default([]) + [{ - 'name': item.metadata.name, - 'namespace': item.metadata.namespace - }] }}" - loop: "{{ druid_pods.resources }}" - loop_control: - loop_var: item + druid_data_sources: [] - - name: Get Druid data source name, endpoint, pod, service, and availability + - name: Get Druid data source name, endpoint, 
namespace, pod, service, and availability shell: | druid_data_source_name=$(curl -s http://localhost:8888/druid/indexer/v1/datasources) - druid_endpoint=$(kubectl get svc -n {{ item.metadata.namespace }} | grep 'druid-broker' | awk '{print $4}') - druid_pod="{{ item.metadata.name }}" - druid_service=$(kubectl get svc -n {{ item.metadata.namespace }} | grep 'druid-broker' | awk '{print $1}') - druid_availability=$(kubectl get pod -n {{ item.metadata.namespace }} {{ item.metadata.name }} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') + druid_endpoint=$(kubectl get svc -n {{ pod_namespace }} | grep 'druid-broker' | awk '{print $4}') + druid_namespace="{{ pod_namespace }}" + druid_pod="{{ pod_name }}" + druid_service=$(kubectl get svc -n {{ pod_namespace }} | grep 'druid-broker' | awk '{print $1}') + druid_availability=$(kubectl get pod -n {{ pod_namespace }} {{ pod_name }} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') echo "Druid Data Source Name: $druid_data_source_name" echo "Druid Endpoint: $druid_endpoint" - echo "Namespace: {{ item.metadata.namespace }}" - echo "Pod: {{ item.metadata.name }}" + echo "Namespace: $druid_namespace" + echo "Pod: $druid_pod" echo "Service: $druid_service" echo "Availability: $druid_availability" - loop: "{{ druid_pods.resources }}" - loop_control: - loop_var: item + register: druid_info + + - name: Add Druid data source information to the list + set_fact: + druid_data_sources: "{{ druid_data_sources + [{ + 'name': druid_info.stdout_lines[0], + 'endpoint': druid_info.stdout_lines[1], + 'namespace': druid_info.stdout_lines[2], + 'pod': druid_info.stdout_lines[3], + 'service': druid_info.stdout_lines[4], + 'availability': druid_info.stdout_lines[5] + }] }}" From 6f57c323b6d85481b0fc4040452140777a4a3f27 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 27 Mar 2024 17:36:18 +0530 Subject: [PATCH 522/616] Update druid_monitoring.yml --- 
ansible/druid_monitoring.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ansible/druid_monitoring.yml b/ansible/druid_monitoring.yml index 2607e8e7d3..6e431cf18a 100644 --- a/ansible/druid_monitoring.yml +++ b/ansible/druid_monitoring.yml @@ -19,6 +19,12 @@ - name: Extract Druid data source information set_fact: druid_data_sources: [] + loop: "{{ druid_pods.resources }}" + loop_control: + loop_var: pod_info + vars: + pod_name: "{{ pod_info.metadata.name }}" + pod_namespace: "{{ pod_info.metadata.namespace }}" - name: Get Druid data source name, endpoint, namespace, pod, service, and availability shell: | From 32267f2139506dfcedcde99d96993ae3fc764b16 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 27 Mar 2024 17:37:58 +0530 Subject: [PATCH 523/616] Update druid_monitoring.yml --- ansible/druid_monitoring.yml | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/ansible/druid_monitoring.yml b/ansible/druid_monitoring.yml index 6e431cf18a..02cbcf3b3c 100644 --- a/ansible/druid_monitoring.yml +++ b/ansible/druid_monitoring.yml @@ -1,13 +1,8 @@ ---- - name: Fetch Druid data source information - hosts: localhost # assuming this task doesn't require remote hosts + hosts: druid gather_facts: no tasks: - - name: Print debug message - debug: - msg: "Fetching Druid data source details" - - - name: Fetch Druid data source details + - name: Get Druid data source details k8s_info: api_version: v1 kind: Pod @@ -26,6 +21,7 @@ pod_name: "{{ pod_info.metadata.name }}" pod_namespace: "{{ pod_info.metadata.namespace }}" + # Move these tasks outside of the previous set_fact task - name: Get Druid data source name, endpoint, namespace, pod, service, and availability shell: | druid_data_source_name=$(curl -s http://localhost:8888/druid/indexer/v1/datasources) From ff5161733397ee7ee1bf78f9ad75ef4487750b50 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj 
<121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 27 Mar 2024 17:39:40 +0530 Subject: [PATCH 524/616] Update druid_monitoring.yml --- ansible/druid_monitoring.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/ansible/druid_monitoring.yml b/ansible/druid_monitoring.yml index 02cbcf3b3c..dc2b7accee 100644 --- a/ansible/druid_monitoring.yml +++ b/ansible/druid_monitoring.yml @@ -21,7 +21,6 @@ pod_name: "{{ pod_info.metadata.name }}" pod_namespace: "{{ pod_info.metadata.namespace }}" - # Move these tasks outside of the previous set_fact task - name: Get Druid data source name, endpoint, namespace, pod, service, and availability shell: | druid_data_source_name=$(curl -s http://localhost:8888/druid/indexer/v1/datasources) From 26b0c9834d31d79eb845f7556b4609baac0c15e0 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 27 Mar 2024 17:44:11 +0530 Subject: [PATCH 525/616] Update druid_monitoring.yml --- ansible/druid_monitoring.yml | 32 ++++++++------------------------ 1 file changed, 8 insertions(+), 24 deletions(-) diff --git a/ansible/druid_monitoring.yml b/ansible/druid_monitoring.yml index dc2b7accee..c33bb3c0a7 100644 --- a/ansible/druid_monitoring.yml +++ b/ansible/druid_monitoring.yml @@ -1,3 +1,4 @@ +--- - name: Fetch Druid data source information hosts: druid gather_facts: no @@ -6,7 +7,6 @@ k8s_info: api_version: v1 kind: Pod - namespace: your_namespace label_selectors: app: druid register: druid_pods @@ -19,31 +19,15 @@ loop_var: pod_info vars: pod_name: "{{ pod_info.metadata.name }}" - pod_namespace: "{{ pod_info.metadata.namespace }}" - - name: Get Druid data source name, endpoint, namespace, pod, service, and availability - shell: | - druid_data_source_name=$(curl -s http://localhost:8888/druid/indexer/v1/datasources) - druid_endpoint=$(kubectl get svc -n {{ pod_namespace }} | grep 'druid-broker' | awk '{print $4}') - druid_namespace="{{ pod_namespace }}" - druid_pod="{{ pod_name }}" 
- druid_service=$(kubectl get svc -n {{ pod_namespace }} | grep 'druid-broker' | awk '{print $1}') - druid_availability=$(kubectl get pod -n {{ pod_namespace }} {{ pod_name }} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') - echo "Druid Data Source Name: $druid_data_source_name" - echo "Druid Endpoint: $druid_endpoint" - echo "Namespace: $druid_namespace" - echo "Pod: $druid_pod" - echo "Service: $druid_service" - echo "Availability: $druid_availability" - register: druid_info + - name: Check Druid server status + command: | + kubectl get pod {{ pod_name }} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' + register: druid_status - - name: Add Druid data source information to the list + - name: Add Druid server status to the list set_fact: druid_data_sources: "{{ druid_data_sources + [{ - 'name': druid_info.stdout_lines[0], - 'endpoint': druid_info.stdout_lines[1], - 'namespace': druid_info.stdout_lines[2], - 'pod': druid_info.stdout_lines[3], - 'service': druid_info.stdout_lines[4], - 'availability': druid_info.stdout_lines[5] + 'name': pod_name, + 'status': druid_status.stdout }] }}" From 57992ef9ad4f615834ce955679481349c2659656 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 27 Mar 2024 17:47:44 +0530 Subject: [PATCH 526/616] Update druid_monitoring.yml --- ansible/druid_monitoring.yml | 37 ++++++------------------------------ 1 file changed, 6 insertions(+), 31 deletions(-) diff --git a/ansible/druid_monitoring.yml b/ansible/druid_monitoring.yml index c33bb3c0a7..3632471696 100644 --- a/ansible/druid_monitoring.yml +++ b/ansible/druid_monitoring.yml @@ -1,33 +1,8 @@ ---- -- name: Fetch Druid data source information - hosts: druid - gather_facts: no +- hosts: druid tasks: - - name: Get Druid data source details - k8s_info: - api_version: v1 - kind: Pod - label_selectors: - app: druid - register: druid_pods + - name: Gather service facts + service_facts: - - name: Extract 
Druid data source information - set_fact: - druid_data_sources: [] - loop: "{{ druid_pods.resources }}" - loop_control: - loop_var: pod_info - vars: - pod_name: "{{ pod_info.metadata.name }}" - - - name: Check Druid server status - command: | - kubectl get pod {{ pod_name }} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' - register: druid_status - - - name: Add Druid server status to the list - set_fact: - druid_data_sources: "{{ druid_data_sources + [{ - 'name': pod_name, - 'status': druid_status.stdout - }] }}" + - name: Print Druid service status using service facts + debug: + msg: "Druid Service Status: {{ ansible_facts.services['druid'].state }}" From 873ddc23fe6bb7fcaf6524b75580764f8d0f4ea8 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 27 Mar 2024 17:56:38 +0530 Subject: [PATCH 527/616] Update druid_monitoring.yml --- ansible/druid_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/druid_monitoring.yml b/ansible/druid_monitoring.yml index 3632471696..0bb727a46c 100644 --- a/ansible/druid_monitoring.yml +++ b/ansible/druid_monitoring.yml @@ -1,4 +1,4 @@ -- hosts: druid +- hosts: raw-coordinator tasks: - name: Gather service facts service_facts: From d78e6c60218a929186e0f4efd8c34f53758ea867 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 28 Mar 2024 14:05:58 +0530 Subject: [PATCH 528/616] Update druid_monitoring.yml --- ansible/druid_monitoring.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/druid_monitoring.yml b/ansible/druid_monitoring.yml index 0bb727a46c..eb32e46bfd 100644 --- a/ansible/druid_monitoring.yml +++ b/ansible/druid_monitoring.yml @@ -1,8 +1,8 @@ -- hosts: raw-coordinator +- hosts: dp-druid-ps tasks: - name: Gather service facts service_facts: - - name: Print Druid service status using service facts + - name: Print Druid service 
status debug: msg: "Druid Service Status: {{ ansible_facts.services['druid'].state }}" From d89e19f7ea36aaa14cd9e79f94dcf3c312126158 Mon Sep 17 00:00:00 2001 From: trigyn-Himanshu <137037616+trigyn-Himanshu@users.noreply.github.com> Date: Thu, 28 Mar 2024 16:17:55 +0530 Subject: [PATCH 529/616] Update druid_monitoring.yml --- ansible/druid_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/druid_monitoring.yml b/ansible/druid_monitoring.yml index eb32e46bfd..fc18b11308 100644 --- a/ansible/druid_monitoring.yml +++ b/ansible/druid_monitoring.yml @@ -1,4 +1,4 @@ -- hosts: dp-druid-ps +- hosts: sl-druid tasks: - name: Gather service facts service_facts: From 476de79b6d2872efb8b4ae389ba591b63e86ce16 Mon Sep 17 00:00:00 2001 From: trigyn-Himanshu <137037616+trigyn-Himanshu@users.noreply.github.com> Date: Thu, 28 Mar 2024 16:25:55 +0530 Subject: [PATCH 530/616] Update druid_monitoring.yml --- ansible/druid_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/druid_monitoring.yml b/ansible/druid_monitoring.yml index fc18b11308..375fcf7899 100644 --- a/ansible/druid_monitoring.yml +++ b/ansible/druid_monitoring.yml @@ -1,4 +1,4 @@ -- hosts: sl-druid +- hosts: druid tasks: - name: Gather service facts service_facts: From 928f02b085977e1112ce7d9a2c7b86f0d89590d8 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Thu, 28 Mar 2024 17:11:07 +0530 Subject: [PATCH 531/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 32 ++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index a69c5f976b..410672699d 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,9 +1,10 @@ +--- - name: Display Kafka consumer group status hosts: ingestion-cluster-kafka gather_facts: no tasks: - name: 
Loop through Kafka consumer groups and check lag status - shell: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" + command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" register: consumer_group_output loop: - "jobmanager" @@ -23,22 +24,23 @@ - "dev-qrcode-image-generator-group" loop_control: label: "{{ item }}" - - - name: Print formatted Kafka lag status + - name: Print Kafka lag status for each group debug: msg: | + +------------------------------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | + +------------------------------------------------------------------------------------------------------------------------+ {% for result in consumer_group_output.results %} {% set output_lines = result.stdout_lines %} - {% for line in output_lines[2:] %} - {% set values = line.split('|') %} - | GROUP: {{ values[1].strip() }} - | TOPIC: {{ values[2].strip() }} - | PARTITION: {{ values[3].strip() }} - | Current-OFFSET: {{ values[4].strip() }} - | LOG-END-OFFSET: {{ values[5].strip() }} - | LAG: {{ values[6].strip() }} - | CONSUMER-ID: {{ values[7].strip() }} - | HOST: {{ values[8].strip() }} - | CLIENT-ID: {{ values[9].strip() }} - {% endfor %} + {%- for line in output_lines[2:] %} + {%- set values = line.split() %} + | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | + {%- endfor %} + +------------------------------------------------------------------------------------------------------------------------+ + {%- if not loop.last %} + | + +------------------------------------------------------------------------------------------------------------------------+ + | GROUP | TOPIC | PARTITION | 
CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | + +------------------------------------------------------------------------------------------------------------------------+ + {%- endif %} {% endfor %} From 61d15c447673c2502295b08500e0899675021736 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 02:45:11 +0530 Subject: [PATCH 532/616] Create Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 50 ++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 pipelines/check-ssl-expiry/Jenkinsfile diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile new file mode 100644 index 0000000000..e903003f6e --- /dev/null +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -0,0 +1,50 @@ +@Library('deploy-conf') _ +node() { + try { + String ANSI_GREEN = "\u001B[32m" + String ANSI_NORMAL = "\u001B[0m" + String ANSI_BOLD = "\u001B[1m" + String ANSI_RED = "\u001B[31m" + String ANSI_YELLOW = "\u001B[33m" + + stage('checkout public repo') { + folder = new File("$WORKSPACE/.git") + if (folder.exists()) + { + println "Found .git folder. Clearing it.." 
+ sh'git clean -fxd' + } + checkout scm + } + + ansiColor('xterm') { + stage('deploy'){ + values = [:] + envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() + module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() + jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() + currentWs = sh(returnStdout: true, script: 'pwd').trim() + ansiblePlaybook = "${currentWs}/ansible/check_ssl_expiry.yml" + ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass" + values.put('currentWs', currentWs) + values.put('env', envDir) + values.put('module', module) + values.put('jobName', jobName) + values.put('ansiblePlaybook', ansiblePlaybook) + values.put('ansibleExtraArgs', ansibleExtraArgs) + println values + ansible_playbook_run(values) + currentBuild.result = 'SUCCESS' + currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" + } + } + } + catch (err) { + currentBuild.result = 'FAILURE' + throw err + } + finally { + slack_notify(currentBuild.result) + email_notify() + } +} From e5a2e8c2349e6cf3a4533e53f09317ede66282f9 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 02:46:05 +0530 Subject: [PATCH 533/616] Create check_ssl_expiry.yml --- ansible/check_ssl_expiry.yml | 43 ++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 ansible/check_ssl_expiry.yml diff --git a/ansible/check_ssl_expiry.yml b/ansible/check_ssl_expiry.yml new file mode 100644 index 0000000000..09637566b6 --- /dev/null +++ b/ansible/check_ssl_expiry.yml @@ -0,0 +1,43 @@ +--- +- name: Check SSL certificate expiry dates + hosts: localhost + tasks: + - name: Fetch SSL certificate information + uri: + url: "https://{{ item }}" + return_content: yes + register: certificate_info + with_items: + - ops.diksha.gov.in + - api.diksha.gov.in + - diksha.gov.in + - vdn.diksha.gov.in 
+ - merge.diksha.gov.in + - obj.diksha.gov.in + - obj.vdn.diksha.gov.in + - static.diksha.gov.in + - oci.diksha.gov.in + - myaccess.diksha.orcl.cloud/login + - files.odev.oci.diksha.gov.in + - vdn.oci.diksha.gov.in + - dev.oci.diksha.gov.in + - support.diksha.gov.in + - teachersupport.diksha.gov.in + - vsk.ndear.gov.in + - sskvsk.karnataka.gov.in + - vsk.schooleducationharyana.gov.in + - pp-myjp.diksha.gov.in + - ejaaduipitara.ncert.gov.in + + - name: Extract SSL certificate expiry date + set_fact: + expiry_date: "{{ (item.content | regex_findall('(not valid after|expiry date|expire date|expires on|expire on)(.*)\n', '\\2') | first).split('(')[0] }}" + days_left: "{{ ((expiry_date | to_datetime(format='%d %b %Y %H:%M:%S GMT')) - (ansible_date_time.iso8601 | to_datetime)).days }}" + with_items: + - "{{ certificate_info.results }}" + + - name: Display SSL certificate expiry dates + debug: + msg: "{{ item.item }} - Expiry Date: {{ item.expiry_date }} ({{ item.days_left }} days left)" + with_items: + - "{{ certificate_info.results }}" From e8343ad4050c9cb79d95a0f84f17cbfc80c431df Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 03:01:47 +0530 Subject: [PATCH 534/616] Update check_ssl_expiry.yml --- ansible/check_ssl_expiry.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible/check_ssl_expiry.yml b/ansible/check_ssl_expiry.yml index 09637566b6..8fe718690d 100644 --- a/ansible/check_ssl_expiry.yml +++ b/ansible/check_ssl_expiry.yml @@ -7,6 +7,7 @@ url: "https://{{ item }}" return_content: yes register: certificate_info + ignore_errors: yes with_items: - ops.diksha.gov.in - api.diksha.gov.in From 6ae8a80177e0d799ab0b61ec2717ecff9fcf059b Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 03:06:53 +0530 Subject: [PATCH 535/616] Update check_ssl_expiry.yml --- ansible/check_ssl_expiry.yml | 16 +++++++++++----- 1 file 
changed, 11 insertions(+), 5 deletions(-) diff --git a/ansible/check_ssl_expiry.yml b/ansible/check_ssl_expiry.yml index 8fe718690d..4d9a9722b2 100644 --- a/ansible/check_ssl_expiry.yml +++ b/ansible/check_ssl_expiry.yml @@ -6,8 +6,11 @@ uri: url: "https://{{ item }}" return_content: yes - register: certificate_info - ignore_errors: yes + status_code: + - 200 + - 201 # Add other status codes you want to treat as successful + register: certificate_info + ignore_errors: yes # Ignore all errors, including non-200 status codes with_items: - ops.diksha.gov.in - api.diksha.gov.in @@ -34,11 +37,14 @@ set_fact: expiry_date: "{{ (item.content | regex_findall('(not valid after|expiry date|expire date|expires on|expire on)(.*)\n', '\\2') | first).split('(')[0] }}" days_left: "{{ ((expiry_date | to_datetime(format='%d %b %Y %H:%M:%S GMT')) - (ansible_date_time.iso8601 | to_datetime)).days }}" + status_code: "{{ item.status }}" with_items: - - "{{ certificate_info.results }}" + - "{{ certificate_info.results | default([]) }}" + when: certificate_info is defined and certificate_info.results | length > 0 - name: Display SSL certificate expiry dates debug: - msg: "{{ item.item }} - Expiry Date: {{ item.expiry_date }} ({{ item.days_left }} days left)" + msg: "{{ item.item }} - Status Code: {{ item.status_code }} | Expiry Date: {{ item.expiry_date }} ({{ item.days_left }} days left)" with_items: - - "{{ certificate_info.results }}" + - "{{ certificate_info.results | default([]) }}" + when: certificate_info is defined and certificate_info.results | length > 0 From 141ac7e9c2a93e12bf7fc177ca2f7dd391284ac1 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 03:11:35 +0530 Subject: [PATCH 536/616] Update check_ssl_expiry.yml --- ansible/check_ssl_expiry.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/ansible/check_ssl_expiry.yml b/ansible/check_ssl_expiry.yml index 4d9a9722b2..36930f7c01 
100644 --- a/ansible/check_ssl_expiry.yml +++ b/ansible/check_ssl_expiry.yml @@ -37,14 +37,11 @@ set_fact: expiry_date: "{{ (item.content | regex_findall('(not valid after|expiry date|expire date|expires on|expire on)(.*)\n', '\\2') | first).split('(')[0] }}" days_left: "{{ ((expiry_date | to_datetime(format='%d %b %Y %H:%M:%S GMT')) - (ansible_date_time.iso8601 | to_datetime)).days }}" - status_code: "{{ item.status }}" with_items: - "{{ certificate_info.results | default([]) }}" - when: certificate_info is defined and certificate_info.results | length > 0 - name: Display SSL certificate expiry dates debug: - msg: "{{ item.item }} - Status Code: {{ item.status_code }} | Expiry Date: {{ item.expiry_date }} ({{ item.days_left }} days left)" + msg: "{{ item.item.url }} - Status Code: {{ item.status }} | Expiry Date: {{ item.expiry_date }} ({{ item.days_left }} days left)" with_items: - "{{ certificate_info.results | default([]) }}" - when: certificate_info is defined and certificate_info.results | length > 0 From 6a98f19d4e368b9670194c7f063add946827800c Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 03:22:11 +0530 Subject: [PATCH 537/616] Update check_ssl_expiry.yml --- ansible/check_ssl_expiry.yml | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/ansible/check_ssl_expiry.yml b/ansible/check_ssl_expiry.yml index 36930f7c01..fd81c10340 100644 --- a/ansible/check_ssl_expiry.yml +++ b/ansible/check_ssl_expiry.yml @@ -33,15 +33,17 @@ - pp-myjp.diksha.gov.in - ejaaduipitara.ncert.gov.in - - name: Extract SSL certificate expiry date + - name: Extract SSL certificate expiry date and days left set_fact: expiry_date: "{{ (item.content | regex_findall('(not valid after|expiry date|expire date|expires on|expire on)(.*)\n', '\\2') | first).split('(')[0] }}" - days_left: "{{ ((expiry_date | to_datetime(format='%d %b %Y %H:%M:%S GMT')) - (ansible_date_time.iso8601 | 
to_datetime)).days }}" - with_items: - - "{{ certificate_info.results | default([]) }}" + days_left: "{{ ((expiry_date | to_datetime(format='%b %d %H:%M:%S %Y %Z')) - (ansible_date_time.iso8601 | to_datetime)).days }}" + status_code: "{{ item.status }}" + success: "{{ item.status == 200 }}" + with_items: "{{ certificate_info.results | default([]) }}" + when: certificate_info is defined and certificate_info.results | length > 0 - name: Display SSL certificate expiry dates debug: - msg: "{{ item.item.url }} - Status Code: {{ item.status }} | Expiry Date: {{ item.expiry_date }} ({{ item.days_left }} days left)" - with_items: - - "{{ certificate_info.results | default([]) }}" + msg: "{{ item.item }} - Status Code: {{ item.status_code }} | Expiry Date: {{ item.expiry_date }} ({{ item.days_left }} days left)" + with_items: "{{ certificate_info.results | default([]) }}" + when: item.success | default(true) From 388e1b98ef4821063f11eb52c0c2fe22affe6e5e Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 03:25:34 +0530 Subject: [PATCH 538/616] Update check_ssl_expiry.yml --- ansible/check_ssl_expiry.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/check_ssl_expiry.yml b/ansible/check_ssl_expiry.yml index fd81c10340..bd890116a2 100644 --- a/ansible/check_ssl_expiry.yml +++ b/ansible/check_ssl_expiry.yml @@ -31,7 +31,7 @@ - sskvsk.karnataka.gov.in - vsk.schooleducationharyana.gov.in - pp-myjp.diksha.gov.in - - ejaaduipitara.ncert.gov.in + - ejaaduipitara.ncert.gov.in. 
- name: Extract SSL certificate expiry date and days left set_fact: From e845c18c8cd0a99984cf2f074005f1fda48316f6 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 03:31:35 +0530 Subject: [PATCH 539/616] Update check_ssl_expiry.yml --- ansible/check_ssl_expiry.yml | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/ansible/check_ssl_expiry.yml b/ansible/check_ssl_expiry.yml index bd890116a2..f904693f86 100644 --- a/ansible/check_ssl_expiry.yml +++ b/ansible/check_ssl_expiry.yml @@ -10,7 +10,6 @@ - 200 - 201 # Add other status codes you want to treat as successful register: certificate_info - ignore_errors: yes # Ignore all errors, including non-200 status codes with_items: - ops.diksha.gov.in - api.diksha.gov.in @@ -31,7 +30,7 @@ - sskvsk.karnataka.gov.in - vsk.schooleducationharyana.gov.in - pp-myjp.diksha.gov.in - - ejaaduipitara.ncert.gov.in. + - ejaaduipitara.ncert.gov.in - name: Extract SSL certificate expiry date and days left set_fact: @@ -40,10 +39,14 @@ status_code: "{{ item.status }}" success: "{{ item.status == 200 }}" with_items: "{{ certificate_info.results | default([]) }}" - when: certificate_info is defined and certificate_info.results | length > 0 + when: certificate_info is defined and certificate_info.results | length > 0 and (item.status == 200 or item.status == 201) - name: Display SSL certificate expiry dates debug: - msg: "{{ item.item }} - Status Code: {{ item.status_code }} | Expiry Date: {{ item.expiry_date }} ({{ item.days_left }} days left)" + msg: "{{ item.item }} - Status Code: {{ item.status }} | Expiry Date: {{ item.expiry_date }} ({{ item.days_left }} days left)" with_items: "{{ certificate_info.results | default([]) }}" - when: item.success | default(true) + when: item.success | default(false) + + # Print the output directly in Jenkins console + - debug: + var: certificate_info.results From b2b2f975365a55350ac3bca6b3686c4e549691f9 
Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 06:29:03 +0530 Subject: [PATCH 540/616] Update check_ssl_expiry.yml --- ansible/check_ssl_expiry.yml | 41 +++++++++++++++--------------------- 1 file changed, 17 insertions(+), 24 deletions(-) diff --git a/ansible/check_ssl_expiry.yml b/ansible/check_ssl_expiry.yml index f904693f86..a7dd0121fe 100644 --- a/ansible/check_ssl_expiry.yml +++ b/ansible/check_ssl_expiry.yml @@ -1,16 +1,17 @@ --- -- name: Check SSL certificate expiry dates - hosts: localhost +- name: Check SSL Certificate Expiry + hosts: jenkins-master + gather_facts: no + tasks: - - name: Fetch SSL certificate information + - name: Get SSL Certificate Expiry for Domains uri: url: "https://{{ item }}" + method: GET return_content: yes - status_code: - - 200 - - 201 # Add other status codes you want to treat as successful - register: certificate_info - with_items: + validate_certs: no + register: response + loop: - ops.diksha.gov.in - api.diksha.gov.in - diksha.gov.in @@ -20,7 +21,7 @@ - obj.vdn.diksha.gov.in - static.diksha.gov.in - oci.diksha.gov.in - - myaccess.diksha.orcl.cloud/login + - myaccess.diksha.orcl.cloud - files.odev.oci.diksha.gov.in - vdn.oci.diksha.gov.in - dev.oci.diksha.gov.in @@ -32,21 +33,13 @@ - pp-myjp.diksha.gov.in - ejaaduipitara.ncert.gov.in - - name: Extract SSL certificate expiry date and days left + - name: Extract Certificate Expiry Dates set_fact: - expiry_date: "{{ (item.content | regex_findall('(not valid after|expiry date|expire date|expires on|expire on)(.*)\n', '\\2') | first).split('(')[0] }}" - days_left: "{{ ((expiry_date | to_datetime(format='%b %d %H:%M:%S %Y %Z')) - (ansible_date_time.iso8601 | to_datetime)).days }}" - status_code: "{{ item.status }}" - success: "{{ item.status == 200 }}" - with_items: "{{ certificate_info.results | default([]) }}" - when: certificate_info is defined and certificate_info.results | length > 0 and (item.status == 
200 or item.status == 201) + expiry_dates: "{{ expiry_dates | default({}) | combine({item.item: item.json.server_cert.expiry_date}) }}" + loop: "{{ response.results }}" + when: response.status == 200 and item.json.server_cert is defined - - name: Display SSL certificate expiry dates + - name: Print SSL Certificate Expiry Dates debug: - msg: "{{ item.item }} - Status Code: {{ item.status }} | Expiry Date: {{ item.expiry_date }} ({{ item.days_left }} days left)" - with_items: "{{ certificate_info.results | default([]) }}" - when: item.success | default(false) - - # Print the output directly in Jenkins console - - debug: - var: certificate_info.results + msg: "Domain {{ item.key }} has Expiry Date: {{ item.value }}" + loop: "{{ expiry_dates | dict2items }}" From afa336f284462c3913c9005d35d8513aad6310b8 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 06:37:52 +0530 Subject: [PATCH 541/616] Update check_ssl_expiry.yml --- ansible/check_ssl_expiry.yml | 44 +++++------------------------------- 1 file changed, 6 insertions(+), 38 deletions(-) diff --git a/ansible/check_ssl_expiry.yml b/ansible/check_ssl_expiry.yml index a7dd0121fe..87eafbf2dd 100644 --- a/ansible/check_ssl_expiry.yml +++ b/ansible/check_ssl_expiry.yml @@ -1,45 +1,13 @@ --- -- name: Check SSL Certificate Expiry +- name: Execute certcheck.py and Print Output hosts: jenkins-master gather_facts: no tasks: - - name: Get SSL Certificate Expiry for Domains - uri: - url: "https://{{ item }}" - method: GET - return_content: yes - validate_certs: no - register: response - loop: - - ops.diksha.gov.in - - api.diksha.gov.in - - diksha.gov.in - - vdn.diksha.gov.in - - merge.diksha.gov.in - - obj.diksha.gov.in - - obj.vdn.diksha.gov.in - - static.diksha.gov.in - - oci.diksha.gov.in - - myaccess.diksha.orcl.cloud - - files.odev.oci.diksha.gov.in - - vdn.oci.diksha.gov.in - - dev.oci.diksha.gov.in - - support.diksha.gov.in - - 
teachersupport.diksha.gov.in - - vsk.ndear.gov.in - - sskvsk.karnataka.gov.in - - vsk.schooleducationharyana.gov.in - - pp-myjp.diksha.gov.in - - ejaaduipitara.ncert.gov.in + - name: Execute Python Script + command: python3 /root/certcheck.py + register: script_output - - name: Extract Certificate Expiry Dates - set_fact: - expiry_dates: "{{ expiry_dates | default({}) | combine({item.item: item.json.server_cert.expiry_date}) }}" - loop: "{{ response.results }}" - when: response.status == 200 and item.json.server_cert is defined - - - name: Print SSL Certificate Expiry Dates + - name: Print Script Output debug: - msg: "Domain {{ item.key }} has Expiry Date: {{ item.value }}" - loop: "{{ expiry_dates | dict2items }}" + msg: "{{ script_output.stdout }}" From 86a0393e2fdf20c245ee1c2aca8831da863f55dd Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 06:40:02 +0530 Subject: [PATCH 542/616] Update check_ssl_expiry.yml --- ansible/check_ssl_expiry.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ansible/check_ssl_expiry.yml b/ansible/check_ssl_expiry.yml index 87eafbf2dd..14ae27d77a 100644 --- a/ansible/check_ssl_expiry.yml +++ b/ansible/check_ssl_expiry.yml @@ -1,9 +1,14 @@ --- - name: Execute certcheck.py and Print Output - hosts: jenkins-master + hosts: localhost gather_facts: no + vars: + ansible_python_interpreter: /usr/bin/python3 tasks: + - name: Set Execute Permissions on Python Script + command: chmod +x /root/certcheck.py + - name: Execute Python Script command: python3 /root/certcheck.py register: script_output From 3bd94a1031c4b6c692ff190451d69c37a525c687 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 06:46:03 +0530 Subject: [PATCH 543/616] Update check_ssl_expiry.yml --- ansible/check_ssl_expiry.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/ansible/check_ssl_expiry.yml b/ansible/check_ssl_expiry.yml index 14ae27d77a..78aa5f9919 100644 --- a/ansible/check_ssl_expiry.yml +++ b/ansible/check_ssl_expiry.yml @@ -7,10 +7,10 @@ tasks: - name: Set Execute Permissions on Python Script - command: chmod +x /root/certcheck.py + command: chmod +x /opt/scripts/certcheck.py - name: Execute Python Script - command: python3 /root/certcheck.py + command: python3 /opt/scripts/certcheck.py register: script_output - name: Print Script Output From 068d0fa83eae97f003e8fb1c6a9e19bdf1eaab75 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 16:38:30 +0530 Subject: [PATCH 544/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 56 ++++++++----------------------- 1 file changed, 14 insertions(+), 42 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 410672699d..7668d07f99 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,46 +1,18 @@ --- -- name: Display Kafka consumer group status - hosts: ingestion-cluster-kafka +- name: Execute check_kafka_group-status.py and Print Output + hosts: localhost gather_facts: no + vars: + ansible_python_interpreter: /usr/bin/python3 + tasks: - - name: Loop through Kafka consumer groups and check lag status - command: /opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server "localhost:9092" --describe --group "{{ item }}" - register: consumer_group_output - loop: - - "jobmanager" - - "dev-audit-event-generator-group" - - "telemetry-group" - - "prometheus-metrics-consumer" - - "create-entity-consumer-group" - - "ml-project-service" - - "dev-audit-history-indexer-group" - - "learning-127.0.1.1" - - "dev-search-indexer-group" - - "outbound" - - "dev-enrolment-reconciliation-group" - - "devsamiksha" - - "dev-relation-cache-updater-group" - - "dev-content-publish-group" - - "dev-qrcode-image-generator-group" - 
loop_control: - label: "{{ item }}" - - name: Print Kafka lag status for each group + - name: Set Execute Permissions on Python Script + command: chmod +x /opt/scripts/check_kafka_group-status.py + + - name: Execute Python Script + command: python3 /opt/scripts/check_kafka_group-status.py + register: script_output + + - name: Print Script Output debug: - msg: | - +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | - +------------------------------------------------------------------------------------------------------------------------+ - {% for result in consumer_group_output.results %} - {% set output_lines = result.stdout_lines %} - {%- for line in output_lines[2:] %} - {%- set values = line.split() %} - | {{ values[0] }} | {{ values[1] }} | {{ values[2] }} | {{ values[3] }} | {{ values[4] }} | {{ values[5] }} | {{ values[6] }} | {{ values[7] }} | {{ values[8] }} | - {%- endfor %} - +------------------------------------------------------------------------------------------------------------------------+ - {%- if not loop.last %} - | - +------------------------------------------------------------------------------------------------------------------------+ - | GROUP | TOPIC | PARTITION | CURRENT-OFFSET | LOG-END-OFFSET | LAG | CONSUMER-ID | HOST | CLIENT-ID | | - +------------------------------------------------------------------------------------------------------------------------+ - {%- endif %} - {% endfor %} + msg: "{{ script_output.stdout }}" From dbef303b33611177e1e40a6af7438b86d4164972 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 16:43:44 +0530 Subject: [PATCH 545/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 7668d07f99..5c8cf1f998 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -10,7 +10,7 @@ command: chmod +x /opt/scripts/check_kafka_group-status.py - name: Execute Python Script - command: python3 /opt/scripts/check_kafka_group-status.py + command: /usr/bin/python3 /opt/scripts/check_kafka_group-status.py register: script_output - name: Print Script Output From fcab0d031627bc4136245402d08f526641fb9dc0 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 16:46:54 +0530 Subject: [PATCH 546/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 5c8cf1f998..cc7c61df9d 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,6 +1,6 @@ --- - name: Execute check_kafka_group-status.py and Print Output - hosts: localhost + hosts: ingestion-cluster-kafka gather_facts: no vars: ansible_python_interpreter: /usr/bin/python3 From d1dabd3a8af76a918d9e4ba3496d887e7ca6a868 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 16:57:43 +0530 Subject: [PATCH 547/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index cc7c61df9d..bb883d7886 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -1,13 +1,20 @@ --- -- name: Execute check_kafka_group-status.py and Print Output +- name: Execute Kafka Consumer Groups Script hosts: ingestion-cluster-kafka - gather_facts: no - vars: - ansible_python_interpreter: /usr/bin/python3 + 
become: yes # If sudo/root access is required to copy the script and install dependencies tasks: - - name: Set Execute Permissions on Python Script - command: chmod +x /opt/scripts/check_kafka_group-status.py + - name: Copy Python Script to Target Servers + copy: + src: /path/to/local/check_kafka_group-status.py # Update the source path accordingly + dest: /opt/scripts/check_kafka_group-status.py + mode: 0755 # Set execute permissions on the copied script + + - name: Install tabulate Python library + pip: + name: tabulate + state: present + executable: /usr/bin/python3 # Path to the Python 3 interpreter on your target servers - name: Execute Python Script command: /usr/bin/python3 /opt/scripts/check_kafka_group-status.py From 1feddc5a61c43545a074ee2c5f6618c592f14d74 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 17:07:39 +0530 Subject: [PATCH 548/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index bb883d7886..e6c6db2de0 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -9,6 +9,7 @@ src: /path/to/local/check_kafka_group-status.py # Update the source path accordingly dest: /opt/scripts/check_kafka_group-status.py mode: 0755 # Set execute permissions on the copied script + remote_src: yes # Check for the file on remote servers - name: Install tabulate Python library pip: From 84c6e482c7ef12ac2244acd6359579b4bd4c104b Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 17:09:29 +0530 Subject: [PATCH 549/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index e6c6db2de0..cb3c69d104 
100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -6,7 +6,7 @@ tasks: - name: Copy Python Script to Target Servers copy: - src: /path/to/local/check_kafka_group-status.py # Update the source path accordingly + src: /opt/scripts/check_kafka_group-status.py # Update the source path accordingly dest: /opt/scripts/check_kafka_group-status.py mode: 0755 # Set execute permissions on the copied script remote_src: yes # Check for the file on remote servers From 71a0c0896b0bb3384ef4edde020631231d11af69 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 17:12:05 +0530 Subject: [PATCH 550/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index cb3c69d104..9c597fccf4 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -6,16 +6,17 @@ tasks: - name: Copy Python Script to Target Servers copy: - src: /opt/scripts/check_kafka_group-status.py # Update the source path accordingly + src: /path/to/local/check_kafka_group-status.py # Update the source path accordingly dest: /opt/scripts/check_kafka_group-status.py mode: 0755 # Set execute permissions on the copied script remote_src: yes # Check for the file on remote servers - - name: Install tabulate Python library - pip: - name: tabulate - state: present - executable: /usr/bin/python3 # Path to the Python 3 interpreter on your target servers + # Skip the installation task since tabulate is already installed manually + # - name: Install tabulate Python library + # pip: + # name: tabulate + # state: present + # executable: /usr/bin/python3 # Path to the Python 3 interpreter on your target servers - name: Execute Python Script command: /usr/bin/python3 /opt/scripts/check_kafka_group-status.py From 
56550e0ccf9037e5b8309802d3080675591ce27b Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 3 Apr 2024 17:16:20 +0530 Subject: [PATCH 551/616] Update kafka_lags_monitoring.yml --- ansible/kafka_lags_monitoring.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/kafka_lags_monitoring.yml b/ansible/kafka_lags_monitoring.yml index 9c597fccf4..40caf276ac 100644 --- a/ansible/kafka_lags_monitoring.yml +++ b/ansible/kafka_lags_monitoring.yml @@ -6,7 +6,7 @@ tasks: - name: Copy Python Script to Target Servers copy: - src: /path/to/local/check_kafka_group-status.py # Update the source path accordingly + src: /opt/scripts/check_kafka_group-status.py # Update the source path accordingly dest: /opt/scripts/check_kafka_group-status.py mode: 0755 # Set execute permissions on the copied script remote_src: yes # Check for the file on remote servers From e2dc220e80187dc8af35dac154c8fc6ad0a92679 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Mon, 8 Apr 2024 13:06:21 +0530 Subject: [PATCH 552/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index e903003f6e..f81bb359c8 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -48,3 +48,22 @@ node() { email_notify() } } + +def email_notify() { + def buildNumber = env.BUILD_NUMBER + def jobUrl = env.JOB_URL + + emailext ( + subject: 'Jenkins Pipeline Status', + body: """$JOB_NAME - Pipeline ${currentBuild.result}. 
+ + Build Number: $buildNumber + + Job URL: $jobUrl + + Console Output is attached.""", + to: 'alertsdiksha@trigyn.com', + attachmentsPattern: "${env.BUILD_ID}/log", + attachLog: true + ) +} From f398cd913b791e0a3b7c9dbe2f7d2dd412d6ca04 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Mon, 8 Apr 2024 13:18:05 +0530 Subject: [PATCH 553/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index f81bb359c8..905ea3ed8d 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -50,20 +50,27 @@ node() { } def email_notify() { + def buildNumber = env.BUILD_NUMBER + def jobUrl = env.JOB_URL + + // Send email notification using the built-in 'mail' step + + mail ( - emailext ( subject: 'Jenkins Pipeline Status', + body: """$JOB_NAME - Pipeline ${currentBuild.result}. 
Build Number: $buildNumber Job URL: $jobUrl - Console Output is attached.""", - to: 'alertsdiksha@trigyn.com', - attachmentsPattern: "${env.BUILD_ID}/log", - attachLog: true + Hello, This is an email from Dev Jenkins pipeline.""", + + to: 'alertsdiksha@trigyn.com' + ) + } From 8eef3db88ccdc54dad84faeb4c89a9e53b9ec662 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Mon, 8 Apr 2024 13:28:10 +0530 Subject: [PATCH 554/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index 905ea3ed8d..f5edcc3ca0 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -50,27 +50,25 @@ node() { } def email_notify() { - def buildNumber = env.BUILD_NUMBER - def jobUrl = env.JOB_URL - - // Send email notification using the built-in 'mail' step - mail ( + // Get the console output from the entire build + def consoleLog = currentBuild.rawBuild.getLog(10000) // Adjust the number of lines as needed + // Send email notification using the built-in 'mail' step + mail ( subject: 'Jenkins Pipeline Status', - body: """$JOB_NAME - Pipeline ${currentBuild.result}. 
Build Number: $buildNumber Job URL: $jobUrl - Hello, This is an email from Dev Jenkins pipeline.""", + Console Output: + $consoleLog + Hello, This is an email from Dev Jenkins pipeline.""", to: 'alertsdiksha@trigyn.com' - ) - } From 2dcd1fee29373d7ee78f946c6608e107c5cd1223 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Mon, 8 Apr 2024 13:30:24 +0530 Subject: [PATCH 555/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index f5edcc3ca0..73a5c55ef2 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -54,7 +54,7 @@ def email_notify() { def jobUrl = env.JOB_URL // Get the console output from the entire build - def consoleLog = currentBuild.rawBuild.getLog(10000) // Adjust the number of lines as needed + def consoleLog = currentBuild.getLog(10000) // Adjust the number of lines as needed // Send email notification using the built-in 'mail' step mail ( From 98977d6aaaa7d4fe11382d318c11be243d13366a Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Mon, 8 Apr 2024 13:32:41 +0530 Subject: [PATCH 556/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index 73a5c55ef2..f5edcc3ca0 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -54,7 +54,7 @@ def email_notify() { def jobUrl = env.JOB_URL // Get the console output from the entire build - def consoleLog = currentBuild.getLog(10000) // Adjust the number of lines as needed + def consoleLog = currentBuild.rawBuild.getLog(10000) // Adjust the number of lines as needed // Send email notification using the 
built-in 'mail' step mail ( From acd78f71309f4e0317c2c0c4f66c3a08a06acfc9 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Mon, 8 Apr 2024 14:02:14 +0530 Subject: [PATCH 557/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 32 ++++++++++++++------------ 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index f5edcc3ca0..87a92a115c 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -50,25 +50,27 @@ node() { } def email_notify() { - def buildNumber = env.BUILD_NUMBER - def jobUrl = env.JOB_URL + // Import necessary Java time classes + import java.time.LocalDate + import java.time.format.DateTimeFormatter - // Get the console output from the entire build - def consoleLog = currentBuild.rawBuild.getLog(10000) // Adjust the number of lines as needed + // Get the current date in dd-MM-yyyy format + def currentDate = LocalDate.now().format(DateTimeFormatter.ofPattern("dd-MM-yyyy")) - // Send email notification using the built-in 'mail' step - mail ( - subject: 'Jenkins Pipeline Status', - body: """$JOB_NAME - Pipeline ${currentBuild.result}. - - Build Number: $buildNumber - - Job URL: $jobUrl + // Generate the attachment file name based on pipeline name and current date + def attachmentFileName = "${JOB_NAME}_${currentDate}.txt" - Console Output: - $consoleLog + // Specify the path to the attachment file using the generated file name + def attachmentFilePath = "${WORKSPACE}/${attachmentFileName}" + emailext ( + subject: 'Jenkins Pipeline Status', + body: """$JOB_NAME - Pipeline ${currentBuild.result}. 
+ Build Number: ${env.BUILD_NUMBER} + Job URL: ${env.JOB_URL} Hello, This is an email from Dev Jenkins pipeline.""", - to: 'alertsdiksha@trigyn.com' + to: 'alertsdiksha@trigyn.com', + attachments: attachmentFilePath ) } + From d98639173d813938ed9283fc1c0f380f2cab483d Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Mon, 8 Apr 2024 14:05:02 +0530 Subject: [PATCH 558/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index 87a92a115c..8aa07f984d 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -50,10 +50,9 @@ node() { } def email_notify() { - // Import necessary Java time classes - import java.time.LocalDate - import java.time.format.DateTimeFormatter - + def buildNumber = env.BUILD_NUMBER + def jobUrl = env.JOB_URL + // Get the current date in dd-MM-yyyy format def currentDate = LocalDate.now().format(DateTimeFormatter.ofPattern("dd-MM-yyyy")) @@ -63,14 +62,18 @@ def email_notify() { // Specify the path to the attachment file using the generated file name def attachmentFilePath = "${WORKSPACE}/${attachmentFileName}" + // Check if the attachment file exists + sh "ls ${attachmentFilePath}" + emailext ( subject: 'Jenkins Pipeline Status', body: """$JOB_NAME - Pipeline ${currentBuild.result}. 
- Build Number: ${env.BUILD_NUMBER} - Job URL: ${env.JOB_URL} + Build Number: $buildNumber + Job URL: $jobUrl Hello, This is an email from Dev Jenkins pipeline.""", to: 'alertsdiksha@trigyn.com', attachments: attachmentFilePath ) } + From 2a0b3a0b67f995b05199da65970d7a600de7a169 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Mon, 8 Apr 2024 14:06:05 +0530 Subject: [PATCH 559/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index 8aa07f984d..23cbc2722d 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -49,12 +49,15 @@ node() { } } +import java.text.SimpleDateFormat +import java.util.Date + def email_notify() { def buildNumber = env.BUILD_NUMBER def jobUrl = env.JOB_URL // Get the current date in dd-MM-yyyy format - def currentDate = LocalDate.now().format(DateTimeFormatter.ofPattern("dd-MM-yyyy")) + def currentDate = new Date().format('dd-MM-yyyy') // Generate the attachment file name based on pipeline name and current date def attachmentFileName = "${JOB_NAME}_${currentDate}.txt" @@ -62,9 +65,6 @@ def email_notify() { // Specify the path to the attachment file using the generated file name def attachmentFilePath = "${WORKSPACE}/${attachmentFileName}" - // Check if the attachment file exists - sh "ls ${attachmentFilePath}" - emailext ( subject: 'Jenkins Pipeline Status', body: """$JOB_NAME - Pipeline ${currentBuild.result}. 
@@ -77,3 +77,4 @@ def email_notify() { } + From 3069ef9aba3199c4eab3f52ba47fbe18191f522f Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Mon, 8 Apr 2024 14:08:09 +0530 Subject: [PATCH 560/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index 23cbc2722d..15408e5a82 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -72,7 +72,7 @@ def email_notify() { Job URL: $jobUrl Hello, This is an email from Dev Jenkins pipeline.""", to: 'alertsdiksha@trigyn.com', - attachments: attachmentFilePath + attach: "${attachmentFilePath}" ) } From a08c60fff5d0413b93141092750e9ef1a1b33c2a Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Mon, 8 Apr 2024 14:11:43 +0530 Subject: [PATCH 561/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 31 ++++++++++++++++---------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index 15408e5a82..e81cff080e 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -52,29 +52,36 @@ node() { import java.text.SimpleDateFormat import java.util.Date +import java.nio.file.Files +import java.nio.file.Paths + def email_notify() { def buildNumber = env.BUILD_NUMBER def jobUrl = env.JOB_URL - - // Get the current date in dd-MM-yyyy format - def currentDate = new Date().format('dd-MM-yyyy') - // Generate the attachment file name based on pipeline name and current date - def attachmentFileName = "${JOB_NAME}_${currentDate}.txt" + // Capture the pipeline output as a string + def pipelineOutput = """ + $JOB_NAME - Pipeline ${currentBuild.result}. 
+ Build Number: $buildNumber + Job URL: $jobUrl + Hello, This is an email from Dev Jenkins pipeline. + """ - // Specify the path to the attachment file using the generated file name - def attachmentFilePath = "${WORKSPACE}/${attachmentFileName}" + // Write the output to a temporary file + def tempFilePath = "${WORKSPACE}/${JOB_NAME}_output.txt" + Files.write(Paths.get(tempFilePath), pipelineOutput.getBytes()) - emailext ( + // Send email notification using the built-in 'mail' step with the attachment + mail ( subject: 'Jenkins Pipeline Status', body: """$JOB_NAME - Pipeline ${currentBuild.result}. Build Number: $buildNumber Job URL: $jobUrl Hello, This is an email from Dev Jenkins pipeline.""", to: 'alertsdiksha@trigyn.com', - attach: "${attachmentFilePath}" + attachments: tempFilePath ) -} - - + // Clean up the temporary file after sending the email + sh "rm ${tempFilePath}" +} From 12e31ab540e6cc313bfe07b325b173268ae0baf1 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Mon, 8 Apr 2024 14:13:17 +0530 Subject: [PATCH 562/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index e81cff080e..0e0ecd8b8b 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -49,12 +49,6 @@ node() { } } -import java.text.SimpleDateFormat -import java.util.Date - -import java.nio.file.Files -import java.nio.file.Paths - def email_notify() { def buildNumber = env.BUILD_NUMBER def jobUrl = env.JOB_URL @@ -69,7 +63,7 @@ def email_notify() { // Write the output to a temporary file def tempFilePath = "${WORKSPACE}/${JOB_NAME}_output.txt" - Files.write(Paths.get(tempFilePath), pipelineOutput.getBytes()) + writeFile file: tempFilePath, text: pipelineOutput // Send email notification using the built-in 'mail' step with the 
attachment mail ( @@ -79,7 +73,7 @@ def email_notify() { Job URL: $jobUrl Hello, This is an email from Dev Jenkins pipeline.""", to: 'alertsdiksha@trigyn.com', - attachments: tempFilePath + attachmentsPattern: tempFilePath ) // Clean up the temporary file after sending the email From 19598a6684d16fec525b1967e29babd35f145af2 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Mon, 8 Apr 2024 14:18:50 +0530 Subject: [PATCH 563/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index 0e0ecd8b8b..b63cc857d8 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -49,6 +49,9 @@ node() { } } +import java.time.LocalDate +import java.time.format.DateTimeFormatter + def email_notify() { def buildNumber = env.BUILD_NUMBER def jobUrl = env.JOB_URL @@ -61,8 +64,11 @@ def email_notify() { Hello, This is an email from Dev Jenkins pipeline. 
""" - // Write the output to a temporary file - def tempFilePath = "${WORKSPACE}/${JOB_NAME}_output.txt" + // Get the current date in DD-MM-YYYY format + def currentDate = LocalDate.now().format(DateTimeFormatter.ofPattern("dd-MM-yyyy")) + + // Write the output to a temporary file with date in the file name + def tempFilePath = "${WORKSPACE}/${JOB_NAME}_output_${currentDate}.txt" writeFile file: tempFilePath, text: pipelineOutput // Send email notification using the built-in 'mail' step with the attachment @@ -73,9 +79,7 @@ def email_notify() { Job URL: $jobUrl Hello, This is an email from Dev Jenkins pipeline.""", to: 'alertsdiksha@trigyn.com', - attachmentsPattern: tempFilePath + attachments: [file(tempFilePath)] ) - - // Clean up the temporary file after sending the email - sh "rm ${tempFilePath}" } + From 8507b957e62b1854ce6b695aa16c1261df6e0593 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 9 Apr 2024 11:23:15 +0530 Subject: [PATCH 564/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index b63cc857d8..4e34dce86a 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -56,30 +56,21 @@ def email_notify() { def buildNumber = env.BUILD_NUMBER def jobUrl = env.JOB_URL - // Capture the pipeline output as a string - def pipelineOutput = """ + def emailContent = """ $JOB_NAME - Pipeline ${currentBuild.result}. Build Number: $buildNumber Job URL: $jobUrl Hello, This is an email from Dev Jenkins pipeline. 
""" - // Get the current date in DD-MM-YYYY format def currentDate = LocalDate.now().format(DateTimeFormatter.ofPattern("dd-MM-yyyy")) + def tempFilePath = "${WORKSPACE}/SSL_Expiry_output_${currentDate}.txt" + writeFile file: tempFilePath, text: emailContent - // Write the output to a temporary file with date in the file name - def tempFilePath = "${WORKSPACE}/${JOB_NAME}_output_${currentDate}.txt" - writeFile file: tempFilePath, text: pipelineOutput - - // Send email notification using the built-in 'mail' step with the attachment - mail ( + emailext ( subject: 'Jenkins Pipeline Status', - body: """$JOB_NAME - Pipeline ${currentBuild.result}. - Build Number: $buildNumber - Job URL: $jobUrl - Hello, This is an email from Dev Jenkins pipeline.""", + body: emailContent, to: 'alertsdiksha@trigyn.com', - attachments: [file(tempFilePath)] + attachmentsPattern: tempFilePath ) } - From bc1a05ef030db76908b4b76344769122d794f272 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 9 Apr 2024 11:32:55 +0530 Subject: [PATCH 565/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index 4e34dce86a..416969eb73 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -64,13 +64,15 @@ def email_notify() { """ def currentDate = LocalDate.now().format(DateTimeFormatter.ofPattern("dd-MM-yyyy")) - def tempFilePath = "${WORKSPACE}/SSL_Expiry_output_${currentDate}.txt" - writeFile file: tempFilePath, text: emailContent + def attachmentsPattern = "**/SSL_Expiry_output_${currentDate}.txt" + + writeFile file: "SSL_Expiry_output_${currentDate}.txt", text: emailContent emailext ( subject: 'Jenkins Pipeline Status', body: emailContent, to: 'alertsdiksha@trigyn.com', - attachmentsPattern: tempFilePath + attachmentsPattern: 
attachmentsPattern ) } + From 70cf507cc2a122b0ca58febceac877e2fa053278 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 9 Apr 2024 11:41:53 +0530 Subject: [PATCH 566/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index 416969eb73..ac1d9c30e8 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -64,15 +64,13 @@ def email_notify() { """ def currentDate = LocalDate.now().format(DateTimeFormatter.ofPattern("dd-MM-yyyy")) - def attachmentsPattern = "**/SSL_Expiry_output_${currentDate}.txt" - - writeFile file: "SSL_Expiry_output_${currentDate}.txt", text: emailContent + def tempFilePath = "SSL_Expiry_output_${currentDate}.txt" + writeFile file: tempFilePath, text: emailContent emailext ( subject: 'Jenkins Pipeline Status', body: emailContent, to: 'alertsdiksha@trigyn.com', - attachmentsPattern: attachmentsPattern + attachmentsPattern: tempFilePath ) } - From 659727c3cf7aed70d2d500e12771954bb3762e45 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 9 Apr 2024 12:01:19 +0530 Subject: [PATCH 567/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index ac1d9c30e8..7ed9c69ee9 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -56,21 +56,30 @@ def email_notify() { def buildNumber = env.BUILD_NUMBER def jobUrl = env.JOB_URL - def emailContent = """ + // Capture the pipeline output as a string + def pipelineOutput = """ $JOB_NAME - Pipeline ${currentBuild.result}. 
Build Number: $buildNumber Job URL: $jobUrl Hello, This is an email from Dev Jenkins pipeline. """ + // Get the current date in DD-MM-YYYY format def currentDate = LocalDate.now().format(DateTimeFormatter.ofPattern("dd-MM-yyyy")) - def tempFilePath = "SSL_Expiry_output_${currentDate}.txt" - writeFile file: tempFilePath, text: emailContent - emailext ( + // Write the output to a temporary file with date in the file name + def tempFilePath = "${WORKSPACE}/${JOB_NAME}_output_${currentDate}.txt" + writeFile file: tempFilePath, text: pipelineOutput + + // Send email notification using the built-in 'mail' step with the attachment + mail ( subject: 'Jenkins Pipeline Status', - body: emailContent, + body: """$JOB_NAME - Pipeline ${currentBuild.result}. + Build Number: $buildNumber + Job URL: $jobUrl + Hello, This is an email from Dev Jenkins pipeline.""", to: 'alertsdiksha@trigyn.com', - attachmentsPattern: tempFilePath + attachments: [file(tempFilePath)] ) + } From 8b7b081768c3a63e2ba6a6ef84834799692fc290 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 9 Apr 2024 14:00:13 +0530 Subject: [PATCH 568/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index 7ed9c69ee9..086d308749 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -71,15 +71,14 @@ def email_notify() { def tempFilePath = "${WORKSPACE}/${JOB_NAME}_output_${currentDate}.txt" writeFile file: tempFilePath, text: pipelineOutput - // Send email notification using the built-in 'mail' step with the attachment - mail ( + // Send email notification using the 'emailext' step with attachments + emailext ( subject: 'Jenkins Pipeline Status', body: """$JOB_NAME - Pipeline ${currentBuild.result}. 
Build Number: $buildNumber Job URL: $jobUrl Hello, This is an email from Dev Jenkins pipeline.""", to: 'alertsdiksha@trigyn.com', - attachments: [file(tempFilePath)] + attachmentsPattern: tempFilePath // Attach the temporary output file ) - } From ac90811bb8a560925ff2b02ca075750ea66c00c1 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 9 Apr 2024 14:02:06 +0530 Subject: [PATCH 569/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index 086d308749..120e6e9857 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -79,6 +79,6 @@ def email_notify() { Job URL: $jobUrl Hello, This is an email from Dev Jenkins pipeline.""", to: 'alertsdiksha@trigyn.com', - attachmentsPattern: tempFilePath // Attach the temporary output file + attachmentsPattern: "${WORKSPACE}/*_output_${currentDate}.txt" // Attach the temporary output file using Ant GLOB pattern ) } From 12803d25e8fe9793a9c8c0a5117114c6856f471d Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 9 Apr 2024 14:04:51 +0530 Subject: [PATCH 570/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index 120e6e9857..04dbb9508c 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -79,6 +79,6 @@ def email_notify() { Job URL: $jobUrl Hello, This is an email from Dev Jenkins pipeline.""", to: 'alertsdiksha@trigyn.com', - attachmentsPattern: "${WORKSPACE}/*_output_${currentDate}.txt" // Attach the temporary output file using Ant GLOB pattern + attachmentsPattern: "**/${JOB_NAME}_output_${currentDate}.txt" 
// Attach the temporary output file using Ant GLOB pattern ) } From 528659e5047cbe010adc153d6e9f377b92daf6a3 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 9 Apr 2024 17:55:58 +0530 Subject: [PATCH 571/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 47 +++++++++++--------------- 1 file changed, 19 insertions(+), 28 deletions(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index 04dbb9508c..b4cf8ed7f7 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -1,4 +1,5 @@ @Library('deploy-conf') _ + node() { try { String ANSI_GREEN = "\u001B[32m" @@ -9,10 +10,9 @@ node() { stage('checkout public repo') { folder = new File("$WORKSPACE/.git") - if (folder.exists()) - { - println "Found .git folder. Clearing it.." - sh'git clean -fxd' + if (folder.exists()) { + println "Found .git folder. Clearing it.." + sh'git clean -fxd' } checkout scm } @@ -49,36 +49,27 @@ node() { } } -import java.time.LocalDate -import java.time.format.DateTimeFormatter - def email_notify() { def buildNumber = env.BUILD_NUMBER def jobUrl = env.JOB_URL - // Capture the pipeline output as a string - def pipelineOutput = """ - $JOB_NAME - Pipeline ${currentBuild.result}. - Build Number: $buildNumber - Job URL: $jobUrl - Hello, This is an email from Dev Jenkins pipeline. 
- """ - - // Get the current date in DD-MM-YYYY format - def currentDate = LocalDate.now().format(DateTimeFormatter.ofPattern("dd-MM-yyyy")) - - // Write the output to a temporary file with date in the file name - def tempFilePath = "${WORKSPACE}/${JOB_NAME}_output_${currentDate}.txt" - writeFile file: tempFilePath, text: pipelineOutput + // Execute the Ansible playbook and capture its output + def ansibleOutput = sh(script: 'ansible-playbook /var/lib/jenkins/workspace/OpsAdministration/dev/Core/SSL_Expiry/ansible/check_ssl_expiry.yml', returnStdout: true).trim() - // Send email notification using the 'emailext' step with attachments - emailext ( - subject: 'Jenkins Pipeline Status', - body: """$JOB_NAME - Pipeline ${currentBuild.result}. + // Send email notification using the built-in 'mail' step + mail ( + subject: 'Jenkins SSL_Expiry Pipeline Status', + $JOB_NAME - Pipeline ${currentBuild.result}. + Build Number: $buildNumber Job URL: $jobUrl - Hello, This is an email from Dev Jenkins pipeline.""", - to: 'alertsdiksha@trigyn.com', - attachmentsPattern: "**/${JOB_NAME}_output_${currentDate}.txt" // Attach the temporary output file using Ant GLOB pattern + + Ansible Playbook Output: + ${ansibleOutput} + + + This is an email from the Dev Jenkins pipeline. 
+ """, + to: 'alertsdiksha@trigyn.com' ) } From 8aa1b81ea5f57f8652eefdcb0358baaa6fe541af Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 9 Apr 2024 17:57:59 +0530 Subject: [PATCH 572/616] Update Jenkinsfile From 8cb213f0273c580f9a17430dc5749d633eeb0610 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Tue, 9 Apr 2024 18:00:50 +0530 Subject: [PATCH 573/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index b4cf8ed7f7..f86b8eb809 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -12,20 +12,20 @@ node() { folder = new File("$WORKSPACE/.git") if (folder.exists()) { println "Found .git folder. Clearing it.." - sh'git clean -fxd' + sh 'git clean -fxd' } checkout scm } ansiColor('xterm') { - stage('deploy'){ - values = [:] - envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() - module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() - jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() - currentWs = sh(returnStdout: true, script: 'pwd').trim() - ansiblePlaybook = "${currentWs}/ansible/check_ssl_expiry.yml" - ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass" + stage('deploy') { + def values = [:] + def envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() + def module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() + def jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() + def currentWs = sh(returnStdout: true, script: 'pwd').trim() + def ansiblePlaybook = "${currentWs}/ansible/check_ssl_expiry.yml" + def ansibleExtraArgs = 
"--vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) values.put('module', module) @@ -42,7 +42,7 @@ node() { catch (err) { currentBuild.result = 'FAILURE' throw err - } + } finally { slack_notify(currentBuild.result) email_notify() @@ -59,6 +59,7 @@ def email_notify() { // Send email notification using the built-in 'mail' step mail ( subject: 'Jenkins SSL_Expiry Pipeline Status', + body: """ $JOB_NAME - Pipeline ${currentBuild.result}. Build Number: $buildNumber From 004baaa091f537b2f81f7585af71da5f276ebcc4 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 10 Apr 2024 10:03:15 +0530 Subject: [PATCH 574/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index f86b8eb809..0d9f644318 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -56,6 +56,12 @@ def email_notify() { // Execute the Ansible playbook and capture its output def ansibleOutput = sh(script: 'ansible-playbook /var/lib/jenkins/workspace/OpsAdministration/dev/Core/SSL_Expiry/ansible/check_ssl_expiry.yml', returnStdout: true).trim() + // Strip ANSI escape codes from the output + ansibleOutput = ansibleOutput.replaceAll(/\u001B\[[0-9;]*m/, '') + + // Remove specific lines from the Ansible output + ansibleOutput = ansibleOutput.replaceAll(/Ansible Playbook Output:(.*?)PLAY RECAP(.*?)/s, '') + // Send email notification using the built-in 'mail' step mail ( subject: 'Jenkins SSL_Expiry Pipeline Status', @@ -64,11 +70,7 @@ def email_notify() { Build Number: $buildNumber Job URL: $jobUrl - - Ansible Playbook Output: - ${ansibleOutput} - - + This is an email from the Dev Jenkins pipeline. 
""", to: 'alertsdiksha@trigyn.com' From fbd3065f82eb61bf700c1eb25c07213f2f3a7efb Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 10 Apr 2024 10:06:02 +0530 Subject: [PATCH 575/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index 0d9f644318..dd9f3f6197 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -60,7 +60,7 @@ def email_notify() { ansibleOutput = ansibleOutput.replaceAll(/\u001B\[[0-9;]*m/, '') // Remove specific lines from the Ansible output - ansibleOutput = ansibleOutput.replaceAll(/Ansible Playbook Output:(.*?)PLAY RECAP(.*?)/s, '') + ansibleOutput = ansibleOutput.replaceAll(/Ansible Playbook Output:(.*?)PLAY RECAP(.*?)/, '').trim() // Send email notification using the built-in 'mail' step mail ( From 64f40baffa878b5c08fbd547b8879ccef3edea71 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 10 Apr 2024 10:10:09 +0530 Subject: [PATCH 576/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 3 --- 1 file changed, 3 deletions(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index dd9f3f6197..b24cf81d3a 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -59,9 +59,6 @@ def email_notify() { // Strip ANSI escape codes from the output ansibleOutput = ansibleOutput.replaceAll(/\u001B\[[0-9;]*m/, '') - // Remove specific lines from the Ansible output - ansibleOutput = ansibleOutput.replaceAll(/Ansible Playbook Output:(.*?)PLAY RECAP(.*?)/, '').trim() - // Send email notification using the built-in 'mail' step mail ( subject: 'Jenkins SSL_Expiry Pipeline Status', From 5443843a2e465b81211b557247e03ddf601bdd41 Mon Sep 17 
00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Wed, 10 Apr 2024 10:14:06 +0530 Subject: [PATCH 577/616] Update Jenkinsfile --- pipelines/check-ssl-expiry/Jenkinsfile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pipelines/check-ssl-expiry/Jenkinsfile b/pipelines/check-ssl-expiry/Jenkinsfile index b24cf81d3a..183beb92f2 100644 --- a/pipelines/check-ssl-expiry/Jenkinsfile +++ b/pipelines/check-ssl-expiry/Jenkinsfile @@ -67,7 +67,10 @@ def email_notify() { Build Number: $buildNumber Job URL: $jobUrl - + + Ansible Playbook Output: + ${ansibleOutput} + This is an email from the Dev Jenkins pipeline. """, to: 'alertsdiksha@trigyn.com' From d624a365f63d8ae83a30d5c7c1dcbd1770fd334c Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Mon, 15 Apr 2024 22:12:25 +0530 Subject: [PATCH 578/616] Update Jenkinsfile --- pipelines/kafka-lags-monitoring/Jenkinsfile | 28 +++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/pipelines/kafka-lags-monitoring/Jenkinsfile b/pipelines/kafka-lags-monitoring/Jenkinsfile index d8c06cf7e1..b8962f6e5a 100644 --- a/pipelines/kafka-lags-monitoring/Jenkinsfile +++ b/pipelines/kafka-lags-monitoring/Jenkinsfile @@ -50,3 +50,31 @@ node() { email_notify() } } + +def email_notify() { + def buildNumber = env.BUILD_NUMBER + def jobUrl = env.JOB_URL + + // Execute the Ansible playbook and capture its output + def ansibleOutput = sh(script: 'ansible-playbook /var/lib/jenkins/workspace/OpsAdministration/dev/Core/Kafka_Lag_Monitoring/ansible/kafka_lags_monitoring.yml', returnStdout: true).trim() + + // Strip ANSI escape codes from the output + ansibleOutput = ansibleOutput.replaceAll(/\u001B\[[0-9;]*m/, '') + + // Send email notification using the built-in 'mail' step + mail ( + subject: 'Jenkins SSL_Expiry Pipeline Status', + body: """ + $JOB_NAME - Pipeline ${currentBuild.result}. 
+ + Build Number: $buildNumber + Job URL: $jobUrl + + Ansible Playbook Output: + ${ansibleOutput} + + This is an email from the Dev Jenkins pipeline. + """, + to: 'alertsdiksha@trigyn.com' + ) +} From 0dd3477fd666a436c98a40c689114de974ba232e Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Mon, 15 Apr 2024 22:31:35 +0530 Subject: [PATCH 579/616] Update Jenkinsfile --- pipelines/kafka-lags-monitoring/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/kafka-lags-monitoring/Jenkinsfile b/pipelines/kafka-lags-monitoring/Jenkinsfile index b8962f6e5a..3fc641fac4 100644 --- a/pipelines/kafka-lags-monitoring/Jenkinsfile +++ b/pipelines/kafka-lags-monitoring/Jenkinsfile @@ -63,7 +63,7 @@ def email_notify() { // Send email notification using the built-in 'mail' step mail ( - subject: 'Jenkins SSL_Expiry Pipeline Status', + subject: 'Jenkins Kafka_Lag_Monitoring Pipeline Status', body: """ $JOB_NAME - Pipeline ${currentBuild.result}. From 3dfb12186a3a77c604d4809e4837ec86cf6ac3c9 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Mon, 15 Apr 2024 22:34:25 +0530 Subject: [PATCH 580/616] Update Jenkinsfile --- pipelines/kafka-lags-monitoring/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/kafka-lags-monitoring/Jenkinsfile b/pipelines/kafka-lags-monitoring/Jenkinsfile index 3fc641fac4..dc7ed7be67 100644 --- a/pipelines/kafka-lags-monitoring/Jenkinsfile +++ b/pipelines/kafka-lags-monitoring/Jenkinsfile @@ -71,7 +71,7 @@ def email_notify() { Job URL: $jobUrl Ansible Playbook Output: - ${ansibleOutput} + $ansibleOutput This is an email from the Dev Jenkins pipeline. 
""", From 785bd861647b7f16a8930606239a6f83ee536536 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Mon, 15 Apr 2024 22:45:34 +0530 Subject: [PATCH 581/616] Update Jenkinsfile --- pipelines/kafka-lags-monitoring/Jenkinsfile | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/pipelines/kafka-lags-monitoring/Jenkinsfile b/pipelines/kafka-lags-monitoring/Jenkinsfile index dc7ed7be67..12628ecd55 100644 --- a/pipelines/kafka-lags-monitoring/Jenkinsfile +++ b/pipelines/kafka-lags-monitoring/Jenkinsfile @@ -51,21 +51,15 @@ node() { } } -def email_notify() { +def email_notify(buildResult, ansibleOutput) { def buildNumber = env.BUILD_NUMBER def jobUrl = env.JOB_URL - // Execute the Ansible playbook and capture its output - def ansibleOutput = sh(script: 'ansible-playbook /var/lib/jenkins/workspace/OpsAdministration/dev/Core/Kafka_Lag_Monitoring/ansible/kafka_lags_monitoring.yml', returnStdout: true).trim() - - // Strip ANSI escape codes from the output - ansibleOutput = ansibleOutput.replaceAll(/\u001B\[[0-9;]*m/, '') - // Send email notification using the built-in 'mail' step mail ( subject: 'Jenkins Kafka_Lag_Monitoring Pipeline Status', body: """ - $JOB_NAME - Pipeline ${currentBuild.result}. + $JOB_NAME - Pipeline ${buildResult}. 
Build Number: $buildNumber Job URL: $jobUrl From 40e7a51b170d0ed0777ab3e38fb67957b10254f7 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Mon, 15 Apr 2024 22:49:31 +0530 Subject: [PATCH 582/616] Update Jenkinsfile --- pipelines/kafka-lags-monitoring/Jenkinsfile | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/pipelines/kafka-lags-monitoring/Jenkinsfile b/pipelines/kafka-lags-monitoring/Jenkinsfile index 12628ecd55..b8962f6e5a 100644 --- a/pipelines/kafka-lags-monitoring/Jenkinsfile +++ b/pipelines/kafka-lags-monitoring/Jenkinsfile @@ -51,21 +51,27 @@ node() { } } -def email_notify(buildResult, ansibleOutput) { +def email_notify() { def buildNumber = env.BUILD_NUMBER def jobUrl = env.JOB_URL + // Execute the Ansible playbook and capture its output + def ansibleOutput = sh(script: 'ansible-playbook /var/lib/jenkins/workspace/OpsAdministration/dev/Core/Kafka_Lag_Monitoring/ansible/kafka_lags_monitoring.yml', returnStdout: true).trim() + + // Strip ANSI escape codes from the output + ansibleOutput = ansibleOutput.replaceAll(/\u001B\[[0-9;]*m/, '') + // Send email notification using the built-in 'mail' step mail ( - subject: 'Jenkins Kafka_Lag_Monitoring Pipeline Status', + subject: 'Jenkins SSL_Expiry Pipeline Status', body: """ - $JOB_NAME - Pipeline ${buildResult}. + $JOB_NAME - Pipeline ${currentBuild.result}. Build Number: $buildNumber Job URL: $jobUrl Ansible Playbook Output: - $ansibleOutput + ${ansibleOutput} This is an email from the Dev Jenkins pipeline. 
""", From 3f6b89e3be5b32a39a931ae2edca2b15e3b411f9 Mon Sep 17 00:00:00 2001 From: Praveen-Devaraj <121887814+Praveen-Devaraj@users.noreply.github.com> Date: Mon, 15 Apr 2024 22:50:03 +0530 Subject: [PATCH 583/616] Update Jenkinsfile --- pipelines/kafka-lags-monitoring/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/kafka-lags-monitoring/Jenkinsfile b/pipelines/kafka-lags-monitoring/Jenkinsfile index b8962f6e5a..3fc641fac4 100644 --- a/pipelines/kafka-lags-monitoring/Jenkinsfile +++ b/pipelines/kafka-lags-monitoring/Jenkinsfile @@ -63,7 +63,7 @@ def email_notify() { // Send email notification using the built-in 'mail' step mail ( - subject: 'Jenkins SSL_Expiry Pipeline Status', + subject: 'Jenkins Kafka_Lag_Monitoring Pipeline Status', body: """ $JOB_NAME - Pipeline ${currentBuild.result}. From af0986a4bb4211cf037231d9d014005c39c62334 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 30 Apr 2024 11:37:19 +0530 Subject: [PATCH 584/616] Create Jenkinsfile For testing Disk space Dev-servers --- pipelines/disk-space-monitoring /Jenkinsfile | 76 ++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 pipelines/disk-space-monitoring /Jenkinsfile diff --git a/pipelines/disk-space-monitoring /Jenkinsfile b/pipelines/disk-space-monitoring /Jenkinsfile new file mode 100644 index 0000000000..135e598f80 --- /dev/null +++ b/pipelines/disk-space-monitoring /Jenkinsfile @@ -0,0 +1,76 @@ +@Library('deploy-conf') _ + +node() { + try { + String ANSI_GREEN = "\u001B[32m" + String ANSI_NORMAL = "\u001B[0m" + String ANSI_BOLD = "\u001B[1m" + String ANSI_RED = "\u001B[31m" + String ANSI_YELLOW = "\u001B[33m" + + stage('checkout public repo') { + folder = new File("$WORKSPACE/.git") + if (folder.exists()) { + println "Found .git folder. Clearing it.." 
+ sh'git clean -fxd' + } + checkout scm + } + + ansiColor('xterm') { + stage('deploy'){ + values = [:] + envDir = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-3].trim() + module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() + jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() + currentWs = sh(returnStdout: true, script: 'pwd').trim() + ansiblePlaybook = "${currentWs}/ansible/path.yml" + ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass" + values.put('currentWs', currentWs) + values.put('env', envDir) + values.put('module', module) + values.put('jobName', jobName) + values.put('ansiblePlaybook', ansiblePlaybook) + values.put('ansibleExtraArgs', ansibleExtraArgs) + println values + ansible_playbook_run(values) + currentBuild.result = 'SUCCESS' + currentBuild.description = "Private: ${params.private_branch}, Public: ${params.branch_or_tag}" + } + } + } + catch (err) { + currentBuild.result = 'FAILURE' + throw err + } + finally { + slack_notify(currentBuild.result) + email_notify() + } +} + +def email_notify() { + + def buildNumber = env.BUILD_NUMBER + + def jobUrl = env.JOB_URL + + // Send email notification using the built-in 'mail' step + + mail ( + + subject: 'Jenkins Pipeline Status', + + body: """$JOB_NAME - Pipeline ${currentBuild.result}. 
+ + Build Number: $buildNumber + + Job URL: $jobUrl + + Hello, This is an email from Dev Jenkins pipeline.""", + + to: 'alertsdiksha@trigyn.com' + + ) + +} From e9cba46497ae093e229c8041fd3eb5d5c9318edd Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 30 Apr 2024 11:41:06 +0530 Subject: [PATCH 585/616] Create check_disk-space.yml creating ansilbe file for disk-space testing --- ansible/check_disk-space.yml | 1 + 1 file changed, 1 insertion(+) create mode 100644 ansible/check_disk-space.yml diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/ansible/check_disk-space.yml @@ -0,0 +1 @@ + From f402c8f2202b81858bcfa1f04e92994c28d06880 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 30 Apr 2024 12:00:01 +0530 Subject: [PATCH 586/616] Update check_disk-space.yml updating scripts for disk space --- ansible/check_disk-space.yml | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index 8b13789179..ed2d6d2aef 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -1 +1,26 @@ +--- +- name: Check disk space on mongo_master + hosts: mongo_master + gather_facts: no + tasks: + - name: Check mongo disk space + ansible.builtin.shell: df -h + register: mongo_disk_space + + - name: Print mongo disk space + ansible.builtin.debug: + msg: "mongo disk space: {{ mongo_disk_space.stdout_lines }}" + +- name: Check disk space on dp-spark-ps + hosts: dp-spark-ps + gather_facts: no + + tasks: + - name: Check spark disk space + ansible.builtin.shell: df -h + register: spark_disk_space + + - name: Print spark disk space + ansible.builtin.debug: + msg: "spark disk space: {{ spark_disk_space.stdout_lines }}" From 5671ec44bd54c546ab82104ff513dcd34f3eb86d Mon Sep 17 00:00:00 2001 From: 
bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 30 Apr 2024 12:01:35 +0530 Subject: [PATCH 587/616] Update Jenkinsfile updated ansible playbook path in jenkins file --- pipelines/disk-space-monitoring /Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pipelines/disk-space-monitoring /Jenkinsfile b/pipelines/disk-space-monitoring /Jenkinsfile index 135e598f80..6f6e1354c1 100644 --- a/pipelines/disk-space-monitoring /Jenkinsfile +++ b/pipelines/disk-space-monitoring /Jenkinsfile @@ -24,7 +24,7 @@ node() { module = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-2].trim() jobName = sh(returnStdout: true, script: "echo $JOB_NAME").split('/')[-1].trim() currentWs = sh(returnStdout: true, script: 'pwd').trim() - ansiblePlaybook = "${currentWs}/ansible/path.yml" + ansiblePlaybook = "${currentWs}/ansible/check_disk-space.yml" ansibleExtraArgs = "--vault-password-file /var/lib/jenkins/secrets/vault-pass" values.put('currentWs', currentWs) values.put('env', envDir) From 196e36346039267069a58f146e49a8455ff427a9 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 30 Apr 2024 12:21:08 +0530 Subject: [PATCH 588/616] Update check_disk-space.yml modified scripts --- ansible/check_disk-space.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index ed2d6d2aef..c5345ea8c8 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -5,11 +5,11 @@ tasks: - name: Check mongo disk space - ansible.builtin.shell: df -h + command: df -h register: mongo_disk_space - name: Print mongo disk space - ansible.builtin.debug: + debug: msg: "mongo disk space: {{ mongo_disk_space.stdout_lines }}" - name: Check disk space on dp-spark-ps @@ -18,9 +18,9 @@ tasks: - name: Check spark disk space - ansible.builtin.shell: df -h + command: df -h register: spark_disk_space - name: 
Print spark disk space - ansible.builtin.debug: + debug: msg: "spark disk space: {{ spark_disk_space.stdout_lines }}" From 2e31b7a00cf03c83ccb684193ccc8d45bb8d9f76 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 30 Apr 2024 12:25:22 +0530 Subject: [PATCH 589/616] Update check_disk-space.yml --- ansible/check_disk-space.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index c5345ea8c8..bc9130a7d6 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -16,7 +16,7 @@ hosts: dp-spark-ps gather_facts: no - tasks: + ### tasks: - name: Check spark disk space command: df -h register: spark_disk_space From 6709f60ad378ee00f2396b58be5d2fd22a309a5b Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 30 Apr 2024 12:31:45 +0530 Subject: [PATCH 590/616] Update check_disk-space.yml --- ansible/check_disk-space.yml | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index bc9130a7d6..6372c56087 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -12,15 +12,3 @@ debug: msg: "mongo disk space: {{ mongo_disk_space.stdout_lines }}" -- name: Check disk space on dp-spark-ps - hosts: dp-spark-ps - gather_facts: no - - ### tasks: - - name: Check spark disk space - command: df -h - register: spark_disk_space - - - name: Print spark disk space - debug: - msg: "spark disk space: {{ spark_disk_space.stdout_lines }}" From b724e54a2decbc18052a2e66a4de4c4c61345ffe Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 30 Apr 2024 14:04:34 +0530 Subject: [PATCH 591/616] Update check_disk-space.yml modified scripts- adding hosts --- ansible/check_disk-space.yml | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff 
--git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index 6372c56087..347c04d1c4 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -10,5 +10,17 @@ - name: Print mongo disk space debug: - msg: "mongo disk space: {{ mongo_disk_space.stdout_lines }}" + msg: "mongo disk space: {{ mongo_disk_space.stdout | grep('/dev/sda') }}" +- name: Check disk space on dp-spark-ps + hosts: postgresql-master-1 + gather_facts: no + + tasks: + - name: Check postgres disk space + command: df -h + register: postgres_disk_space + + - name: Print spark disk space + debug: + msg: "postgres disk space: {{ postgres_disk_space.stdout | grep('/dev/sda') }}" From 10c73c90ab5dee2b4cd6561288f00b58d9b6374d Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 30 Apr 2024 14:07:13 +0530 Subject: [PATCH 592/616] Update check_disk-space.yml --- ansible/check_disk-space.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index 347c04d1c4..582607c2ff 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -10,7 +10,7 @@ - name: Print mongo disk space debug: - msg: "mongo disk space: {{ mongo_disk_space.stdout | grep('/dev/sda') }}" + msg: "mongo disk space: {{ mongo_disk_space.stdout | grep('/dev/sda1') }}" - name: Check disk space on dp-spark-ps hosts: postgresql-master-1 @@ -23,4 +23,4 @@ - name: Print spark disk space debug: - msg: "postgres disk space: {{ postgres_disk_space.stdout | grep('/dev/sda') }}" + msg: "postgres disk space: {{ postgres_disk_space.stdout | grep('/dev/sda1') }}" From efa8f09945ff7980525766d58d5f556b852afd93 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 30 Apr 2024 14:15:53 +0530 Subject: [PATCH 593/616] Update check_disk-space.yml adding hosts --- ansible/check_disk-space.yml | 20 +++++++++++++++++--- 1 file 
changed, 17 insertions(+), 3 deletions(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index 582607c2ff..caf696478e 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -10,9 +10,9 @@ - name: Print mongo disk space debug: - msg: "mongo disk space: {{ mongo_disk_space.stdout | grep('/dev/sda1') }}" + msg: "mongo disk space: {{ mongo_disk_space.stdout }}" -- name: Check disk space on dp-spark-ps +- name: Check disk space on postgres hosts: postgresql-master-1 gather_facts: no @@ -23,4 +23,18 @@ - name: Print spark disk space debug: - msg: "postgres disk space: {{ postgres_disk_space.stdout | grep('/dev/sda1') }}" + msg: "postgres disk space: {{ postgres_disk_space.stdout }}" + + +- name: Check disk space on spark01 + hosts: dp-spark-ps + gather_facts: no + + tasks: + - name: Check spark01 disk space + command: df -h + register: spark_disk_space + + - name: Print spark disk space + debug: + msg: "spark disk space: {{ spark_disk_space.stdout_lines }}" From 04135a4b046830cc5535fa11844b04714f270193 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 30 Apr 2024 14:19:20 +0530 Subject: [PATCH 594/616] Update check_disk-space.yml --- ansible/check_disk-space.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index caf696478e..2262dfae9b 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -30,7 +30,7 @@ hosts: dp-spark-ps gather_facts: no - tasks: + tasks: - name: Check spark01 disk space command: df -h register: spark_disk_space From 81f1b112a02c15ed099f01b219e8ff1220af7567 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Mon, 6 May 2024 12:17:26 +0530 Subject: [PATCH 595/616] Update check_disk-space.yml Adding hosts for disk space testing --- ansible/check_disk-space.yml | 68 
+++++++++++++++++++++++++++++++++++- 1 file changed, 67 insertions(+), 1 deletion(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index 2262dfae9b..5dab474dca 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -37,4 +37,70 @@ - name: Print spark disk space debug: - msg: "spark disk space: {{ spark_disk_space.stdout_lines }}" + msg: "spark disk space: {{ spark_disk_space.stdout_lines }}" + +- name: Check disk space on redis + hosts: redis + gather_facts: no + + tasks: + - name: Check redis disk space + command: df -h + register: redis_disk_space + + - name: Print redis disk space + debug: + msg: "redis disk space: {{ redis_disk_space.stdout_lines }}" + +- name: Check disk space on keycloak-1 + hosts: keycloak-1 + gather_facts: no + + tasks: + - name: Check keycloak disk space + command: df -h + register: keycloak_disk_space + + - name: Print keycloak disk space + debug: + msg: "keycloak disk space: {{ keycloak_disk_space.stdout_lines }}" + +- name: Check disk space on learning1 + hosts: learning1 + gather_facts: no + + tasks: + - name: Check learning1 disk space + command: df -h + register: learning1_disk_space + + - name: Print learning1 disk space + debug: + msg: "learning1 disk space: {{ learning1_disk_space.stdout_lines }}" + +- name: Check disk space on ml-analytics-service + hosts: ml-analytics-service + gather_facts: no + + tasks: + - name: Check ml-analytics-service disk space + command: df -h + register: ml-analytics-service_disk_space + + - name: Print ml-analytics-service disk space + debug: + msg: "ml-analytics-service disk space: {{ ml-analytics-service_disk_space.stdout_lines }}" + +- name: Check disk space on cassandra + hosts: lp-cassandra + gather_facts: no + + tasks: + - name: Check cassandra disk space + command: df -h + register:cassandra_disk_space + + - name: Print cassandra disk space + debug: + msg: "cassandra disk space: {{ cassandra_disk_space.stdout_lines }}" + From 
aaeceb06073154dfbd59a90e1a925232d08dac55 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Mon, 6 May 2024 12:23:25 +0530 Subject: [PATCH 596/616] Update check_disk-space.yml --- ansible/check_disk-space.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index 5dab474dca..40742c6fc0 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -92,15 +92,17 @@ msg: "ml-analytics-service disk space: {{ ml-analytics-service_disk_space.stdout_lines }}" - name: Check disk space on cassandra - hosts: lp-cassandra + hosts: cassandra-01 gather_facts: no tasks: - name: Check cassandra disk space command: df -h register:cassandra_disk_space - + - name: Print cassandra disk space debug: msg: "cassandra disk space: {{ cassandra_disk_space.stdout_lines }}" + + From 26ef24f37954f5ff09fc1eb516323790fda57ab5 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Mon, 6 May 2024 12:25:17 +0530 Subject: [PATCH 597/616] Update check_disk-space.yml --- ansible/check_disk-space.yml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index 40742c6fc0..97f72254f2 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -91,18 +91,5 @@ debug: msg: "ml-analytics-service disk space: {{ ml-analytics-service_disk_space.stdout_lines }}" -- name: Check disk space on cassandra - hosts: cassandra-01 - gather_facts: no - - tasks: - - name: Check cassandra disk space - command: df -h - register:cassandra_disk_space - - - name: Print cassandra disk space - debug: - msg: "cassandra disk space: {{ cassandra_disk_space.stdout_lines }}" - From c2a70abad5ddcebb2196faafe4ddcb6dbc4382ca Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Mon, 6 May 2024 12:42:34 
+0530 Subject: [PATCH 598/616] Update check_disk-space.yml modified syntax --- ansible/check_disk-space.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index 97f72254f2..e6dcf24e3d 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -26,18 +26,18 @@ msg: "postgres disk space: {{ postgres_disk_space.stdout }}" -- name: Check disk space on spark01 - hosts: dp-spark-ps +- name: Check disk space on spark-sl + hosts: py-spark-sl gather_facts: no tasks: - - name: Check spark01 disk space + - name: Check spark-sl disk space command: df -h - register: spark_disk_space + register: sparksl_disk_space - name: Print spark disk space debug: - msg: "spark disk space: {{ spark_disk_space.stdout_lines }}" + msg: "sparksl disk space: {{ sparksl_disk_space.stdout_lines }}" - name: Check disk space on redis hosts: redis From 8e0afb45d4ea3feaca2532d2b33afebe80f5835b Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Mon, 6 May 2024 12:48:35 +0530 Subject: [PATCH 599/616] Update check_disk-space.yml adding influxdb host --- ansible/check_disk-space.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index e6dcf24e3d..18a8b218c7 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -78,18 +78,18 @@ debug: msg: "learning1 disk space: {{ learning1_disk_space.stdout_lines }}" -- name: Check disk space on ml-analytics-service - hosts: ml-analytics-service +- name: Check disk space on influxdb + hosts: dp-influx-ps gather_facts: no tasks: - - name: Check ml-analytics-service disk space + - name: Check influxdb disk space command: df -h - register: ml-analytics-service_disk_space + register: influxdb_disk_space - - name: Print ml-analytics-service disk space + - name: Print influxdb disk space debug: - msg: 
"ml-analytics-service disk space: {{ ml-analytics-service_disk_space.stdout_lines }}" + msg: "influxdb disk space: {{ influxdb_disk_space.stdout_lines }}" From ec27547e4ea4b350278a7ff22cd9c13f52a70d82 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 7 May 2024 14:20:47 +0530 Subject: [PATCH 600/616] Update check_disk-space.yml Hosts added --- ansible/check_disk-space.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index 18a8b218c7..b0aae18a6d 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -91,5 +91,17 @@ debug: msg: "influxdb disk space: {{ influxdb_disk_space.stdout_lines }}" +- name: Check disk space on cassandra-lp-dp + hosts: cassandra-lp-dp + gather_facts: no + + tasks: + - name: Check cassandra disk space + command: df -h + register:cassandra_disk_space + + - name: Print cassandra disk space + debug: + msg: "cassandra-lp-dp disk space: {{ cassandra-lp-dp_disk_space.stdout_lines }}" From 99ae659fa7983bb650b2fafcdfb0eedbb4def078 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 7 May 2024 14:23:35 +0530 Subject: [PATCH 601/616] Update check_disk-space.yml --- ansible/check_disk-space.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index b0aae18a6d..b893017c08 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -91,8 +91,8 @@ debug: msg: "influxdb disk space: {{ influxdb_disk_space.stdout_lines }}" -- name: Check disk space on cassandra-lp-dp - hosts: cassandra-lp-dp +- name: Check disk space on dp-cassandra + hosts: dp-cassandra gather_facts: no tasks: @@ -102,6 +102,6 @@ - name: Print cassandra disk space debug: - msg: "cassandra-lp-dp disk space: {{ cassandra-lp-dp_disk_space.stdout_lines }}" + msg: "dp-cassandra disk 
space: {{ dp-cassandra_disk_space.stdout_lines }}" From 30349402c7d652346f9c8a631568f178e5e3cb34 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 7 May 2024 17:13:35 +0530 Subject: [PATCH 602/616] Update check_disk-space.yml --- ansible/check_disk-space.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index b893017c08..3157c3f089 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -100,7 +100,7 @@ command: df -h register:cassandra_disk_space - - name: Print cassandra disk space + - name: Print cassandra disk space debug: msg: "dp-cassandra disk space: {{ dp-cassandra_disk_space.stdout_lines }}" From 8a43730bb689713c757c456ab72bd4706135d0cb Mon Sep 17 00:00:00 2001 From: rajesh3419 <127076903+rajesh3419@users.noreply.github.com> Date: Wed, 8 May 2024 10:35:59 +0530 Subject: [PATCH 603/616] Update check_disk-space.yml updated the syntax --- ansible/check_disk-space.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index 3157c3f089..b893017c08 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -100,7 +100,7 @@ command: df -h register:cassandra_disk_space - - name: Print cassandra disk space + - name: Print cassandra disk space debug: msg: "dp-cassandra disk space: {{ dp-cassandra_disk_space.stdout_lines }}" From 98d8dc93ffde1ab16311cd1ad430cd9c400fc169 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Wed, 8 May 2024 10:47:56 +0530 Subject: [PATCH 604/616] Update check_disk-space.yml Updated scripts --- ansible/check_disk-space.yml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index b893017c08..ba58871ff8 100644 --- a/ansible/check_disk-space.yml +++ 
b/ansible/check_disk-space.yml @@ -91,17 +91,4 @@ debug: msg: "influxdb disk space: {{ influxdb_disk_space.stdout_lines }}" -- name: Check disk space on dp-cassandra - hosts: dp-cassandra - gather_facts: no - - tasks: - - name: Check cassandra disk space - command: df -h - register:cassandra_disk_space - - name: Print cassandra disk space - debug: - msg: "dp-cassandra disk space: {{ dp-cassandra_disk_space.stdout_lines }}" - - From d4b01f9cd839d1251caa5c54d75ecab80cd56155 Mon Sep 17 00:00:00 2001 From: rajesh3419 <127076903+rajesh3419@users.noreply.github.com> Date: Wed, 8 May 2024 10:50:21 +0530 Subject: [PATCH 605/616] Update check_disk-space.yml syntax issue --- ansible/check_disk-space.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index ba58871ff8..468604d9de 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -91,4 +91,17 @@ debug: msg: "influxdb disk space: {{ influxdb_disk_space.stdout_lines }}" +- name: Check disk space on dp-cassandra + hosts: dp-cassandra + gather_facts: no + + tasks: + - name: Check cassandra disk space + command: df -h + register: cassandra_disk_space + - name: Print cassandra disk space + debug: + msg: "dp-cassandra disk space: {{ dp-cassandra_disk_space.stdout_lines }}" + + From 556e0ee6677bf07c631aced2611a22d854dd9529 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Wed, 8 May 2024 10:53:21 +0530 Subject: [PATCH 606/616] Update check_disk-space.yml --- ansible/check_disk-space.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index 468604d9de..2beaab072c 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -102,6 +102,6 @@ - name: Print cassandra disk space debug: - msg: "dp-cassandra disk space: {{ dp-cassandra_disk_space.stdout_lines }}" + msg: "cassandra disk 
space: {{ cassandra_disk_space.stdout_lines }}" From 9b1a10f82a1981ed539eb9f53be6d1a8a9aa79d4 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Wed, 8 May 2024 17:40:32 +0530 Subject: [PATCH 607/616] Update check_disk-space.yml modified scripts --- ansible/check_disk-space.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index 2beaab072c..1fafb18e56 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -5,7 +5,7 @@ tasks: - name: Check mongo disk space - command: df -h + command: df -h | awk '/Filesystem|\/dev\/sda1\s/' register: mongo_disk_space - name: Print mongo disk space @@ -18,7 +18,7 @@ tasks: - name: Check postgres disk space - command: df -h + command: df -h | awk '/Filesystem|\/dev\/sda1\s/' register: postgres_disk_space - name: Print spark disk space @@ -32,7 +32,7 @@ tasks: - name: Check spark-sl disk space - command: df -h + command: df -h | awk '/Filesystem|\/dev\/sda1\s/' register: sparksl_disk_space - name: Print spark disk space @@ -45,7 +45,7 @@ tasks: - name: Check redis disk space - command: df -h + command: df -h | awk '/Filesystem|\/dev\/sda1\s/' register: redis_disk_space - name: Print redis disk space @@ -58,7 +58,7 @@ tasks: - name: Check keycloak disk space - command: df -h + command: df -h | awk '/Filesystem|\/dev\/sda1\s/' register: keycloak_disk_space - name: Print keycloak disk space @@ -71,7 +71,7 @@ tasks: - name: Check learning1 disk space - command: df -h + command: df -h | awk '/Filesystem|\/dev\/sda1\s/' register: learning1_disk_space - name: Print learning1 disk space @@ -84,7 +84,7 @@ tasks: - name: Check influxdb disk space - command: df -h + command: df -h | awk '/Filesystem|\/dev\/sda1\s/' register: influxdb_disk_space - name: Print influxdb disk space @@ -97,7 +97,7 @@ tasks: - name: Check cassandra disk space - command: df -h + command: df -h | awk 
'/Filesystem|\/dev\/sda1\s/' register: cassandra_disk_space - name: Print cassandra disk space From 2f0fed86eec65ce8fb390178c54a3311a8c32cf2 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Wed, 8 May 2024 17:47:27 +0530 Subject: [PATCH 608/616] Update check_disk-space.yml --- ansible/check_disk-space.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index 1fafb18e56..5b6222e0a2 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -5,7 +5,7 @@ tasks: - name: Check mongo disk space - command: df -h | awk '/Filesystem|\/dev\/sda1\s/' + shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' register: mongo_disk_space - name: Print mongo disk space @@ -18,7 +18,7 @@ tasks: - name: Check postgres disk space - command: df -h | awk '/Filesystem|\/dev\/sda1\s/' + shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' register: postgres_disk_space - name: Print spark disk space @@ -32,7 +32,7 @@ tasks: - name: Check spark-sl disk space - command: df -h | awk '/Filesystem|\/dev\/sda1\s/' + shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' register: sparksl_disk_space - name: Print spark disk space @@ -45,7 +45,7 @@ tasks: - name: Check redis disk space - command: df -h | awk '/Filesystem|\/dev\/sda1\s/' + shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' register: redis_disk_space - name: Print redis disk space @@ -58,7 +58,7 @@ tasks: - name: Check keycloak disk space - command: df -h | awk '/Filesystem|\/dev\/sda1\s/' + shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' register: keycloak_disk_space - name: Print keycloak disk space @@ -71,7 +71,7 @@ tasks: - name: Check learning1 disk space - command: df -h | awk '/Filesystem|\/dev\/sda1\s/' + shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' register: learning1_disk_space - name: Print learning1 disk space @@ -84,7 +84,7 @@ tasks: - name: Check influxdb disk space - 
command: df -h | awk '/Filesystem|\/dev\/sda1\s/' + shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' register: influxdb_disk_space - name: Print influxdb disk space @@ -97,7 +97,7 @@ tasks: - name: Check cassandra disk space - command: df -h | awk '/Filesystem|\/dev\/sda1\s/' + shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' register: cassandra_disk_space - name: Print cassandra disk space From 8bac5d4807cba59ea28c20be0db9db664e86e8bf Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Wed, 8 May 2024 17:56:44 +0530 Subject: [PATCH 609/616] Update check_disk-space.yml --- ansible/check_disk-space.yml | 55 +++++++++++++++--------------------- 1 file changed, 22 insertions(+), 33 deletions(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index 5b6222e0a2..ee875d6354 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -2,106 +2,95 @@ - name: Check disk space on mongo_master hosts: mongo_master gather_facts: no - tasks: - name: Check mongo disk space - shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' + shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t register: mongo_disk_space - name: Print mongo disk space debug: - msg: "mongo disk space: {{ mongo_disk_space.stdout }}" + msg: "{{ mongo_disk_space.stdout }}" - name: Check disk space on postgres hosts: postgresql-master-1 gather_facts: no - tasks: - name: Check postgres disk space - shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' + shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t register: postgres_disk_space - name: Print spark disk space debug: - msg: "postgres disk space: {{ postgres_disk_space.stdout }}" - + msg: "{{ postgres_disk_space.stdout }}" - name: Check disk space on spark-sl hosts: py-spark-sl gather_facts: no - tasks: - name: Check spark-sl disk space - shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' + shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t register: 
sparksl_disk_space - name: Print spark disk space debug: - msg: "sparksl disk space: {{ sparksl_disk_space.stdout_lines }}" - + msg: "{{ sparksl_disk_space.stdout }}" + - name: Check disk space on redis hosts: redis gather_facts: no - tasks: - name: Check redis disk space - shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' + shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t register: redis_disk_space - name: Print redis disk space debug: - msg: "redis disk space: {{ redis_disk_space.stdout_lines }}" - -- name: Check disk space on keycloak-1 + msg: "{{ redis_disk_space.stdout }}" + +- name: Check disk space on keycloak-1 hosts: keycloak-1 gather_facts: no - tasks: - name: Check keycloak disk space - shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' + shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t register: keycloak_disk_space - name: Print keycloak disk space debug: - msg: "keycloak disk space: {{ keycloak_disk_space.stdout_lines }}" - -- name: Check disk space on learning1 + msg: "{{ keycloak_disk_space.stdout }}" + +- name: Check disk space on learning1 hosts: learning1 gather_facts: no - tasks: - name: Check learning1 disk space - shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' + shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t register: learning1_disk_space - name: Print learning1 disk space debug: - msg: "learning1 disk space: {{ learning1_disk_space.stdout_lines }}" - + msg: "{{ learning1_disk_space.stdout }}" + - name: Check disk space on influxdb hosts: dp-influx-ps gather_facts: no - tasks: - name: Check influxdb disk space - shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' + shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t register: influxdb_disk_space - name: Print influxdb disk space debug: - msg: "influxdb disk space: {{ influxdb_disk_space.stdout_lines }}" + msg: "{{ influxdb_disk_space.stdout }}" - name: Check disk space on dp-cassandra hosts: dp-cassandra gather_facts: no - tasks: - name: Check cassandra disk 
space - shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' + shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t register: cassandra_disk_space - name: Print cassandra disk space debug: - msg: "cassandra disk space: {{ cassandra_disk_space.stdout_lines }}" - - + msg: "{{ cassandra_disk_space.stdout }}" From 6b82d617f607d0995aadadec83f85d0729086ded Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Thu, 9 May 2024 12:49:43 +0530 Subject: [PATCH 610/616] Update check_disk-space.yml updated script to print only for hosts 75% above disk space --- ansible/check_disk-space.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index ee875d6354..f1e9f32546 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -6,10 +6,12 @@ - name: Check mongo disk space shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t register: mongo_disk_space + changed_when: false - name: Print mongo disk space debug: msg: "{{ mongo_disk_space.stdout }}" + when: mongo_disk_space.stdout_lines[1].split()[4] | int > 75 - name: Check disk space on postgres hosts: postgresql-master-1 @@ -18,10 +20,12 @@ - name: Check postgres disk space shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t register: postgres_disk_space + changed_when: false - name: Print spark disk space debug: msg: "{{ postgres_disk_space.stdout }}" + when: postgres_disk_space.stdout_lines[1].split()[4] | int > 75 - name: Check disk space on spark-sl hosts: py-spark-sl @@ -30,10 +34,12 @@ - name: Check spark-sl disk space shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t register: sparksl_disk_space + changed_when: false - name: Print spark disk space debug: msg: "{{ sparksl_disk_space.stdout }}" + when: sparksl_disk_space.stdout_lines[1].split()[4] | int > 75 - name: Check disk space on redis hosts: redis @@ -42,10 +48,12 @@ - name: Check redis disk space 
shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t register: redis_disk_space + changed_when: false - name: Print redis disk space debug: msg: "{{ redis_disk_space.stdout }}" + when: redis_disk_space.stdout_lines[1].split()[4] | int > 75 - name: Check disk space on keycloak-1 hosts: keycloak-1 @@ -54,10 +62,12 @@ - name: Check keycloak disk space shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t register: keycloak_disk_space + changed_when: false - name: Print keycloak disk space debug: msg: "{{ keycloak_disk_space.stdout }}" + when: keycloak_disk_space.stdout_lines[1].split()[4] | int > 75 - name: Check disk space on learning1 hosts: learning1 @@ -66,10 +76,12 @@ - name: Check learning1 disk space shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t register: learning1_disk_space + changed_when: false - name: Print learning1 disk space debug: msg: "{{ learning1_disk_space.stdout }}" + when: learning1_disk_space.stdout_lines[1].split()[4] | int > 75 - name: Check disk space on influxdb hosts: dp-influx-ps @@ -78,10 +90,12 @@ - name: Check influxdb disk space shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t register: influxdb_disk_space + changed_when: false - name: Print influxdb disk space debug: msg: "{{ influxdb_disk_space.stdout }}" + when: influxdb_disk_space.stdout_lines[1].split()[4] | int > 75 - name: Check disk space on dp-cassandra hosts: dp-cassandra @@ -90,7 +104,9 @@ - name: Check cassandra disk space shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t register: cassandra_disk_space + changed_when: false - name: Print cassandra disk space debug: msg: "{{ cassandra_disk_space.stdout }}" + when: cassandra_disk_space.stdout_lines[1].split()[4] | int > 75 From c22b8e9a39cf547326db8d4387cfe24ffef366ba Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 14 May 2024 16:28:46 +0530 Subject: [PATCH 611/616] Update check_disk-space.yml --- 
ansible/check_disk-space.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index f1e9f32546..b535e38e6b 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -11,7 +11,7 @@ - name: Print mongo disk space debug: msg: "{{ mongo_disk_space.stdout }}" - when: mongo_disk_space.stdout_lines[1].split()[4] | int > 75 + when: mongo_disk_space.stdout_lines[1].split()[4] | int > 5 - name: Check disk space on postgres hosts: postgresql-master-1 @@ -25,7 +25,7 @@ - name: Print spark disk space debug: msg: "{{ postgres_disk_space.stdout }}" - when: postgres_disk_space.stdout_lines[1].split()[4] | int > 75 + when: postgres_disk_space.stdout_lines[1].split()[4] | int > 5 - name: Check disk space on spark-sl hosts: py-spark-sl @@ -39,7 +39,7 @@ - name: Print spark disk space debug: msg: "{{ sparksl_disk_space.stdout }}" - when: sparksl_disk_space.stdout_lines[1].split()[4] | int > 75 + when: sparksl_disk_space.stdout_lines[1].split()[4] | int > 5 - name: Check disk space on redis hosts: redis @@ -53,7 +53,7 @@ - name: Print redis disk space debug: msg: "{{ redis_disk_space.stdout }}" - when: redis_disk_space.stdout_lines[1].split()[4] | int > 75 + when: redis_disk_space.stdout_lines[1].split()[4] | int > 5 - name: Check disk space on keycloak-1 hosts: keycloak-1 @@ -67,7 +67,7 @@ - name: Print keycloak disk space debug: msg: "{{ keycloak_disk_space.stdout }}" - when: keycloak_disk_space.stdout_lines[1].split()[4] | int > 75 + when: keycloak_disk_space.stdout_lines[1].split()[4] | int > 5 - name: Check disk space on learning1 hosts: learning1 @@ -81,7 +81,7 @@ - name: Print learning1 disk space debug: msg: "{{ learning1_disk_space.stdout }}" - when: learning1_disk_space.stdout_lines[1].split()[4] | int > 75 + when: learning1_disk_space.stdout_lines[1].split()[4] | int > 5 - name: Check disk space on influxdb hosts: dp-influx-ps @@ -95,7 +95,7 @@ - name: Print 
influxdb disk space debug: msg: "{{ influxdb_disk_space.stdout }}" - when: influxdb_disk_space.stdout_lines[1].split()[4] | int > 75 + when: influxdb_disk_space.stdout_lines[1].split()[4] | int > 5 - name: Check disk space on dp-cassandra hosts: dp-cassandra @@ -109,4 +109,4 @@ - name: Print cassandra disk space debug: msg: "{{ cassandra_disk_space.stdout }}" - when: cassandra_disk_space.stdout_lines[1].split()[4] | int > 75 + when: cassandra_disk_space.stdout_lines[1].split()[4] | int > 5 From 060b5647d4a879d02b58bdcb47a2fb575141b595 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 14 May 2024 17:04:43 +0530 Subject: [PATCH 612/616] Update check_disk-space.yml --- ansible/check_disk-space.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index b535e38e6b..ef048457ee 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -60,7 +60,7 @@ gather_facts: no tasks: - name: Check keycloak disk space - shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t + shell: df -h | awk '/Filesystem|\/dev\/sda1\s/ {print $5}' register: keycloak_disk_space changed_when: false From e991a63b0b0652c1d659f48802987f7b18c5ebbd Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 14 May 2024 17:17:13 +0530 Subject: [PATCH 613/616] Update check_disk-space.yml --- ansible/check_disk-space.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index ef048457ee..21963df6ab 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -60,14 +60,14 @@ gather_facts: no tasks: - name: Check keycloak disk space - shell: df -h | awk '/Filesystem|\/dev\/sda1\s/ {print $5}' + shell: df -h | awk '/Filesystem|\/dev\/sda1\s/ {print $5}' register: keycloak_disk_space changed_when: false - 
name: Print keycloak disk space debug: msg: "{{ keycloak_disk_space.stdout }}" - when: keycloak_disk_space.stdout_lines[1].split()[4] | int > 5 + ##when: keycloak_disk_space.stdout_lines[1].split()[4] | - name: Check disk space on learning1 hosts: learning1 From d2dc0d46c1b1a13f4c6c0ff3f3776d518b6e0be2 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 14 May 2024 17:23:17 +0530 Subject: [PATCH 614/616] Update check_disk-space.yml setting up threshold value --- ansible/check_disk-space.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index 21963df6ab..5c82a4992b 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -60,14 +60,14 @@ gather_facts: no tasks: - name: Check keycloak disk space - shell: df -h | awk '/Filesystem|\/dev\/sda1\s/ {print $5}' + shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 5' | column -t register: keycloak_disk_space changed_when: false - name: Print keycloak disk space debug: msg: "{{ keycloak_disk_space.stdout }}" - ##when: keycloak_disk_space.stdout_lines[1].split()[4] | +##when: keycloak_disk_space.stdout_lines[1].split()[4] | int > 10 - name: Check disk space on learning1 hosts: learning1 From 447f9d15adfdfbeebc3605570f73c3489ebba8d7 Mon Sep 17 00:00:00 2001 From: bijeshkashyap <159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 14 May 2024 17:34:17 +0530 Subject: [PATCH 615/616] Update check_disk-space.yml --- ansible/check_disk-space.yml | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index 5c82a4992b..71bdb032b9 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -4,56 +4,56 @@ gather_facts: no tasks: - name: Check mongo disk space - shell: df -h | awk 
'/Filesystem|\/dev\/sda1\s/' | column -t + shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 5' | column -t register: mongo_disk_space changed_when: false - name: Print mongo disk space debug: msg: "{{ mongo_disk_space.stdout }}" - when: mongo_disk_space.stdout_lines[1].split()[4] | int > 5 + ##when: mongo_disk_space.stdout_lines[1].split()[4] | int > 5 - name: Check disk space on postgres hosts: postgresql-master-1 gather_facts: no tasks: - name: Check postgres disk space - shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t + shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 5' | column -t register: postgres_disk_space changed_when: false - name: Print spark disk space debug: msg: "{{ postgres_disk_space.stdout }}" - when: postgres_disk_space.stdout_lines[1].split()[4] | int > 5 + ##when: postgres_disk_space.stdout_lines[1].split()[4] | int > 5 - name: Check disk space on spark-sl hosts: py-spark-sl gather_facts: no tasks: - name: Check spark-sl disk space - shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t + shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 5' | column -t register: sparksl_disk_space changed_when: false - name: Print spark disk space debug: msg: "{{ sparksl_disk_space.stdout }}" - when: sparksl_disk_space.stdout_lines[1].split()[4] | int > 5 + ##when: sparksl_disk_space.stdout_lines[1].split()[4] | int > 5 - name: Check disk space on redis hosts: redis gather_facts: no tasks: - name: Check redis disk space - shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t + shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 5' | column -t register: redis_disk_space changed_when: false - name: Print redis disk space debug: msg: "{{ redis_disk_space.stdout }}" - when: 
redis_disk_space.stdout_lines[1].split()[4] | int > 5 + ##when: redis_disk_space.stdout_lines[1].split()[4] | int > 5 - name: Check disk space on keycloak-1 hosts: keycloak-1 @@ -74,39 +74,39 @@ gather_facts: no tasks: - name: Check learning1 disk space - shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t + shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 5' | column -t register: learning1_disk_space changed_when: false - name: Print learning1 disk space debug: msg: "{{ learning1_disk_space.stdout }}" - when: learning1_disk_space.stdout_lines[1].split()[4] | int > 5 + ##when: learning1_disk_space.stdout_lines[1].split()[4] | int > 5 - name: Check disk space on influxdb hosts: dp-influx-ps gather_facts: no tasks: - name: Check influxdb disk space - shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t + shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 5' | column -t register: influxdb_disk_space changed_when: false - name: Print influxdb disk space debug: msg: "{{ influxdb_disk_space.stdout }}" - when: influxdb_disk_space.stdout_lines[1].split()[4] | int > 5 + ##when: influxdb_disk_space.stdout_lines[1].split()[4] | int > 5 - name: Check disk space on dp-cassandra hosts: dp-cassandra gather_facts: no tasks: - name: Check cassandra disk space - shell: df -h | awk '/Filesystem|\/dev\/sda1\s/' | column -t + shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 5' | column -t register: cassandra_disk_space changed_when: false - name: Print cassandra disk space debug: msg: "{{ cassandra_disk_space.stdout }}" - when: cassandra_disk_space.stdout_lines[1].split()[4] | int > 5 + ##when: cassandra_disk_space.stdout_lines[1].split()[4] | int > 5 From 1d4238c75b97b5c90631277d68e26acfc94963e6 Mon Sep 17 00:00:00 2001 From: bijeshkashyap 
<159909529+bijeshkashyap@users.noreply.github.com> Date: Tue, 14 May 2024 20:52:38 +0530 Subject: [PATCH 616/616] Update check_disk-space.yml modified scripts for threshold limit setups --- ansible/check_disk-space.yml | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/ansible/check_disk-space.yml b/ansible/check_disk-space.yml index 71bdb032b9..44b8008531 100644 --- a/ansible/check_disk-space.yml +++ b/ansible/check_disk-space.yml @@ -4,109 +4,109 @@ gather_facts: no tasks: - name: Check mongo disk space - shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 5' | column -t + shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 75' | column -t register: mongo_disk_space changed_when: false - name: Print mongo disk space debug: msg: "{{ mongo_disk_space.stdout }}" - ##when: mongo_disk_space.stdout_lines[1].split()[4] | int > 5 + ##when: mongo_disk_space.stdout_lines[1].split()[4] | int > 75 - name: Check disk space on postgres hosts: postgresql-master-1 gather_facts: no tasks: - name: Check postgres disk space - shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 5' | column -t + shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 75' | column -t register: postgres_disk_space changed_when: false - name: Print spark disk space debug: msg: "{{ postgres_disk_space.stdout }}" - ##when: postgres_disk_space.stdout_lines[1].split()[4] | int > 5 + ##when: postgres_disk_space.stdout_lines[1].split()[4] | int > 75 - name: Check disk space on spark-sl hosts: py-spark-sl gather_facts: no tasks: - name: Check spark-sl disk space - shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 5' | column -t + shell: df -h | awk 
'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 75' | column -t register: sparksl_disk_space changed_when: false - name: Print spark disk space debug: msg: "{{ sparksl_disk_space.stdout }}" - ##when: sparksl_disk_space.stdout_lines[1].split()[4] | int > 5 + ##when: sparksl_disk_space.stdout_lines[1].split()[4] | int > 75 - name: Check disk space on redis hosts: redis gather_facts: no tasks: - name: Check redis disk space - shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 5' | column -t + shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 75' | column -t register: redis_disk_space changed_when: false - name: Print redis disk space debug: msg: "{{ redis_disk_space.stdout }}" - ##when: redis_disk_space.stdout_lines[1].split()[4] | int > 5 + ##when: redis_disk_space.stdout_lines[1].split()[4] | int > 75 - name: Check disk space on keycloak-1 hosts: keycloak-1 gather_facts: no tasks: - name: Check keycloak disk space - shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 5' | column -t + shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 75' | column -t register: keycloak_disk_space changed_when: false - name: Print keycloak disk space debug: msg: "{{ keycloak_disk_space.stdout }}" -##when: keycloak_disk_space.stdout_lines[1].split()[4] | int > 10 +##when: keycloak_disk_space.stdout_lines[1].split()[4] | int > 75 - name: Check disk space on learning1 hosts: learning1 gather_facts: no tasks: - name: Check learning1 disk space - shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 5' | column -t + shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && 
$5+0 > 75' | column -t register: learning1_disk_space changed_when: false - name: Print learning1 disk space debug: msg: "{{ learning1_disk_space.stdout }}" - ##when: learning1_disk_space.stdout_lines[1].split()[4] | int > 5 + ##when: learning1_disk_space.stdout_lines[1].split()[4] | int > 75 - name: Check disk space on influxdb hosts: dp-influx-ps gather_facts: no tasks: - name: Check influxdb disk space - shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 5' | column -t + shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 75' | column -t register: influxdb_disk_space changed_when: false - name: Print influxdb disk space debug: msg: "{{ influxdb_disk_space.stdout }}" - ##when: influxdb_disk_space.stdout_lines[1].split()[4] | int > 5 + ##when: influxdb_disk_space.stdout_lines[1].split()[4] | int > 75 - name: Check disk space on dp-cassandra hosts: dp-cassandra gather_facts: no tasks: - name: Check cassandra disk space - shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 5' | column -t + shell: df -h | awk 'BEGIN {print "Filesystem Size Used Avail Use% Mounted on"} /Filesystem|\/dev\/sda1\s/ && $5+0 > 75' | column -t register: cassandra_disk_space changed_when: false - name: Print cassandra disk space debug: msg: "{{ cassandra_disk_space.stdout }}" - ##when: cassandra_disk_space.stdout_lines[1].split()[4] | int > 5 + ##when: cassandra_disk_space.stdout_lines[1].split()[4] | int > 75