Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

PMM-12494 fix sqlite to postgres for docker #2486

Merged
Merged 8 commits into the base branch on Sep 18, 2023
109 changes: 87 additions & 22 deletions update/ansible/playbook/tasks/roles/initialization/tasks/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,23 +5,6 @@
ui_upgrade: False
when: ui_upgrade is undefined


# PMM-10858 - In certain environments, including AWS EC2, some of the
# EPEL repository mirrors do not respond within the time limit defined
# by pmm-update which is currently set to 30 seconds. This was causing
# supervisord to kill pmm-update-checker
- name: Update repository settings
when:
- ansible_distribution == "CentOS"
- ansible_distribution_major_version == "7"
command: yum-config-manager --setopt=epel.timeout=1 --save
changed_when: True

- name: Clean yum metadata
command: yum clean metadata
become: true
changed_when: True

- name: Get current version
slurp:
src: /srv/grafana/PERCONA_DASHBOARDS_VERSION
Expand Down Expand Up @@ -51,6 +34,32 @@
debug:
msg: "Current version: {{ pmm_current_version }} Image Version: {{ pmm_image_version }}"


# current_version_file['failed'] is true on initial container creation (no
# previous version file exists), and the version comparison restricts this to
# runs where the image is actually newer — i.e. a real docker upgrade.
- name: Enable maintenance mode only for docker upgrade
  copy:
    src: maintenance.html
    dest: /usr/share/pmm-server/maintenance/
    mode: "0644"  # quoted: a bare 0644 is parsed as the octal integer 420
  when:
    - not ui_upgrade
    - not current_version_file['failed']
    - pmm_current_version is version(pmm_image_version, '<')

# PMM-10858 - In certain environments, including AWS EC2, some of the
# EPEL repository mirrors do not respond within the time limit defined
# by pmm-update which is currently set to 30 seconds. This was causing
# supervisord to kill pmm-update-checker
- name: Update repository settings
  when:
    - ansible_distribution == "CentOS"
    - ansible_distribution_major_version == "7"
  command: yum-config-manager --setopt=epel.timeout=1 --save
  changed_when: true  # always rewrites the repo config

- name: Clean yum metadata
  command: yum clean metadata
  become: true
  changed_when: true

- name: Check if we need an update or not
include_role:
name: dashboards_upgrade
Expand Down Expand Up @@ -102,12 +111,68 @@
path: /srv/grafana/grafana.db
register: is_database_sqlite

- name: Migrate Grafana database from SQLite to Postgresql
include_role:
name: sqlite-to-postgres
# If a pre-existing SQLite database is found (older PMM), temporarily point
# Grafana back at it, bring its schema up to date, then migrate the data to
# PostgreSQL. Skipped entirely when /srv/grafana/grafana.db is absent.
- name: Temporary change database to SQLite
  block:
    # SQLite is Grafana's default backend, so reverting to it only requires
    # removing the explicit [database] connection settings from grafana.ini.
    - name: Remove database connection options (SQLite is default)
      ini_file:
        dest: /etc/grafana/grafana.ini
        section: database
        option: "{{ item }}"
        state: absent
      loop:
        - type
        - host
        - user
        - password

    - name: Upgrade grafana database (Get the latest schema)
      command: grafana cli --homepath=/usr/share/grafana admin data-migration encrypt-datasource-passwords
      changed_when: true

    - name: Start grafana again
      supervisorctl:
        name: grafana
        state: restarted

    # Give Grafana a moment to finish starting before the migration role runs.
    - name: Wait for grafana
      pause:
        seconds: 10

    - name: Migrate Grafana database from SQLite to Postgresql
      include_role:
        name: sqlite-to-postgres
      tags:
        - skip_ansible_lint # '503 Tasks that run when changed should likely be handlers'.
  when: is_database_sqlite.stat.exists
  tags:
    - skip_ansible_lint # '503 Tasks that run when changed should likely be handlers'.

- name: Wait for PMM to be ready
  ansible.builtin.uri:
    url: "http://127.0.0.1:7772/v1/readyz"
    status_code: 200
    method: GET
  register: readyz_result
  # Without `until`, `retries`/`delay` are ignored on Ansible < 2.16 and the
  # task fails on the first refused connection while pmm-managed restarts.
  # Matches the readiness-check pattern used elsewhere in this update playbook.
  until: readyz_result is not failed
  retries: 120
  delay: 1

# Removing maintenance.html takes the UI out of maintenance mode;
# `state: absent` is a no-op if the file was never created.
- name: Disable maintenance mode
  file:
    state: absent
    path: /usr/share/pmm-server/maintenance/maintenance.html
  # current_version_file['failed'] is true on initial container creation,
  # so this only runs during an actual upgrade.
  when: not ui_upgrade and not current_version_file['failed']


Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@
url: "http://127.0.0.1:3000/api/health"
status_code: 200
method: GET
register: healthcheck
until: healthcheck is not failed
retries: 120
delay: 1

Expand Down
27 changes: 18 additions & 9 deletions update/ansible/playbook/tasks/update.yml
Original file line number Diff line number Diff line change
Expand Up @@ -397,12 +397,11 @@
- /etc/yum/yum-cron-hourly.conf
- /etc/yum/yum-cron.conf

- name: Enable pmm-update-perform-init after build
ini_file:
path: /etc/supervisord.d/pmm.ini
section: program:pmm-update-perform-init
option: autostart
value: "true"
# Regenerating the supervisor config (rather than patching pmm.ini directly)
# enables pmm-update-perform-init.
- name: Generate new supervisor config
  command: pmm-managed-init
  register: managed_init_result
  changed_when: true  # always rewrites the supervisor config

- name: Reread pmm-update-perform-init supervisor config EL7
when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7'
Expand Down Expand Up @@ -454,6 +453,16 @@
- ALTER TABLE api_key ALTER COLUMN name TYPE text;
when: not ansible_check_mode

# Block the rest of the update until pmm-managed answers 200 on its local
# readiness endpoint; polls up to 120 times, one second apart.
- name: Wait for PMM to be ready
  ansible.builtin.uri:
    url: "http://127.0.0.1:7772/v1/readyz"
    status_code: 200
    method: GET
  register: healthcheck
  until: healthcheck is not failed
  retries: 120
  delay: 1

# SIGUSR2 is sent to supervisord by pmm-managed right before the update for logging to work correctly.
# We use that fact to show what was restarted during the update.
- name: Get supervisord logs EL7
Expand All @@ -468,14 +477,14 @@
register: maintail_result
changed_when: False

- name: Print supervisord logs
debug: var=maintail_result.stdout_lines

# Removing maintenance.html takes the server out of maintenance mode;
# `state: absent` makes this a no-op if the file is already gone.
- name: Disable maintenance mode
  file:
    state: absent
    path: /usr/share/pmm-server/maintenance/maintenance.html

- name: Print supervisord logs
debug: var=maintail_result.stdout_lines

- name: Cleanup yum cache
file:
state: absent
Expand Down