From c6243936151ad5f9035948e654003bad35455d99 Mon Sep 17 00:00:00 2001
From: Alastair Flynn
Date: Thu, 28 Mar 2024 14:47:57 -0400
Subject: [PATCH] Trigger a settings rewrite when one of the units leaves the dbcluster relation

---
 src/charm.py        | 12 +++++++++++-
 tests/test_charm.py | 41 ++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 51 insertions(+), 2 deletions(-)

diff --git a/src/charm.py b/src/charm.py
index 553a26d..5ad3923 100755
--- a/src/charm.py
+++ b/src/charm.py
@@ -62,6 +62,8 @@ def _observe(self):
             self.on.metrics_endpoint_relation_broken, self._on_metrics_endpoint_relation_broken)
         self.framework.observe(
             self.on.dbcluster_relation_changed, self._on_dbcluster_relation_changed)
+        self.framework.observe(
+            self.on.dbcluster_relation_departed, self._on_dbcluster_relation_departed)
 
     def _on_install(self, event: InstallEvent):
         """Ensure that the controller configuration file exists."""
@@ -157,12 +159,20 @@ def _on_metrics_endpoint_relation_broken(self, event: RelationDepartedEvent):
             self._control_socket.remove_metrics_user(username)
 
     def _on_dbcluster_relation_changed(self, event):
+        relation = event.relation
+        self._update_bind_addresses(relation)
+
+    def _on_dbcluster_relation_departed(self, event):
+        relation = event.relation
+        self._update_bind_addresses(relation)
+
+    def _update_bind_addresses(self, relation):
         """Maintain our own bind address in relation data.
         If we are the leader, aggregate the bind addresses for all the peers,
         and ensure the result is set in the application data bag.
         If the aggregate addresses have changed, rewrite the config file.
         """
-        relation = event.relation
+
         try:
             ip = self._set_db_bind_address(relation)
         except DBBindAddressException as e:
diff --git a/tests/test_charm.py b/tests/test_charm.py
index c26b302..d39f263 100644
--- a/tests/test_charm.py
+++ b/tests/test_charm.py
@@ -158,7 +158,7 @@ def test_dbcluster_relation_changed_single_addr(
         harness = self.harness
         mock_get_binding.return_value = mockBinding(['192.168.1.17'])
 
-        # This unit's agent ID happends to correspond with the unit ID.
+        # This unit's agent ID happens to correspond with the unit ID.
         mock_get_agent_id.return_value = '0'
 
         harness.set_leader()
@@ -247,6 +247,45 @@ def test_dbcluster_relation_changed_write_file(
         # The last thing we should have done is send a reload request via the socket..
         mock_reload_config.assert_called_once()
 
+    @patch("builtins.open", new_callable=mock_open, read_data=agent_conf)
+    @patch("configchangesocket.ConfigChangeSocketClient.get_controller_agent_id")
+    @patch("ops.model.Model.get_binding")
+    @patch("configchangesocket.ConfigChangeSocketClient.reload_config")
+    def test_dbcluster_relation_departed(
+            self, mock_reload_config, mock_get_binding, mock_get_agent_id, *__):
+        harness = self.harness
+        mock_get_binding.return_value = mockBinding(['192.168.1.17'])
+
+        # This unit's agent ID happens to correspond with the unit ID.
+        mock_get_agent_id.return_value = '0'
+
+        harness.set_leader()
+
+        # Have another unit enter the relation.
+        relation_id = harness.add_relation('dbcluster', harness.charm.app)
+        harness.add_relation_unit(relation_id, 'juju-controller/1')
+        self.harness.update_relation_data(
+            relation_id, 'juju-controller/1', {
+                'db-bind-address': '192.168.1.100',
+                'agent-id': '9',
+            })
+
+        # Assert that the second unit's agent bind address is in the data bag.
+        app_data = harness.get_relation_data(relation_id, 'juju-controller')
+        exp = {'0': '192.168.1.17', '9': '192.168.1.100'}
+        self.assertEqual(json.loads(app_data['db-bind-addresses']), exp)
+
+        # Remove the second unit.
+        harness.remove_relation_unit(relation_id, 'juju-controller/1')
+
+        # Assert that the second unit's address is gone from the data bag.
+        app_data = harness.get_relation_data(relation_id, 'juju-controller')
+        exp = {'0': '192.168.1.17'}
+        self.assertEqual(json.loads(app_data['db-bind-addresses']), exp)
+
+        harness.evaluate_status()
+        self.assertIsInstance(harness.charm.unit.status, ActiveStatus)
+
 
 class mockNetwork:
     def __init__(self, addresses):