diff --git a/HISTORY.rst b/HISTORY.rst
index 19d56393a..06683dd19 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -30,6 +30,9 @@ unreleased
 * Modification of how route testing works for testing all route sources. If `az iot hub message-route test`
   is called without specifying a route name or type, all types will be tested rather than only DeviceMessage routes.
 
+* Added `--custom-labels` and `--custom-metric-queries` parameters to `az iot hub configuration create` and
+  `az iot edge deployment create`, allowing labels and metrics to be provided as space-separated key=value pairs.
+
 **Digital Twins updates**
 
@@ -48,6 +51,12 @@ unreleased
   If an output path is specified, this command will also create tar files containing each device's certificate bundle,
   an IoT Edge `config.toml` config file and an installation script to configure a target Edge device with these settings.
 
+
+**IoT DPS updates**
+
+* Removed file extension restriction for attached certificates in individual enrollments and enrollment groups creation/update commands,
+  and added suggested certificate format in `--help` docs.
+
 0.18.3
 +++++++++++++++
 
@@ -69,11 +78,6 @@ unreleased
 
 * Fixed an issue with enrollement group certificate encoding
 
-**IoT DPS updates**
-
-* Removed file extension restriction for attached certificates in individual enrollments and enrollment groups creation/update commands,
-  and added suggested certificate format in `--help` docs.
-
 0.18.2
 +++++++++++++++
 
diff --git a/azext_iot/_help.py b/azext_iot/_help.py
index 53c7bafe0..b38d8fb1e 100644
--- a/azext_iot/_help.py
+++ b/azext_iot/_help.py
@@ -748,7 +748,7 @@
       text: >
         az iot hub configuration create -c {config_name} -n {iothub_name} --content device_content.json
         --target-condition "tags.building=9 and tags.environment='test'" --priority 3
-    - name: Create a device configuration with labels and provide user metrics inline (bash syntax example)
+    - name: Create a device configuration with labels and provide user metrics inline (bash syntax example).
       text: >
         az iot hub configuration create -c {config_name} -n {iothub_name} --content device_content.json
         --target-condition "tags.building=9" --labels '{"key0":"value0", "key1":"value1"}' --priority 10
@@ -758,12 +758,17 @@
         az iot hub configuration create -c {config_name} -n {iothub_name} --content module_content.json
         --target-condition "from devices.modules where tags.building=9" --labels "{\\"key0\\":\\"value0\\", \\"key1\\":\\"value1\\"}"
         --metrics "{\\"metrics\\": {\\"queries\\": {\\"mymetric\\": \\"select moduleId from devices.modules where tags.location='US'\\"}}}"
-    - name: Create a module configuration with content and user metrics inline (powershell syntax example)
+    - name: Create a module configuration with content and user metrics inline (powershell syntax example).
       text: >
         az iot hub configuration create -c {config_name} -n {iothub_name}
         --content '{\\"moduleContent\\": {\\"properties.desired.chillerWaterSettings\\": {\\"temperature\\": 38, \\"pressure\\": 78}}}'
         --target-condition "from devices.modules where tags.building=9" --priority 1
         --metrics '{\\"metrics\\": {\\"queries\\": {\\"mymetric\\":\\"select moduleId from devices.modules where tags.location=''US''\\"}}}'
+    - name: Create a device configuration with an alternative input style of labels and metrics (shell agnostic).
+      text: >
+        az iot hub configuration create -c {config_name} -n {iothub_name} --content device_content.json
+        --target-condition "tags.building=9" --custom-labels key0="value0" key1="value1" --priority 10
+        --custom-metric-queries mymetric1="select deviceId from devices where tags.location='US'" mymetric2="select *"
 """
 
 helps[
@@ -1096,6 +1101,14 @@
         --target-condition "tags.building=9 and tags.environment='test'"
         --metrics metrics_content.json
         --layered
+    - name: Create a layered deployment with an alternative input style of labels and metrics (shell agnostic).
+      text: >
+        az iot edge deployment create -d {deployment_name} -n {iothub_name}
+        --content layered_modules_content.json
+        --target-condition "tags.building=9 and tags.environment='test'"
+        --custom-labels key0="value0" key1="value1"
+        --custom-metric-queries mymetric1="select deviceId from devices where tags.location='US'" mymetric2="select *"
+        --layered
 """
 
 helps[
diff --git a/azext_iot/_params.py b/azext_iot/_params.py
index 479b1ac6c..632e79f4a 100644
--- a/azext_iot/_params.py
+++ b/azext_iot/_params.py
@@ -230,6 +230,22 @@ def load_arguments(self, _):
             " from the supplied symmetric key without further validation. All other command parameters aside from"
             " duration will be ignored. Supported connection string types: Iot Hub, Device, Module."
         )
+        context.argument(
+            "custom_metric_queries",
+            nargs="+",
+            options_list=["--custom-metric-queries", "--cmq"],
+            help="An alternative input style (space-separated key=value pairs) for --metrics, intended to replace "
+            "it in the future. "
+            "For example: metric1=\"select deviceId from devices where tags.location='US'\" metric2=\"select *\"",
+        )
+        context.argument(
+            "custom_labels",
+            nargs="+",
+            options_list=["--custom-labels", "--cl"],
+            help="An alternative input style (space-separated key=value pairs) for --labels, intended to replace "
+            "it in the future. "
+            'For example: key1=value1 key2="this is my value"',
+        )
 
     with self.argument_context("iot hub") as context:
         context.argument(
@@ -805,12 +821,14 @@ def load_arguments(self, _):
         context.argument(
             "metrics",
             options_list=["--metrics", "-m"],
-            help="Device configuration metric definitions. Provide file path or raw json.",
+            help="Device configuration metric definitions. Provide file path or raw json. "
+            "Using --custom-metric-queries instead of --metrics is recommended.",
         )
         context.argument(
             "labels",
             options_list=["--labels", "--lab"],
             help="Map of labels to be applied to target configuration. "
+            "Using --custom-labels instead of --labels is recommended. "
             'Format example: {"key0":"value0", "key1":"value1"}',
         )
         context.argument(
@@ -845,12 +863,14 @@ def load_arguments(self, _):
         context.argument(
             "metrics",
             options_list=["--metrics", "-m"],
-            help="IoT Edge deployment metric definitions. Provide file path or raw json.",
+            help="IoT Edge deployment metric definitions. Provide file path or raw json. "
+            "Using --custom-metric-queries instead of --metrics is recommended.",
         )
         context.argument(
             "labels",
             options_list=["--labels", "--lab"],
             help="Map of labels to be applied to target deployment. "
+            "Using --custom-labels instead of --labels is recommended. "
             'Use the following format: \'{"key0":"value0", "key1":"value1"}\'',
         )
         context.argument(
diff --git a/azext_iot/deviceupdate/_help.py b/azext_iot/deviceupdate/_help.py
index 93566d36c..9ce50c4a3 100644
--- a/azext_iot/deviceupdate/_help.py
+++ b/azext_iot/deviceupdate/_help.py
@@ -842,13 +842,8 @@ def load_deviceupdate_help():
     `--related-file`.
Review examples and parameter descriptions for details on how to fully utilize the operation. - - For bash inline json format use '{"key":"value"}' and \\ (backslash) for command continuation. - - For powershell inline json format use '{\\"key\\":\\"value\\"}' and ` (tilde) for command continuation. - - For cmd inline json format use \"{\\"key\\":\\"value\\"}\" and ^ (caret) for command continuation. - - For file based json input use "@/path/to/file". File based input avoids shell quotation issues. - - For a detailed explanation of shell quoting rules please goto - https://learn.microsoft.com/en-us/cli/azure/use-cli-effectively + Read more about using quotation marks and escape characters in different shells here: + https://aka.ms/aziotcli-json examples: - name: Initialize a minimum content import manifest. Inline json optimized for `bash`. diff --git a/azext_iot/operations/hub.py b/azext_iot/operations/hub.py index ec505136f..6a47501c8 100644 --- a/azext_iot/operations/hub.py +++ b/azext_iot/operations/hub.py @@ -38,6 +38,7 @@ ) from azext_iot.iothub.providers.discovery import IotHubDiscovery from azext_iot.common.utility import ( + assemble_nargs_to_dict, handle_service_exception, read_file_content, init_monitoring, @@ -1188,6 +1189,8 @@ def iot_edge_deployment_create( cmd, config_id, content, + custom_labels=None, + custom_metric_queries=None, hub_name=None, target_condition="", priority=0, @@ -1205,6 +1208,8 @@ def iot_edge_deployment_create( cmd=cmd, config_id=config_id, content=content, + custom_labels=custom_labels, + custom_metric_queries=custom_metric_queries, hub_name=hub_name, target_condition=target_condition, priority=priority, @@ -1221,6 +1226,8 @@ def iot_hub_configuration_create( cmd, config_id, content, + custom_labels=None, + custom_metric_queries=None, hub_name=None, target_condition="", priority=0, @@ -1234,6 +1241,8 @@ def iot_hub_configuration_create( cmd=cmd, config_id=config_id, content=content, + custom_labels=custom_labels, + custom_metric_queries=custom_metric_queries, hub_name=hub_name, target_condition=target_condition, priority=priority, @@ -1251,6 +1260,8 @@ def _iot_hub_configuration_create( config_id, content, config_type, + custom_labels=None, + custom_metric_queries=None, hub_name=None, target_condition="", priority=0, @@ -1302,9 +1313,13 @@ def _iot_hub_configuration_create( "metrics json must include the '{}' property".format(metrics_key) ) metrics = metrics[metrics_key] + elif custom_metric_queries: + metrics = assemble_nargs_to_dict(custom_metric_queries) if labels: labels = process_json_arg(labels, argument_name="labels") + elif custom_labels: + labels = assemble_nargs_to_dict(custom_labels) config_content = ConfigurationContent(**processed_content) diff --git a/azext_iot/tests/iothub/configurations/test_config_kvp_metrics.json b/azext_iot/tests/iothub/configurations/test_config_kvp_metrics.json new file mode 100644 index 000000000..7b5893d7f --- /dev/null +++ b/azext_iot/tests/iothub/configurations/test_config_kvp_metrics.json @@ -0,0 +1,4 @@ +{ + "mymetric1": "select deviceId from devices where tags.location='US'", + "mymetric2": "select *" +} \ No newline at end of file diff --git a/azext_iot/tests/iothub/configurations/test_iot_config_int.py b/azext_iot/tests/iothub/configurations/test_iot_config_int.py index c5e98c62a..ebb003c10 100644 --- a/azext_iot/tests/iothub/configurations/test_iot_config_int.py +++ b/azext_iot/tests/iothub/configurations/test_iot_config_int.py @@ -22,6 +22,7 @@ __file__, "test_edge_deployment_malformed.json" ) 
generic_metrics_path = get_context_path(__file__, "test_config_generic_metrics.json") +kvp_metrics_path = get_context_path(__file__, "test_config_kvp_metrics.json") adm_content_module_path = get_context_path(__file__, "test_adm_module_content.json") adm_content_device_path = get_context_path(__file__, "test_adm_device_content.json") @@ -83,7 +84,7 @@ def test_edge_set_modules(self): def test_edge_deployments(self): for auth_phase in DATAPLANE_AUTH_TYPES: - config_count = 5 + config_count = 6 config_ids = self.generate_config_names(config_count) self.kwargs["generic_metrics"] = read_file_content(generic_metrics_path) @@ -96,6 +97,8 @@ def test_edge_deployments(self): edge_content_malformed_path ) self.kwargs["labels"] = '{"key0": "value0"}' + self.kwargs["custom_labels"] = '{"key0": "value0", "key1": "value1"}' + self.kwargs["custom_metric_queries"] = read_file_content(kvp_metrics_path) priority = random.randint(1, 10) condition = "tags.building=9 and tags.environment='test'" @@ -165,6 +168,41 @@ def test_edge_deployments(self): ], ) + # Metrics + labels using narg kvp parameters. Configurations must be lowercase and will be lower()'ed. + # Note: $schema is included as a nested property in the sample content. + self.cmd( + self.set_cmd_auth_type( + """iot edge deployment create -d {} --pri {} --tc \"{}\" --cl {} -k '{}' + --cmq {} -n {} -g {}""".format( + config_ids[5], + priority, + condition, + "key0=value0 key1=value1", + edge_content_path, + 'mymetric1="select deviceId from devices where tags.location=\'US\'" mymetric2="select *"', + self.entity_name, + self.entity_rg + ), + auth_type=auth_phase + ), + checks=[ + self.check("id", config_ids[5]), + self.check("priority", priority), + self.check("targetCondition", condition), + self.check("labels", json.loads(self.kwargs["custom_labels"])), + self.check( + "content.modulesContent", + json.loads(self.kwargs["edge_content"])["content"][ + "modulesContent" + ], + ), + self.check( + "metrics.queries", + json.loads(self.kwargs["custom_metric_queries"]), + ), + ], + ) + # Layered deployment with content + metrics from file. # No labels, target-condition or priority self.cmd( @@ -433,7 +471,7 @@ def test_edge_deployments(self): self.tearDown() def test_device_configurations(self): - config_count = 3 + config_count = 4 config_ids = self.generate_config_names(config_count) edge_config_ids = self.generate_config_names(1, True) @@ -442,7 +480,8 @@ def test_device_configurations(self): self.kwargs["adm_content_module"] = read_file_content(adm_content_module_path) self.kwargs["edge_content"] = read_file_content(edge_content_path) self.kwargs["labels"] = '{"key0": "value0"}' - + self.kwargs["custom_labels"] = '{"key0": "value0", "key1": "this is value"}' + self.kwargs["custom_metric_queries"] = read_file_content(kvp_metrics_path) priority = random.randint(1, 10) condition = "tags.building=9 and tags.environment='test'" @@ -514,6 +553,43 @@ def test_device_configurations(self): ], ) + # Metrics + labels using narg kvp parameters. + # Configurations must be lowercase and will be lower()'ed. + # Note: $schema is included as a nested property in the sample content. 
+ self.cmd( + self.set_cmd_auth_type( + """iot hub configuration create -c {} --pri {} --tc \"{}\" --cl {} + -k '{}' --cmq {} -n {} -g {}""" + .format( + config_ids[3].upper(), + priority, + module_condition, + 'key0=value0 key1="this is value"', + adm_content_module_path, + 'mymetric1="select deviceId from devices where tags.location=\'US\'" mymetric2="select *"', + self.entity_name, + self.entity_rg + ), + auth_type=auth_phase, + ), + checks=[ + self.check("id", config_ids[3].lower()), + self.check("priority", priority), + self.check("targetCondition", module_condition), + self.check("labels", json.loads(self.kwargs["custom_labels"])), + self.check( + "content.moduleContent", + json.loads(self.kwargs["adm_content_module"])["content"][ + "moduleContent" + ], + ), + self.check( + "metrics.queries", + json.loads(self.kwargs["custom_metric_queries"]), + ), + ], + ) + # Device content + metrics from file. # Configurations must be lowercase and will be lower()'ed. # No labels, target-condition or priority diff --git a/azext_iot/tests/iothub/configurations/test_iot_config_unit.py b/azext_iot/tests/iothub/configurations/test_iot_config_unit.py index dd3ffe816..88a42f444 100644 --- a/azext_iot/tests/iothub/configurations/test_iot_config_unit.py +++ b/azext_iot/tests/iothub/configurations/test_iot_config_unit.py @@ -13,7 +13,7 @@ from random import randint from knack.cli import CLIError from azext_iot.operations import hub as subject -from azext_iot.common.utility import read_file_content, evaluate_literal +from azext_iot.common.utility import read_file_content, evaluate_literal, validate_key_value_pairs from azext_iot.tests.conftest import ( build_mock_response, path_service_client, @@ -261,7 +261,7 @@ def serviceclient(self, mocker, fixture_ghcs, fixture_sas, request): return service_client @pytest.mark.parametrize( - "config_id, hub_name, target_condition, priority, labels", + "config_id, hub_name, target_condition, priority, labels, custom_labels, metrics, custom_metric_queries", [ ( "UPPERCASEID", @@ -269,6 +269,9 @@ def serviceclient(self, mocker, fixture_ghcs, fixture_sas, request): "tags.building=43 and tags.environment='test'", randint(0, 100), '{"key1":"value1"}', + None, + 'test_config_generic_metrics.json', + None ), ( "lowercaseid", @@ -276,8 +279,29 @@ def serviceclient(self, mocker, fixture_ghcs, fixture_sas, request): "tags.building=43 and tags.environment='test'", randint(0, 100), None, + None, + 'test_config_generic_metrics.json', + None + ), + ( + "mixedCaseId", + mock_target["entity"], + None, + None, + None, + None, + 'test_config_generic_metrics.json', + None), + ( + "newid", + mock_target["entity"], + "tags.building=43 and tags.environment='test'", + randint(0, 100), + None, + ['key1=value1'], + None, + ['mymetric1=select deviceId from devices where tags.location=''US''', 'mymetric2=select *'] ), - ("mixedCaseId", mock_target["entity"], None, None, None), ], ) def test_config_create_edge( @@ -285,23 +309,27 @@ def test_config_create_edge( fixture_cmd, serviceclient, sample_config_edge, - sample_config_metrics, config_id, + custom_labels, hub_name, target_condition, priority, labels, + metrics, + custom_metric_queries ): subject.iot_edge_deployment_create( cmd=fixture_cmd, config_id=config_id, hub_name=hub_name, content=sample_config_edge[1], + custom_labels=custom_labels, target_condition=target_condition, priority=priority, labels=labels, - metrics=sample_config_metrics[1], + metrics=metrics, layered=(sample_config_edge[0] == "layered"), + 
custom_metric_queries=custom_metric_queries ) args = serviceclient.call_args @@ -314,7 +342,11 @@ def test_config_create_edge( assert body["id"] == config_id.lower() assert body.get("targetCondition") == target_condition assert body.get("priority") == priority - assert body.get("labels") == evaluate_literal(labels, dict) + + if labels: + assert body.get("labels") == evaluate_literal(labels, dict) + elif custom_labels: + assert body.get("labels") == validate_key_value_pairs(";".join(custom_labels)) if ( sample_config_edge[0] == "inlineB" @@ -344,10 +376,13 @@ def test_config_create_edge( == json.loads(sample_config_edge[1])["content"]["modulesContent"] ) - self._assert_config_metrics_request(sample_config_metrics, body) + if metrics: + assert body.get("metrics") == json.loads(read_file_content(metrics))['metrics'] + elif custom_metric_queries: + assert body.get("metrics")['queries'] == validate_key_value_pairs(";".join(custom_metric_queries)) @pytest.mark.parametrize( - "config_id, hub_name, target_condition, priority, labels", + "config_id, hub_name, target_condition, priority, labels, custom_labels, metrics, custom_metric_queries", [ ( "lowercaseid", @@ -355,6 +390,19 @@ def test_config_create_edge( "tags.building=43 and tags.environment='test'", randint(0, 100), None, + None, + 'test_config_generic_metrics.json', + None, + ), + ( + "UPPERCASEID", + mock_target["entity"], + "tags.building=43 and tags.environment='test'", + randint(0, 100), + None, + ['key1=value1'], + None, + ['mymetric1=select deviceId from devices where tags.location=''US''', 'mymetric2=select *'] ) ], ) @@ -368,6 +416,9 @@ def test_config_create_edge_malformed( target_condition, priority, labels, + custom_labels, + metrics, + custom_metric_queries ): with pytest.raises(CLIError) as exc: subject.iot_edge_deployment_create( @@ -378,6 +429,9 @@ def test_config_create_edge_malformed( target_condition=target_condition, priority=priority, labels=labels, + custom_labels=custom_labels, + metrics=metrics, + custom_metric_queries=custom_metric_queries ) exception_obj = json.loads(str(exc.value)) @@ -388,7 +442,7 @@ def test_config_create_edge_malformed( assert "schemaPath" in error_element @pytest.mark.parametrize( - "config_id, hub_name, target_condition, priority, labels", + "config_id, hub_name, target_condition, priority, labels, custom_labels, metrics, custom_metric_queries", [ ( "UPPERCASEID", @@ -396,6 +450,9 @@ def test_config_create_edge_malformed( "tags.building=43 and tags.environment='test'", randint(0, 100), '{"key1":"value1"}', + None, + 'test_config_generic_metrics.json', + None ), ( "lowercaseid", @@ -403,8 +460,20 @@ def test_config_create_edge_malformed( "tags.building=43 and tags.environment='test'", randint(0, 100), None, + None, + 'test_config_generic_metrics.json', + None + ), + ( + "mixedCaseId", + mock_target["entity"], + None, + None, + None, + ['key1=value1'], + None, + ['mymetric1=select deviceId from devices where tags.location=''US''', 'mymetric2=select *'] ), - ("mixedCaseId", mock_target["entity"], None, None, None), ], ) def test_config_create_adm( @@ -412,12 +481,14 @@ def test_config_create_adm( fixture_cmd, serviceclient, sample_config_adm, - sample_config_metrics, config_id, hub_name, target_condition, priority, labels, + custom_labels, + metrics, + custom_metric_queries ): contentKey = ( @@ -438,7 +509,9 @@ def test_config_create_adm( target_condition=target_condition, priority=priority, labels=labels, - metrics=sample_config_metrics[1], + custom_labels=custom_labels, + metrics=metrics, + 
custom_metric_queries=custom_metric_queries ) args = serviceclient.call_args @@ -451,7 +524,11 @@ def test_config_create_adm( assert body["id"] == config_id.lower() assert body.get("targetCondition") == target_condition assert body.get("priority") == priority - assert body.get("labels") == evaluate_literal(labels, dict) + + if labels: + assert body.get("labels") == evaluate_literal(labels, dict) + elif custom_labels: + assert body.get("labels") == validate_key_value_pairs(";".join(custom_labels)) if sample_config_adm[0].endswith("Inline"): assert ( @@ -466,7 +543,10 @@ def test_config_create_adm( ] ) - self._assert_config_metrics_request(sample_config_metrics, body) + if metrics: + assert body.get("metrics") == json.loads(read_file_content(metrics))['metrics'] + elif custom_metric_queries: + assert body.get("metrics")['queries'] == validate_key_value_pairs(";".join(custom_metric_queries)) def _assert_config_metrics_request(self, sample_config_metrics, body): if sample_config_metrics[0]: @@ -487,7 +567,7 @@ def _assert_config_metrics_request(self, sample_config_metrics, body): assert body["metrics"] == {} @pytest.mark.parametrize( - "config_id, hub_name, target_condition, priority, labels", + "config_id, hub_name, target_condition, priority, labels, custom_labels, metrics, custom_metric_queries", [ ( "lowercaseid", @@ -495,6 +575,19 @@ def _assert_config_metrics_request(self, sample_config_metrics, body): "tags.building=43 and tags.environment='test'", randint(0, 100), None, + None, + 'test_config_generic_metrics.json', + None, + ), + ( + "UPPERCASEID", + mock_target["entity"], + "tags.building=43 and tags.environment='test'", + randint(0, 100), + None, + ['key1=value1'], + None, + ['mymetric1=select deviceId from devices where tags.location=''US''', 'mymetric2=select *'] ) ], ) @@ -503,20 +596,26 @@ def test_config_create_adm_invalid( fixture_cmd, serviceclient, config_id, + custom_labels, hub_name, target_condition, priority, labels, + metrics, + custom_metric_queries ): with pytest.raises(CLIError) as exc1: subject.iot_hub_configuration_create( cmd=fixture_cmd, config_id=config_id, + custom_labels=custom_labels, hub_name=hub_name, content=get_context_path(__file__, "test_edge_deployment.json"), target_condition=target_condition, priority=priority, labels=labels, + metrics=metrics, + custom_metric_queries=custom_metric_queries ) # API does not support both deviceContent and moduleContent at the same time. 
@@ -527,9 +626,12 @@ def test_config_create_adm_invalid( config_id=config_id, hub_name=hub_name, content=content, + custom_labels=custom_labels, target_condition=target_condition, priority=priority, labels=labels, + metrics=metrics, + custom_metric_queries=custom_metric_queries ) for exc in [exc1, exc2]: @@ -546,9 +648,12 @@ def test_config_create_adm_invalid( config_id=config_id, hub_name=hub_name, content=content, + custom_labels=custom_labels, target_condition=target_condition, priority=priority, labels=labels, + metrics=metrics, + custom_metric_queries=custom_metric_queries ) assert ( @@ -557,7 +662,7 @@ def test_config_create_adm_invalid( ) @pytest.mark.parametrize( - "config_id, hub_name, target_condition, priority, labels", + "config_id, hub_name, target_condition, priority, labels, custom_labels, metrics, custom_metric_queries", [ ( "lowercaseid", @@ -565,6 +670,19 @@ def test_config_create_adm_invalid( "tags.building=43 and tags.environment='test'", randint(0, 100), None, + None, + 'test_config_generic_metrics.json', + None, + ), + ( + "UPPERCASEID", + mock_target["entity"], + "tags.building=43 and tags.environment='test'", + randint(0, 100), + None, + ['key1=value1'], + None, + ['mymetric1=select deviceId from devices where tags.location=''US''', 'mymetric2=select *'] ) ], ) @@ -578,6 +696,9 @@ def test_config_create_error( target_condition, priority, labels, + custom_labels, + metrics, + custom_metric_queries ): with pytest.raises(CLIError): subject.iot_edge_deployment_create( @@ -588,6 +709,9 @@ def test_config_create_error( target_condition=target_condition, priority=priority, labels=labels, + custom_labels=custom_labels, + metrics=metrics, + custom_metric_queries=custom_metric_queries )
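Review note: for anyone unfamiliar with the nargs key=value pattern this PR adds, the sketch below shows roughly what the new `--custom-labels` / `--custom-metric-queries` parsing is expected to do. It is an illustrative approximation only, not the extension's code: the actual behavior lives in `assemble_nargs_to_dict` (azext_iot.common.utility), which the unit tests above compare against `validate_key_value_pairs`, and it may handle malformed tokens differently. The helper name `parse_kvp_nargs` below is hypothetical.

# Hypothetical sketch of folding space-separated key=value tokens into a dict;
# the real assemble_nargs_to_dict may differ in validation and error handling.
from typing import Dict, List, Optional


def parse_kvp_nargs(tokens: Optional[List[str]]) -> Dict[str, str]:
    """Split each 'key=value' token on the first '=' and build a dict."""
    result: Dict[str, str] = {}
    if not tokens:
        return result
    for token in tokens:
        key, sep, value = token.partition("=")
        # A token without '=' is kept here as a key with an empty value;
        # the real helper may raise a usage error instead.
        result[key] = value if sep else ""
    return result


if __name__ == "__main__":
    print(parse_kvp_nargs(["key0=value0", "key1=this is my value"]))
    # {'key0': 'value0', 'key1': 'this is my value'}
    print(parse_kvp_nargs(["mymetric1=select deviceId from devices where tags.location='US'",
                           "mymetric2=select *"]))
    # {'mymetric1': "select deviceId from devices where tags.location='US'", 'mymetric2': 'select *'}

Because nargs="+" hands the already shell-tokenized values straight to the command, a value containing spaces or quotes only needs the quoting rules of whichever shell the caller uses, which is presumably why the new inputs are described in the help text as shell agnostic.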