From 4d7d12b9862c08e654312daee5e9f3503d46718c Mon Sep 17 00:00:00 2001 From: Kathy Tran Date: Tue, 6 Feb 2024 10:58:17 -0500 Subject: [PATCH] Merge branch release/1.15.0 into develop (#483) * Split workflow executions in half if request body size is too big (#481) https://ucsc-cgl.atlassian.net/browse/SEAB-6183 * Update artifacts * Reset version * Aggregate execution metrics by status (#482) https://ucsc-cgl.atlassian.net/browse/SEAB-6199 * Aggregate execution metrics by status * Abstract out ExecutionsRequestBodyAggregator and fix tests * Update artifacts * Reset version --- THIRD-PARTY-LICENSES.txt | 12 +- metricsaggregator/pom.xml | 5 + .../client/cli/TerraMetricsSubmitter.java | 59 ++- .../helper/AggregationHelper.java | 16 +- .../helper/CostAggregator.java | 11 - .../helper/CpuAggregator.java | 11 - .../helper/ExecutionAggregator.java | 76 +--- .../helper/ExecutionStatusAggregator.java | 103 ++++- .../helper/ExecutionTimeAggregator.java | 12 - .../ExecutionsRequestBodyAggregator.java | 89 +++++ .../helper/MemoryAggregator.java | 11 - .../helper/ValidationStatusAggregator.java | 2 +- .../client/cli/MetricsAggregatorClientIT.java | 376 +++++++++++++----- .../helper/AggregationHelperTest.java | 289 -------------- .../helper/ExecutionStatusAggregatorTest.java | 322 ++++++++++++++- pom.xml | 2 +- 16 files changed, 833 insertions(+), 563 deletions(-) create mode 100644 metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ExecutionsRequestBodyAggregator.java diff --git a/THIRD-PARTY-LICENSES.txt b/THIRD-PARTY-LICENSES.txt index 8090106c..5fbcda4f 100644 --- a/THIRD-PARTY-LICENSES.txt +++ b/THIRD-PARTY-LICENSES.txt @@ -131,10 +131,10 @@ Lists of 417 third-party dependencies. (The Apache Software License, Version 2.0) docker-java-core (com.github.docker-java:docker-java-core:3.3.0 - https://github.com/docker-java/docker-java) (The Apache Software License, Version 2.0) docker-java-transport (com.github.docker-java:docker-java-transport:3.3.0 - https://github.com/docker-java/docker-java) (The Apache Software License, Version 2.0) docker-java-transport-httpclient5 (com.github.docker-java:docker-java-transport-httpclient5:3.3.0 - https://github.com/docker-java/docker-java) - (Apache Software License, Version 2.0) dockstore-common (io.dockstore:dockstore-common:1.15.0-rc.0 - no url defined) - (Apache Software License, Version 2.0) dockstore-integration-testing (io.dockstore:dockstore-integration-testing:1.15.0-rc.0 - no url defined) - (Apache Software License, Version 2.0) dockstore-language-plugin-parent (io.dockstore:dockstore-language-plugin-parent:1.15.0-rc.0 - no url defined) - (Apache Software License, Version 2.0) dockstore-webservice (io.dockstore:dockstore-webservice:1.15.0-rc.0 - no url defined) + (Apache Software License, Version 2.0) dockstore-common (io.dockstore:dockstore-common:1.15.0-rc.2 - no url defined) + (Apache Software License, Version 2.0) dockstore-integration-testing (io.dockstore:dockstore-integration-testing:1.15.0-rc.2 - no url defined) + (Apache Software License, Version 2.0) dockstore-language-plugin-parent (io.dockstore:dockstore-language-plugin-parent:1.15.0-rc.2 - no url defined) + (Apache Software License, Version 2.0) dockstore-webservice (io.dockstore:dockstore-webservice:1.15.0-rc.2 - no url defined) (Apache License 2.0) Dropwizard (io.dropwizard:dropwizard-core:4.0.2 - http://www.dropwizard.io/4.0.2/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-core) (Apache License 2.0) Dropwizard Asset Bundle 
(io.dropwizard:dropwizard-assets:4.0.2 - http://www.dropwizard.io/4.0.2/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-assets) (Apache License 2.0) Dropwizard Authentication (io.dropwizard:dropwizard-auth:4.0.2 - http://www.dropwizard.io/4.0.2/dropwizard-bom/dropwizard-dependencies/dropwizard-parent/dropwizard-auth) @@ -354,7 +354,7 @@ Lists of 417 third-party dependencies. (Apache License, Version 2.0) Objenesis (org.objenesis:objenesis:3.2 - http://objenesis.org/objenesis) (The Apache Software License, Version 2.0) okhttp (com.squareup.okhttp3:okhttp:4.10.0 - https://square.github.io/okhttp/) (The Apache Software License, Version 2.0) okio (com.squareup.okio:okio-jvm:3.0.0 - https://github.com/square/okio/) - (Apache Software License, Version 2.0) openapi-java-client (io.dockstore:openapi-java-client:1.15.0-rc.0 - no url defined) + (Apache Software License, Version 2.0) openapi-java-client (io.dockstore:openapi-java-client:1.15.0-rc.2 - no url defined) (The Apache License, Version 2.0) OpenCensus (io.opencensus:opencensus-api:0.31.0 - https://github.com/census-instrumentation/opencensus-java) (Apache 2) opencsv (com.opencsv:opencsv:5.7.1 - http://opencsv.sf.net) (Apache 2.0) optics (io.circe:circe-optics_2.13:0.14.1 - https://github.com/circe/circe-optics) @@ -395,7 +395,7 @@ Lists of 417 third-party dependencies. (Apache License 2.0) swagger-core-jakarta (io.swagger.core.v3:swagger-core-jakarta:2.2.15 - https://github.com/swagger-api/swagger-core/modules/swagger-core-jakarta) (Apache License 2.0) swagger-integration-jakarta (io.swagger.core.v3:swagger-integration-jakarta:2.2.15 - https://github.com/swagger-api/swagger-core/modules/swagger-integration-jakarta) (Apache Software License, Version 2.0) swagger-java-bitbucket-client (io.dockstore:swagger-java-bitbucket-client:2.0.3 - no url defined) - (Apache Software License, Version 2.0) swagger-java-client (io.dockstore:swagger-java-client:1.15.0-rc.0 - no url defined) + (Apache Software License, Version 2.0) swagger-java-client (io.dockstore:swagger-java-client:1.15.0-rc.2 - no url defined) (Apache Software License, Version 2.0) swagger-java-discourse-client (io.dockstore:swagger-java-discourse-client:2.0.1 - no url defined) (Apache Software License, Version 2.0) swagger-java-quay-client (io.dockstore:swagger-java-quay-client:2.0.2 - no url defined) (Apache Software License, Version 2.0) swagger-java-sam-client (io.dockstore:swagger-java-sam-client:2.0.2 - no url defined) diff --git a/metricsaggregator/pom.xml b/metricsaggregator/pom.xml index 362f0e83..09695036 100644 --- a/metricsaggregator/pom.xml +++ b/metricsaggregator/pom.xml @@ -156,6 +156,10 @@ org.apache.commons commons-csv + + com.google.guava + guava + org.junit.jupiter junit-jupiter-api @@ -292,6 +296,7 @@ org.javamoney.moneta:moneta-core ch.qos.logback:logback-classic ch.qos.logback:logback-core + com.google.guava:guava diff --git a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/client/cli/TerraMetricsSubmitter.java b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/client/cli/TerraMetricsSubmitter.java index 70dc2df4..24cabbc3 100644 --- a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/client/cli/TerraMetricsSubmitter.java +++ b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/client/cli/TerraMetricsSubmitter.java @@ -5,6 +5,8 @@ import static io.dockstore.utils.ExceptionHandler.exceptionMessage; import static java.util.stream.Collectors.groupingBy; +import com.google.common.collect.Lists; 
+import com.google.common.math.IntMath; import io.dockstore.common.Partner; import io.dockstore.metricsaggregator.MetricsAggregatorConfig; import io.dockstore.metricsaggregator.client.cli.CommandLineArgs.SubmitTerraMetrics; @@ -22,6 +24,7 @@ import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; +import java.math.RoundingMode; import java.nio.charset.StandardCharsets; import java.time.Instant; import java.time.LocalDateTime; @@ -44,6 +47,7 @@ import org.apache.commons.csv.CSVPrinter; import org.apache.commons.csv.CSVRecord; import org.apache.commons.lang3.StringUtils; +import org.apache.http.HttpStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -167,21 +171,60 @@ private void submitWorkflowExecutions(String sourceUrl, List workflow } final SourceUrlTrsInfo sourceUrlTrsInfo = sourceUrlToSourceUrlTrsInfo.get(sourceUrl); - final List workflowExecutionsToSubmit = workflowMetricRecords.stream() + List workflowExecutionsToSubmit = workflowMetricRecords.stream() .map(workflowExecution -> getTerraWorkflowExecutionFromCsvRecord(workflowExecution, sourceUrlTrsInfo.sourceUrl(), skippedExecutionsCsvPrinter)) .filter(Optional::isPresent) .map(Optional::get) .toList(); - final ExecutionsRequestBody executionsRequestBody = new ExecutionsRequestBody().runExecutions(workflowExecutionsToSubmit); + String description = "Submitted using the metricsaggregator's submit-terra-metrics command"; + if (StringUtils.isNotBlank(submitTerraMetricsCommand.getDescription())) { + description += ". " + submitTerraMetricsCommand.getDescription(); + } + + executionMetricsPost(workflowExecutionsToSubmit, sourceUrlTrsInfo, description, extendedGa4GhApi, workflowMetricRecords, skippedExecutionsCsvPrinter); + } + + /** + * Submit Terra workflow executions to Dockstore. + * If the request fails with a 413 Request Entity Too Large and there are more than one execution to submit, the function halves the number of workflow executions to submit then re-attempts submission + * until it's successful or a non-413 error occurs. + * @param workflowExecutionsToSubmit + * @param sourceUrlTrsInfo + * @param description + * @param extendedGa4GhApi + * @param workflowMetricRecords + * @param skippedExecutionsCsvPrinter + */ + private void executionMetricsPost(List workflowExecutionsToSubmit, SourceUrlTrsInfo sourceUrlTrsInfo, String description, ExtendedGa4GhApi extendedGa4GhApi, List workflowMetricRecords, CSVPrinter skippedExecutionsCsvPrinter) { try { - String description = "Submitted using the metricsaggregator's submit-terra-metrics command"; - if (StringUtils.isNotBlank(submitTerraMetricsCommand.getDescription())) { - description += ". 
" + submitTerraMetricsCommand.getDescription(); - } - extendedGa4GhApi.executionMetricsPost(executionsRequestBody, Partner.TERRA.toString(), sourceUrlTrsInfo.trsId(), sourceUrlTrsInfo.version(), description); + extendedGa4GhApi.executionMetricsPost(new ExecutionsRequestBody().runExecutions(workflowExecutionsToSubmit), Partner.TERRA.toString(), sourceUrlTrsInfo.trsId(), + sourceUrlTrsInfo.version(), description); numberOfExecutionsSubmitted.addAndGet(workflowMetricRecords.size()); } catch (ApiException e) { - logSkippedExecutions(sourceUrlTrsInfo.sourceUrl(), workflowMetricRecords, String.format("Could not submit execution metrics to Dockstore for workflow %s: %s", sourceUrlTrsInfo, e.getMessage()), skippedExecutionsCsvPrinter, false); + if (e.getCode() == HttpStatus.SC_REQUEST_TOO_LONG) { + // One execution is too large, not much that can be done, so log and skip it + if (workflowExecutionsToSubmit.size() == 1) { + logSkippedExecutions(sourceUrlTrsInfo.sourceUrl(), workflowMetricRecords, + String.format("Could not submit execution metric to Dockstore for workflow %s. Single execution is too large: %s", sourceUrlTrsInfo, + e.getMessage()), skippedExecutionsCsvPrinter, false); + } else { + int partitionSize = IntMath.divide(workflowExecutionsToSubmit.size(), 2, RoundingMode.UP); + List> workflowExecutionsToSubmitPartitions = Lists.partition(workflowExecutionsToSubmit, + partitionSize); + LOG.info( + "Request body too large, dividing list of {} workflow executions in half with partition size {} and re-attempting", + workflowExecutionsToSubmit.size(), partitionSize); + for (List partition : workflowExecutionsToSubmitPartitions) { + LOG.info("Re-attempting with {} workflow executions", partition.size()); + executionMetricsPost(partition, sourceUrlTrsInfo, description, extendedGa4GhApi, workflowMetricRecords, + skippedExecutionsCsvPrinter); + } + } + } else { + logSkippedExecutions(sourceUrlTrsInfo.sourceUrl(), workflowMetricRecords, + String.format("Could not submit execution metrics to Dockstore for workflow %s: %s", sourceUrlTrsInfo, + e.getMessage()), skippedExecutionsCsvPrinter, false); + } } } diff --git a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/AggregationHelper.java b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/AggregationHelper.java index 52b7f8f0..3d581fe6 100644 --- a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/AggregationHelper.java +++ b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/AggregationHelper.java @@ -26,13 +26,7 @@ public static Optional getAggregatedMetrics(ExecutionsRequestBody allSu // Set run metrics Optional aggregatedExecutionStatus = new ExecutionStatusAggregator().getAggregatedMetricFromAllSubmissions(allSubmissions); boolean containsRunMetrics = aggregatedExecutionStatus.isPresent(); - if (aggregatedExecutionStatus.isPresent()) { - aggregatedMetrics.setExecutionStatusCount(aggregatedExecutionStatus.get()); - new ExecutionTimeAggregator().getAggregatedMetricFromAllSubmissions(allSubmissions).ifPresent(aggregatedMetrics::setExecutionTime); - new CpuAggregator().getAggregatedMetricFromAllSubmissions(allSubmissions).ifPresent(aggregatedMetrics::setCpu); - new MemoryAggregator().getAggregatedMetricFromAllSubmissions(allSubmissions).ifPresent(aggregatedMetrics::setMemory); - new CostAggregator().getAggregatedMetricFromAllSubmissions(allSubmissions).ifPresent(aggregatedMetrics::setCost); - } + 
aggregatedExecutionStatus.ifPresent(aggregatedMetrics::setExecutionStatusCount); // Set validation metrics Optional aggregatedValidationStatus = new ValidationStatusAggregator().getAggregatedMetricFromAllSubmissions(allSubmissions); @@ -57,13 +51,7 @@ public static Optional getAggregatedMetrics(List aggregatedMet // Set run metrics Optional aggregatedExecutionStatus = new ExecutionStatusAggregator().getAggregatedMetricFromMetricsList(aggregatedMetrics); boolean containsRunMetrics = aggregatedExecutionStatus.isPresent(); - if (aggregatedExecutionStatus.isPresent()) { - overallMetrics.setExecutionStatusCount(aggregatedExecutionStatus.get()); - new ExecutionTimeAggregator().getAggregatedMetricFromMetricsList(aggregatedMetrics).ifPresent(overallMetrics::setExecutionTime); - new CpuAggregator().getAggregatedMetricFromMetricsList(aggregatedMetrics).ifPresent(overallMetrics::setCpu); - new MemoryAggregator().getAggregatedMetricFromMetricsList(aggregatedMetrics).ifPresent(overallMetrics::setMemory); - new CostAggregator().getAggregatedMetricFromMetricsList(aggregatedMetrics).ifPresent(overallMetrics::setCost); - } + aggregatedExecutionStatus.ifPresent(overallMetrics::setExecutionStatusCount); // Set validation metrics Optional aggregatedValidationStatus = new ValidationStatusAggregator().getAggregatedMetricFromMetricsList(aggregatedMetrics); diff --git a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/CostAggregator.java b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/CostAggregator.java index f46207cf..12002d0c 100644 --- a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/CostAggregator.java +++ b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/CostAggregator.java @@ -6,8 +6,6 @@ import io.dockstore.metricsaggregator.MoneyStatistics; import io.dockstore.openapi.client.model.Cost; import io.dockstore.openapi.client.model.CostMetric; -import io.dockstore.openapi.client.model.ExecutionsRequestBody; -import io.dockstore.openapi.client.model.Metrics; import io.dockstore.openapi.client.model.RunExecution; import io.dockstore.openapi.client.model.TaskExecutions; import java.util.List; @@ -16,21 +14,12 @@ import org.javamoney.moneta.Money; public class CostAggregator implements ExecutionAggregator { - @Override - public CostMetric getMetricFromMetrics(Metrics metrics) { - return metrics.getCost(); - } @Override public Cost getMetricFromExecution(RunExecution execution) { return execution.getCost(); } - @Override - public List getExecutionsFromExecutionRequestBody(ExecutionsRequestBody executionsRequestBody) { - return executionsRequestBody.getRunExecutions(); - } - @Override public Optional getWorkflowExecutionFromTaskExecutions(TaskExecutions taskExecutionsForOneWorkflowRun) { final List taskExecutions = taskExecutionsForOneWorkflowRun.getTaskExecutions(); diff --git a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/CpuAggregator.java b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/CpuAggregator.java index 8fdbb6c1..d8799f92 100644 --- a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/CpuAggregator.java +++ b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/CpuAggregator.java @@ -2,8 +2,6 @@ import io.dockstore.metricsaggregator.DoubleStatistics; import io.dockstore.openapi.client.model.CpuMetric; -import io.dockstore.openapi.client.model.ExecutionsRequestBody; -import io.dockstore.openapi.client.model.Metrics; import 
io.dockstore.openapi.client.model.RunExecution; import io.dockstore.openapi.client.model.TaskExecutions; import java.util.List; @@ -15,21 +13,12 @@ * @return */ public class CpuAggregator implements ExecutionAggregator { - @Override - public CpuMetric getMetricFromMetrics(Metrics metrics) { - return metrics.getCpu(); - } @Override public Integer getMetricFromExecution(RunExecution execution) { return execution.getCpuRequirements(); } - @Override - public List getExecutionsFromExecutionRequestBody(ExecutionsRequestBody executionsRequestBody) { - return executionsRequestBody.getRunExecutions(); - } - @Override public Optional getWorkflowExecutionFromTaskExecutions(TaskExecutions taskExecutionsForOneWorkflowRun) { final List taskExecutions = taskExecutionsForOneWorkflowRun.getTaskExecutions(); diff --git a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ExecutionAggregator.java b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ExecutionAggregator.java index 440ca377..4fd18256 100644 --- a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ExecutionAggregator.java +++ b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ExecutionAggregator.java @@ -1,30 +1,19 @@ package io.dockstore.metricsaggregator.helper; import io.dockstore.openapi.client.model.Execution; -import io.dockstore.openapi.client.model.ExecutionsRequestBody; -import io.dockstore.openapi.client.model.Metrics; import io.dockstore.openapi.client.model.TaskExecutions; -import java.util.ArrayList; import java.util.List; import java.util.Objects; import java.util.Optional; -import java.util.stream.Collectors; /** - * An interface defining the methods needed to aggregate workflow executions into aggregated metrics to submit to Dockstore. + * An interface defining the methods needed to aggregate workflow executions into aggregated metrics. * @param The type of execution, example: RunExecution or ValidationExecution, that contains the metric to aggregate * @param The aggregated metric from the Metrics class, a class containing multiple types of aggregated metrics * @param The execution metric to aggregate from the Execution */ public interface ExecutionAggregator { - /** - * Get the aggregated metric associated with the metric type from the aggregated Metrics class, which contains multiple types of aggregated metrics. - * @param metrics - * @return - */ - M getMetricFromMetrics(Metrics metrics); - /** * Get the metric to aggregate from a single workflow execution. * @param execution @@ -32,13 +21,6 @@ public interface ExecutionAggregator { */ E getMetricFromExecution(T execution); - /** - * Get the executions containing the metric to aggregate from ExecutionsRequestBody. - * @param executionsRequestBody - * @return - */ - List getExecutionsFromExecutionRequestBody(ExecutionsRequestBody executionsRequestBody); - /** * Aggregates TaskExecutions that belong to a single workflow run into a workflow-level RunExecution * @param taskExecutionsForOneWorkflowRun @@ -71,60 +53,4 @@ default List getNonNullMetricsFromExecutions(List executions) { .filter(Objects::nonNull) .toList(); } - - /** - * Aggregate metrics from all submissions in the ExecutionsRequestBody. - * This method uses the runExecutions, taskExecutions, and aggregatedExecutions from ExecutionRequestBody to create an aggregated metric. - * Metrics are aggregated by: - *
- *   1. Aggregating task executions, provided via ExecutionRequestBody.taskExecutions, into workflow executions.
- *   2. Aggregating workflow executions, submitted via ExecutionRequestBody.runExecutions and workflow executions that were aggregated from task executions, into an aggregated metric.
- *   3. Aggregating the list of aggregated metrics, submitted via ExecutionRequestBody.aggregatedExecutions and the aggregated metric that was aggregated from workflow executions, into one aggregated metric.
- * @param allSubmissions - * @return - */ - default Optional getAggregatedMetricFromAllSubmissions(ExecutionsRequestBody allSubmissions) { - final List workflowExecutions = new ArrayList<>(getExecutionsFromExecutionRequestBody(allSubmissions)); - - // If task executions are present, calculate the workflow RunExecution containing the overall workflow-level execution time for each list of tasks - if (!allSubmissions.getTaskExecutions().isEmpty()) { - final List calculatedWorkflowExecutionsFromTasks = allSubmissions.getTaskExecutions().stream() - .map(this::getWorkflowExecutionFromTaskExecutions) - .filter(Optional::isPresent) - .map(Optional::get) - .toList(); - workflowExecutions.addAll(calculatedWorkflowExecutionsFromTasks); - } - - // Get aggregated metrics that were submitted to Dockstore - List aggregatedMetrics = allSubmissions.getAggregatedExecutions().stream() - .map(this::getMetricFromMetrics) - .filter(Objects::nonNull) - .collect(Collectors.toCollection(ArrayList::new)); - - // Aggregate workflow executions into one metric and add it to the list of aggregated metrics - Optional aggregatedMetricFromWorkflowExecutions = getAggregatedMetricFromExecutions(workflowExecutions); - aggregatedMetricFromWorkflowExecutions.ifPresent(aggregatedMetrics::add); - - if (!aggregatedMetrics.isEmpty()) { - // Calculate the new aggregated metric from the list of aggregated metrics - return getAggregatedMetricsFromAggregatedMetrics(aggregatedMetrics); - } - return Optional.empty(); - } - - /** - * Given a list of Metrics, a class containing multiple types of aggregated metrics, get the metrics associated with the metric type and - * aggregate them into a metric of this type. - * @param metricsList - * @return - */ - default Optional getAggregatedMetricFromMetricsList(List metricsList) { - List specificMetrics = metricsList.stream() - .map(this::getMetricFromMetrics) - .filter(Objects::nonNull) - .toList(); - return getAggregatedMetricsFromAggregatedMetrics(specificMetrics); - } } diff --git a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ExecutionStatusAggregator.java b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ExecutionStatusAggregator.java index 4aeac71b..f75a208f 100644 --- a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ExecutionStatusAggregator.java +++ b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ExecutionStatusAggregator.java @@ -5,6 +5,7 @@ import io.dockstore.openapi.client.model.ExecutionStatusMetric; import io.dockstore.openapi.client.model.ExecutionsRequestBody; import io.dockstore.openapi.client.model.Metrics; +import io.dockstore.openapi.client.model.MetricsByStatus; import io.dockstore.openapi.client.model.RunExecution; import io.dockstore.openapi.client.model.RunExecution.ExecutionStatusEnum; import io.dockstore.openapi.client.model.TaskExecutions; @@ -20,7 +21,13 @@ /** * Aggregate Execution Status metrics by summing up the count of each Execution Status. 
*/ -public class ExecutionStatusAggregator implements ExecutionAggregator { +public class ExecutionStatusAggregator implements + ExecutionsRequestBodyAggregator { + // Aggregators used to calculate metrics by execution status + private final ExecutionTimeAggregator executionTimeAggregator = new ExecutionTimeAggregator(); + private final CpuAggregator cpuAggregator = new CpuAggregator(); + private final MemoryAggregator memoryAggregator = new MemoryAggregator(); + private final CostAggregator costAggregator = new CostAggregator(); @Override public ExecutionStatusMetric getMetricFromMetrics(Metrics metrics) { @@ -40,10 +47,11 @@ public List getExecutionsFromExecutionRequestBody(ExecutionsReques @Override public Optional getWorkflowExecutionFromTaskExecutions(TaskExecutions taskExecutionsForOneWorkflowRun) { final List taskExecutions = taskExecutionsForOneWorkflowRun.getTaskExecutions(); + RunExecution workflowExecution = new RunExecution(); if (taskExecutions != null && taskExecutions.stream().map(RunExecution::getExecutionStatus).allMatch(Objects::nonNull)) { if (taskExecutions.stream().allMatch(taskRunExecution -> taskRunExecution.getExecutionStatus() == ExecutionStatusEnum.SUCCESSFUL)) { // All executions were successful - return Optional.of(new RunExecution().executionStatus(ExecutionStatusEnum.SUCCESSFUL)); + workflowExecution.setExecutionStatus(ExecutionStatusEnum.SUCCESSFUL); } else { // If there were failed executions, set the overall status to the most frequent failed status Optional mostFrequentFailedStatus = taskExecutions.stream() @@ -54,9 +62,15 @@ public Optional getWorkflowExecutionFromTaskExecutions(TaskExecuti .stream() .max(Entry.comparingByValue()) .map(Entry::getKey); - if (mostFrequentFailedStatus.isPresent()) { - return Optional.of(new RunExecution().executionStatus(mostFrequentFailedStatus.get())); - } + mostFrequentFailedStatus.ifPresent(workflowExecution::setExecutionStatus); + } + + if (workflowExecution.getExecutionStatus() != null) { + executionTimeAggregator.getWorkflowExecutionFromTaskExecutions(taskExecutionsForOneWorkflowRun).ifPresent(executionWithTime -> workflowExecution.setExecutionTime(executionWithTime.getExecutionTime())); + cpuAggregator.getWorkflowExecutionFromTaskExecutions(taskExecutionsForOneWorkflowRun).ifPresent(executionWithCpu -> workflowExecution.setCpuRequirements(executionWithCpu.getCpuRequirements())); + memoryAggregator.getWorkflowExecutionFromTaskExecutions(taskExecutionsForOneWorkflowRun).ifPresent(executionWithMemory -> workflowExecution.setMemoryRequirementsGB(executionWithMemory.getMemoryRequirementsGB())); + costAggregator.getWorkflowExecutionFromTaskExecutions(taskExecutionsForOneWorkflowRun).ifPresent(executionWithCost -> workflowExecution.setCost(executionWithCost.getCost())); + return Optional.of(workflowExecution); } } return Optional.empty(); @@ -65,11 +79,20 @@ public Optional getWorkflowExecutionFromTaskExecutions(TaskExecuti @Override public Optional getAggregatedMetricFromExecutions(List executions) { if (!executions.isEmpty()) { - // Calculate the status count from the workflow executions submitted - Map executionsStatusCount = executions.stream() - .map(execution -> execution.getExecutionStatus().toString()) - .collect(groupingBy(Function.identity(), Collectors.reducing(0, e -> 1, Integer::sum))); - return Optional.of(new ExecutionStatusMetric().count(executionsStatusCount)); + Map> executionsByStatus = executions.stream() + .collect(groupingBy(RunExecution::getExecutionStatus)); + + ExecutionStatusMetric 
executionStatusMetric = new ExecutionStatusMetric(); + executionsByStatus.forEach((status, executionsForStatus) -> { + MetricsByStatus metricsByStatus = getMetricsByStatusFromExecutions(executionsForStatus); + executionStatusMetric.getCount().put(status.toString(), metricsByStatus); + }); + + // Figure out metrics over all statuses + MetricsByStatus overallMetricsByStatus = getMetricsByStatusFromExecutions(executionsByStatus.values().stream().flatMap(Collection::stream).toList()); + executionStatusMetric.getCount().put(ExecutionStatusEnum.ALL.name(), overallMetricsByStatus); + + return Optional.of(executionStatusMetric); } return Optional.empty(); } @@ -77,14 +100,68 @@ public Optional getAggregatedMetricFromExecutions(List getAggregatedMetricsFromAggregatedMetrics(List aggregatedMetrics) { if (!aggregatedMetrics.isEmpty()) { - Map statusCount = aggregatedMetrics.stream() + Map> statusToMetricsByStatus = aggregatedMetrics.stream() .filter(Objects::nonNull) .map(executionStatusMetric -> executionStatusMetric.getCount().entrySet()) .flatMap(Collection::stream) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, Integer::sum)); + .collect(groupingBy(Map.Entry::getKey, Collectors.mapping(Entry::getValue, Collectors.toList()))); + + if (statusToMetricsByStatus.isEmpty()) { + return Optional.empty(); + } + + ExecutionStatusMetric executionStatusMetric = new ExecutionStatusMetric(); + statusToMetricsByStatus.forEach((status, metricsForStatus) -> { + MetricsByStatus metricsByStatus = getMetricsByStatusFromMetricsByStatusList(metricsForStatus); + executionStatusMetric.getCount().put(status, metricsByStatus); + }); - return statusCount.isEmpty() ? Optional.empty() : Optional.of(new ExecutionStatusMetric().count(statusCount)); + // Calculate metrics over all statuses + // Calculate from previous ALL MetricsByStatus (aggregate using aggregated data) + List metricsByStatusesToCalculateAllStatus = statusToMetricsByStatus.get(ExecutionStatusEnum.ALL.name()); + if (metricsByStatusesToCalculateAllStatus == null) { + // If there's no ALL key, calculate from other statuses + metricsByStatusesToCalculateAllStatus = statusToMetricsByStatus.values().stream().flatMap(Collection::stream).toList(); + } + MetricsByStatus overallMetricsByStatus = getMetricsByStatusFromMetricsByStatusList(metricsByStatusesToCalculateAllStatus); + executionStatusMetric.getCount().put(ExecutionStatusEnum.ALL.name(), overallMetricsByStatus); + return Optional.of(executionStatusMetric); } return Optional.empty(); } + + /** + * Aggregate executions into a MetricsByStatus object. Assumes that all executions have the status + * @param executions + * @return + */ + private MetricsByStatus getMetricsByStatusFromExecutions(List executions) { + MetricsByStatus metricsByStatus = new MetricsByStatus() + .executionStatusCount(executions.size()); + + // Figure out metrics by status + executionTimeAggregator.getAggregatedMetricFromExecutions(executions).ifPresent(metricsByStatus::setExecutionTime); + cpuAggregator.getAggregatedMetricFromExecutions(executions).ifPresent(metricsByStatus::setCpu); + memoryAggregator.getAggregatedMetricFromExecutions(executions).ifPresent(metricsByStatus::setMemory); + costAggregator.getAggregatedMetricFromExecutions(executions).ifPresent(metricsByStatus::setCost); + return metricsByStatus; + } + + /** + * Aggregate a list of MetricsByStatus objects into a MetricsByStatus object. 
Assumes that all executions have the same status + * @param metricsByStatuses + * @return + */ + private MetricsByStatus getMetricsByStatusFromMetricsByStatusList(List metricsByStatuses) { + final int totalCountForStatus = metricsByStatuses.stream().map(MetricsByStatus::getExecutionStatusCount).reduce(0, Integer::sum); + MetricsByStatus metricsByStatus = new MetricsByStatus() + .executionStatusCount(totalCountForStatus); + + // Figure out metrics by status + executionTimeAggregator.getAggregatedMetricsFromAggregatedMetrics(metricsByStatuses.stream().map(MetricsByStatus::getExecutionTime).filter(Objects::nonNull).toList()).ifPresent(metricsByStatus::setExecutionTime); + cpuAggregator.getAggregatedMetricsFromAggregatedMetrics(metricsByStatuses.stream().map(MetricsByStatus::getCpu).filter(Objects::nonNull).toList()).ifPresent(metricsByStatus::setCpu); + memoryAggregator.getAggregatedMetricsFromAggregatedMetrics(metricsByStatuses.stream().map(MetricsByStatus::getMemory).filter(Objects::nonNull).toList()).ifPresent(metricsByStatus::setMemory); + costAggregator.getAggregatedMetricsFromAggregatedMetrics(metricsByStatuses.stream().map(MetricsByStatus::getCost).filter(Objects::nonNull).toList()).ifPresent(metricsByStatus::setCost); + return metricsByStatus; + } } diff --git a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ExecutionTimeAggregator.java b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ExecutionTimeAggregator.java index fe44b607..db21a386 100644 --- a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ExecutionTimeAggregator.java +++ b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ExecutionTimeAggregator.java @@ -5,8 +5,6 @@ import io.dockstore.metricsaggregator.DoubleStatistics; import io.dockstore.openapi.client.model.ExecutionTimeMetric; -import io.dockstore.openapi.client.model.ExecutionsRequestBody; -import io.dockstore.openapi.client.model.Metrics; import io.dockstore.openapi.client.model.RunExecution; import io.dockstore.openapi.client.model.TaskExecutions; import java.time.Duration; @@ -26,16 +24,6 @@ public String getMetricFromExecution(RunExecution execution) { return execution.getExecutionTime(); } - @Override - public ExecutionTimeMetric getMetricFromMetrics(Metrics metrics) { - return metrics.getExecutionTime(); - } - - @Override - public List getExecutionsFromExecutionRequestBody(ExecutionsRequestBody executionsRequestBody) { - return executionsRequestBody.getRunExecutions(); - } - @Override public Optional getWorkflowExecutionFromTaskExecutions(TaskExecutions taskExecutionsForOneWorkflowRun) { final List taskExecutions = taskExecutionsForOneWorkflowRun.getTaskExecutions(); diff --git a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ExecutionsRequestBodyAggregator.java b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ExecutionsRequestBodyAggregator.java new file mode 100644 index 00000000..895e2836 --- /dev/null +++ b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ExecutionsRequestBodyAggregator.java @@ -0,0 +1,89 @@ +package io.dockstore.metricsaggregator.helper; + +import io.dockstore.openapi.client.model.Execution; +import io.dockstore.openapi.client.model.ExecutionsRequestBody; +import io.dockstore.openapi.client.model.Metrics; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.stream.Collectors; + +/** + * An interface defining the 
methods needed to aggregate workflow executions into aggregated metrics from ExecutionsRequestBody S3 objects to submit to Dockstore. + * @param The type of execution, example: RunExecution or ValidationExecution, that contains the metric to aggregate + * @param The aggregated metric from the Metrics class, a class containing multiple types of aggregated metrics + * @param The execution metric to aggregate from the Execution + */ +public interface ExecutionsRequestBodyAggregator extends ExecutionAggregator { + + /** + * Get the aggregated metric associated with the metric type from the aggregated Metrics class, which contains multiple types of aggregated metrics. + * @param metrics + * @return + */ + M getMetricFromMetrics(Metrics metrics); + + /** + * Get the executions containing the metric to aggregate from ExecutionsRequestBody. + * @param executionsRequestBody + * @return + */ + List getExecutionsFromExecutionRequestBody(ExecutionsRequestBody executionsRequestBody); + + /** + * Aggregate metrics from all submissions in the ExecutionsRequestBody. + * This method uses the runExecutions, taskExecutions, and aggregatedExecutions from ExecutionRequestBody to create an aggregated metric. + * Metrics are aggregated by: + *
+ *   1. Aggregating task executions, provided via ExecutionRequestBody.taskExecutions, into workflow executions.
+ *   2. Aggregating workflow executions, submitted via ExecutionRequestBody.runExecutions and workflow executions that were aggregated from task executions, into an aggregated metric.
+ *   3. Aggregating the list of aggregated metrics, submitted via ExecutionRequestBody.aggregatedExecutions and the aggregated metric that was aggregated from workflow executions, into one aggregated metric.
+ * @param allSubmissions + * @return + */ + default Optional getAggregatedMetricFromAllSubmissions(ExecutionsRequestBody allSubmissions) { + final List workflowExecutions = new ArrayList<>(getExecutionsFromExecutionRequestBody(allSubmissions)); + + // If task executions are present, calculate the workflow RunExecution containing the overall workflow-level execution time for each list of tasks + if (!allSubmissions.getTaskExecutions().isEmpty()) { + final List calculatedWorkflowExecutionsFromTasks = allSubmissions.getTaskExecutions().stream() + .map(this::getWorkflowExecutionFromTaskExecutions) + .filter(Optional::isPresent) + .map(Optional::get) + .toList(); + workflowExecutions.addAll(calculatedWorkflowExecutionsFromTasks); + } + + // Get aggregated metrics that were submitted to Dockstore + List aggregatedMetrics = allSubmissions.getAggregatedExecutions().stream() + .map(this::getMetricFromMetrics) + .filter(Objects::nonNull) + .collect(Collectors.toCollection(ArrayList::new)); + + // Aggregate workflow executions into one metric and add it to the list of aggregated metrics + Optional aggregatedMetricFromWorkflowExecutions = getAggregatedMetricFromExecutions(workflowExecutions); + aggregatedMetricFromWorkflowExecutions.ifPresent(aggregatedMetrics::add); + + if (!aggregatedMetrics.isEmpty()) { + // Calculate the new aggregated metric from the list of aggregated metrics + return getAggregatedMetricsFromAggregatedMetrics(aggregatedMetrics); + } + return Optional.empty(); + } + + /** + * Given a list of Metrics, a class containing multiple types of aggregated metrics, get the metrics associated with the metric type and + * aggregate them into a metric of this type. + * @param metricsList + * @return + */ + default Optional getAggregatedMetricFromMetricsList(List metricsList) { + List specificMetrics = metricsList.stream() + .map(this::getMetricFromMetrics) + .filter(Objects::nonNull) + .toList(); + return getAggregatedMetricsFromAggregatedMetrics(specificMetrics); + } +} diff --git a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/MemoryAggregator.java b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/MemoryAggregator.java index 25c16e1a..bc8cafda 100644 --- a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/MemoryAggregator.java +++ b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/MemoryAggregator.java @@ -1,9 +1,7 @@ package io.dockstore.metricsaggregator.helper; import io.dockstore.metricsaggregator.DoubleStatistics; -import io.dockstore.openapi.client.model.ExecutionsRequestBody; import io.dockstore.openapi.client.model.MemoryMetric; -import io.dockstore.openapi.client.model.Metrics; import io.dockstore.openapi.client.model.RunExecution; import io.dockstore.openapi.client.model.TaskExecutions; import java.util.List; @@ -14,21 +12,12 @@ * Aggregate Memory metrics by calculating the minimum, maximum, and average. 
*/ public class MemoryAggregator implements ExecutionAggregator { - @Override - public MemoryMetric getMetricFromMetrics(Metrics metrics) { - return metrics.getMemory(); - } @Override public Double getMetricFromExecution(RunExecution execution) { return execution.getMemoryRequirementsGB(); } - @Override - public List getExecutionsFromExecutionRequestBody(ExecutionsRequestBody executionsRequestBody) { - return executionsRequestBody.getRunExecutions(); - } - @Override public Optional getWorkflowExecutionFromTaskExecutions(TaskExecutions taskExecutionsForOneWorkflowRun) { final List taskExecutions = taskExecutionsForOneWorkflowRun.getTaskExecutions(); diff --git a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ValidationStatusAggregator.java b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ValidationStatusAggregator.java index c2fb21f4..b55775d1 100644 --- a/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ValidationStatusAggregator.java +++ b/metricsaggregator/src/main/java/io/dockstore/metricsaggregator/helper/ValidationStatusAggregator.java @@ -26,7 +26,7 @@ * each validator tool version. */ public class ValidationStatusAggregator implements - ExecutionAggregator { + ExecutionsRequestBodyAggregator { @Override public ValidationStatusMetric getMetricFromMetrics(Metrics metrics) { diff --git a/metricsaggregator/src/test/java/io/dockstore/metricsaggregator/client/cli/MetricsAggregatorClientIT.java b/metricsaggregator/src/test/java/io/dockstore/metricsaggregator/client/cli/MetricsAggregatorClientIT.java index c4779134..7a8e7994 100644 --- a/metricsaggregator/src/test/java/io/dockstore/metricsaggregator/client/cli/MetricsAggregatorClientIT.java +++ b/metricsaggregator/src/test/java/io/dockstore/metricsaggregator/client/cli/MetricsAggregatorClientIT.java @@ -27,6 +27,7 @@ import static io.dockstore.metricsaggregator.common.TestUtilities.createValidationExecution; import static io.dockstore.metricsaggregator.common.TestUtilities.generateExecutionId; import static io.dockstore.openapi.client.model.RunExecution.ExecutionStatusEnum.ABORTED; +import static io.dockstore.openapi.client.model.RunExecution.ExecutionStatusEnum.ALL; import static io.dockstore.openapi.client.model.RunExecution.ExecutionStatusEnum.FAILED; import static io.dockstore.openapi.client.model.RunExecution.ExecutionStatusEnum.FAILED_RUNTIME_INVALID; import static io.dockstore.openapi.client.model.RunExecution.ExecutionStatusEnum.FAILED_SEMANTIC_INVALID; @@ -63,6 +64,7 @@ import io.dockstore.openapi.client.model.ExecutionsRequestBody; import io.dockstore.openapi.client.model.MemoryMetric; import io.dockstore.openapi.client.model.Metrics; +import io.dockstore.openapi.client.model.MetricsByStatus; import io.dockstore.openapi.client.model.RunExecution; import io.dockstore.openapi.client.model.TaskExecutions; import io.dockstore.openapi.client.model.ValidationExecution; @@ -77,7 +79,6 @@ import java.io.IOException; import java.time.Instant; import java.util.List; -import java.util.Map; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; @@ -199,33 +200,88 @@ void testAggregateMetrics() { // This version now has two execution metrics data for it. 
Verify that the aggregated metrics are correct assertEquals(1, platform1Metrics.getExecutionStatusCount().getNumberOfSuccessfulExecutions()); assertEquals(1, platform1Metrics.getExecutionStatusCount().getNumberOfFailedExecutions()); - assertEquals(1, platform1Metrics.getExecutionStatusCount().getCount().get(SUCCESSFUL.name())); - assertEquals(1, platform1Metrics.getExecutionStatusCount().getCount().get(FAILED_RUNTIME_INVALID.name())); + assertEquals(2, platform1Metrics.getExecutionStatusCount().getCount().get(ALL.name()).getExecutionStatusCount()); + assertEquals(1, platform1Metrics.getExecutionStatusCount().getCount().get(SUCCESSFUL.name()).getExecutionStatusCount()); + assertEquals(1, platform1Metrics.getExecutionStatusCount().getCount().get(FAILED_RUNTIME_INVALID.name()).getExecutionStatusCount()); assertFalse(platform1Metrics.getExecutionStatusCount().getCount().containsKey(FAILED_SEMANTIC_INVALID.name())); - assertEquals(2, platform1Metrics.getCpu().getNumberOfDataPointsForAverage()); - assertEquals(2, platform1Metrics.getCpu().getMinimum()); - assertEquals(4, platform1Metrics.getCpu().getMaximum()); - assertEquals(3, platform1Metrics.getCpu().getAverage()); - assertNull(platform1Metrics.getCpu().getUnit()); - - assertEquals(2, platform1Metrics.getMemory().getNumberOfDataPointsForAverage()); - assertEquals(2, platform1Metrics.getMemory().getMinimum()); - assertEquals(4.5, platform1Metrics.getMemory().getMaximum()); - assertEquals(3.25, platform1Metrics.getMemory().getAverage()); - assertNotNull(platform1Metrics.getMemory().getUnit()); - - assertEquals(2, platform1Metrics.getCost().getNumberOfDataPointsForAverage()); - assertEquals(2, platform1Metrics.getCost().getMinimum()); - assertEquals(2, platform1Metrics.getCost().getMaximum()); - assertEquals(2, platform1Metrics.getCost().getAverage()); - assertNotNull(platform1Metrics.getCost().getUnit()); - - assertEquals(2, platform1Metrics.getExecutionTime().getNumberOfDataPointsForAverage()); - assertEquals(1, platform1Metrics.getExecutionTime().getMinimum()); - assertEquals(300, platform1Metrics.getExecutionTime().getMaximum()); - assertEquals(150.5, platform1Metrics.getExecutionTime().getAverage()); - assertNotNull(platform1Metrics.getExecutionTime().getUnit()); + // Check metrics for ALL executions statuses + MetricsByStatus platform1AllStatusesMetrics = platform1Metrics.getExecutionStatusCount().getCount().get(ALL.name()); + assertEquals(2, platform1AllStatusesMetrics.getCpu().getNumberOfDataPointsForAverage()); + assertEquals(2, platform1AllStatusesMetrics.getCpu().getMinimum()); + assertEquals(4, platform1AllStatusesMetrics.getCpu().getMaximum()); + assertEquals(3, platform1AllStatusesMetrics.getCpu().getAverage()); + assertNull(platform1AllStatusesMetrics.getCpu().getUnit()); + + assertEquals(2, platform1AllStatusesMetrics.getMemory().getNumberOfDataPointsForAverage()); + assertEquals(2, platform1AllStatusesMetrics.getMemory().getMinimum()); + assertEquals(4.5, platform1AllStatusesMetrics.getMemory().getMaximum()); + assertEquals(3.25, platform1AllStatusesMetrics.getMemory().getAverage()); + assertNotNull(platform1AllStatusesMetrics.getMemory().getUnit()); + + assertEquals(2, platform1AllStatusesMetrics.getCost().getNumberOfDataPointsForAverage()); + assertEquals(2, platform1AllStatusesMetrics.getCost().getMinimum()); + assertEquals(2, platform1AllStatusesMetrics.getCost().getMaximum()); + assertEquals(2, platform1AllStatusesMetrics.getCost().getAverage()); + assertNotNull(platform1AllStatusesMetrics.getCost().getUnit()); + + 
assertEquals(2, platform1AllStatusesMetrics.getExecutionTime().getNumberOfDataPointsForAverage()); + assertEquals(1, platform1AllStatusesMetrics.getExecutionTime().getMinimum()); + assertEquals(300, platform1AllStatusesMetrics.getExecutionTime().getMaximum()); + assertEquals(150.5, platform1AllStatusesMetrics.getExecutionTime().getAverage()); + assertNotNull(platform1AllStatusesMetrics.getExecutionTime().getUnit()); + + // Check metrics for successful executions + MetricsByStatus platform1SuccessfulMetrics = platform1Metrics.getExecutionStatusCount().getCount().get(SUCCESSFUL.name()); + assertEquals(1, platform1SuccessfulMetrics.getCpu().getNumberOfDataPointsForAverage()); + assertEquals(2, platform1SuccessfulMetrics.getCpu().getMinimum()); + assertEquals(2, platform1SuccessfulMetrics.getCpu().getMaximum()); + assertEquals(2, platform1SuccessfulMetrics.getCpu().getAverage()); + assertNull(platform1SuccessfulMetrics.getCpu().getUnit()); + + assertEquals(1, platform1SuccessfulMetrics.getMemory().getNumberOfDataPointsForAverage()); + assertEquals(2, platform1SuccessfulMetrics.getMemory().getMinimum()); + assertEquals(2, platform1SuccessfulMetrics.getMemory().getMaximum()); + assertEquals(2, platform1SuccessfulMetrics.getMemory().getAverage()); + assertNotNull(platform1SuccessfulMetrics.getMemory().getUnit()); + + assertEquals(1, platform1SuccessfulMetrics.getCost().getNumberOfDataPointsForAverage()); + assertEquals(2, platform1SuccessfulMetrics.getCost().getMinimum()); + assertEquals(2, platform1SuccessfulMetrics.getCost().getMaximum()); + assertEquals(2, platform1SuccessfulMetrics.getCost().getAverage()); + assertNotNull(platform1SuccessfulMetrics.getCost().getUnit()); + + assertEquals(1, platform1SuccessfulMetrics.getExecutionTime().getNumberOfDataPointsForAverage()); + assertEquals(300, platform1SuccessfulMetrics.getExecutionTime().getMinimum()); + assertEquals(300, platform1SuccessfulMetrics.getExecutionTime().getMaximum()); + assertEquals(300, platform1SuccessfulMetrics.getExecutionTime().getAverage()); + assertNotNull(platform1SuccessfulMetrics.getExecutionTime().getUnit()); + + // Check metrics for failed runtime invalid executions + MetricsByStatus platform1FailedRuntimeInvalidMetrics = platform1Metrics.getExecutionStatusCount().getCount().get(FAILED_RUNTIME_INVALID.name()); + assertEquals(1, platform1FailedRuntimeInvalidMetrics.getCpu().getNumberOfDataPointsForAverage()); + assertEquals(4, platform1FailedRuntimeInvalidMetrics.getCpu().getMinimum()); + assertEquals(4, platform1FailedRuntimeInvalidMetrics.getCpu().getMaximum()); + assertEquals(4, platform1FailedRuntimeInvalidMetrics.getCpu().getAverage()); + assertNull(platform1FailedRuntimeInvalidMetrics.getCpu().getUnit()); + + assertEquals(1, platform1FailedRuntimeInvalidMetrics.getMemory().getNumberOfDataPointsForAverage()); + assertEquals(4.5, platform1FailedRuntimeInvalidMetrics.getMemory().getMinimum()); + assertEquals(4.5, platform1FailedRuntimeInvalidMetrics.getMemory().getMaximum()); + assertEquals(4.5, platform1FailedRuntimeInvalidMetrics.getMemory().getAverage()); + assertNotNull(platform1FailedRuntimeInvalidMetrics.getMemory().getUnit()); + + assertEquals(1, platform1FailedRuntimeInvalidMetrics.getCost().getNumberOfDataPointsForAverage()); + assertEquals(2, platform1FailedRuntimeInvalidMetrics.getCost().getMinimum()); + assertEquals(2, platform1FailedRuntimeInvalidMetrics.getCost().getMaximum()); + assertEquals(2, platform1FailedRuntimeInvalidMetrics.getCost().getAverage()); + 
assertNotNull(platform1FailedRuntimeInvalidMetrics.getCost().getUnit()); + + assertEquals(1, platform1FailedRuntimeInvalidMetrics.getExecutionTime().getNumberOfDataPointsForAverage()); + assertEquals(1, platform1FailedRuntimeInvalidMetrics.getExecutionTime().getMinimum()); + assertEquals(1, platform1FailedRuntimeInvalidMetrics.getExecutionTime().getMaximum()); + assertEquals(1, platform1FailedRuntimeInvalidMetrics.getExecutionTime().getAverage()); + assertNotNull(platform1FailedRuntimeInvalidMetrics.getExecutionTime().getUnit()); assertEquals(1, platform1Metrics.getValidationStatus().getValidatorTools().size()); validationInfo = platform1Metrics.getValidationStatus().getValidatorTools().get(MINIWDL.toString()); @@ -239,7 +295,7 @@ void testAggregateMetrics() { assertEquals(50d, validationInfo.getPassingRate()); assertEquals(2, validationInfo.getNumberOfRuns()); - // Submit two TaskExecutions, each one representing the task metrics for a single workflow execution + // Submit one TaskExecutions, representing the task metrics for a single workflow execution // A successful task execution that ran for 11 seconds, requires 6 CPUs and 5.5 GBs of memory. Signifies that this workflow execution only executed one task TaskExecutions taskExecutions = new TaskExecutions().taskExecutions(List.of(createRunExecution(SUCCESSFUL, "PT11S", 6, 5.5, new Cost().value(2.00), "us-central1"))); taskExecutions.setDateExecuted(Instant.now().toString()); @@ -259,33 +315,62 @@ void testAggregateMetrics() { // This version now has three submissions of execution metrics data. Verify that the aggregated metrics are correct assertEquals(2, platform1Metrics.getExecutionStatusCount().getNumberOfSuccessfulExecutions()); assertEquals(1, platform1Metrics.getExecutionStatusCount().getNumberOfFailedExecutions()); - assertEquals(2, platform1Metrics.getExecutionStatusCount().getCount().get(SUCCESSFUL.name())); - assertEquals(1, platform1Metrics.getExecutionStatusCount().getCount().get(FAILED_RUNTIME_INVALID.name())); + assertEquals(3, platform1Metrics.getExecutionStatusCount().getCount().get(ALL.name()).getExecutionStatusCount()); + assertEquals(2, platform1Metrics.getExecutionStatusCount().getCount().get(SUCCESSFUL.name()).getExecutionStatusCount()); + assertEquals(1, platform1Metrics.getExecutionStatusCount().getCount().get(FAILED_RUNTIME_INVALID.name()).getExecutionStatusCount()); assertFalse(platform1Metrics.getExecutionStatusCount().getCount().containsKey(FAILED_SEMANTIC_INVALID.name())); - assertEquals(3, platform1Metrics.getCpu().getNumberOfDataPointsForAverage()); - assertEquals(2, platform1Metrics.getCpu().getMinimum()); - assertEquals(6, platform1Metrics.getCpu().getMaximum()); - assertEquals(4, platform1Metrics.getCpu().getAverage()); - assertNull(platform1Metrics.getCpu().getUnit()); - - assertEquals(3, platform1Metrics.getMemory().getNumberOfDataPointsForAverage()); - assertEquals(2, platform1Metrics.getMemory().getMinimum()); - assertEquals(5.5, platform1Metrics.getMemory().getMaximum()); - assertEquals(4, platform1Metrics.getMemory().getAverage()); - assertNotNull(platform1Metrics.getMemory().getUnit()); - - assertEquals(3, platform1Metrics.getCost().getNumberOfDataPointsForAverage()); - assertEquals(2, platform1Metrics.getCost().getMinimum()); - assertEquals(2, platform1Metrics.getCost().getMaximum()); - assertEquals(2, platform1Metrics.getCost().getAverage()); - assertNotNull(platform1Metrics.getCost().getUnit()); - - assertEquals(3, platform1Metrics.getExecutionTime().getNumberOfDataPointsForAverage()); - 
assertEquals(1, platform1Metrics.getExecutionTime().getMinimum()); - assertEquals(300, platform1Metrics.getExecutionTime().getMaximum()); - assertEquals(104, platform1Metrics.getExecutionTime().getAverage()); - assertNotNull(platform1Metrics.getExecutionTime().getUnit()); + // Check metrics for ALL executions statuses + platform1AllStatusesMetrics = platform1Metrics.getExecutionStatusCount().getCount().get(ALL.name()); + assertEquals(3, platform1AllStatusesMetrics.getCpu().getNumberOfDataPointsForAverage()); + assertEquals(2, platform1AllStatusesMetrics.getCpu().getMinimum()); + assertEquals(6, platform1AllStatusesMetrics.getCpu().getMaximum()); + assertEquals(4, platform1AllStatusesMetrics.getCpu().getAverage()); + assertNull(platform1AllStatusesMetrics.getCpu().getUnit()); + + assertEquals(3, platform1AllStatusesMetrics.getMemory().getNumberOfDataPointsForAverage()); + assertEquals(2, platform1AllStatusesMetrics.getMemory().getMinimum()); + assertEquals(5.5, platform1AllStatusesMetrics.getMemory().getMaximum()); + assertEquals(4, platform1AllStatusesMetrics.getMemory().getAverage()); + assertNotNull(platform1AllStatusesMetrics.getMemory().getUnit()); + + assertEquals(3, platform1AllStatusesMetrics.getCost().getNumberOfDataPointsForAverage()); + assertEquals(2, platform1AllStatusesMetrics.getCost().getMinimum()); + assertEquals(2, platform1AllStatusesMetrics.getCost().getMaximum()); + assertEquals(2, platform1AllStatusesMetrics.getCost().getAverage()); + assertNotNull(platform1AllStatusesMetrics.getCost().getUnit()); + + assertEquals(3, platform1AllStatusesMetrics.getExecutionTime().getNumberOfDataPointsForAverage()); + assertEquals(1, platform1AllStatusesMetrics.getExecutionTime().getMinimum()); + assertEquals(300, platform1AllStatusesMetrics.getExecutionTime().getMaximum()); + assertEquals(104, platform1AllStatusesMetrics.getExecutionTime().getAverage()); + assertNotNull(platform1AllStatusesMetrics.getExecutionTime().getUnit()); + + // Only checking the metrics for the successful status because that's the one we updated + platform1SuccessfulMetrics = platform1Metrics.getExecutionStatusCount().getCount().get(SUCCESSFUL.name()); + assertEquals(2, platform1SuccessfulMetrics.getCpu().getNumberOfDataPointsForAverage()); + assertEquals(2, platform1SuccessfulMetrics.getCpu().getMinimum()); + assertEquals(6, platform1SuccessfulMetrics.getCpu().getMaximum()); + assertEquals(4, platform1SuccessfulMetrics.getCpu().getAverage()); + assertNull(platform1SuccessfulMetrics.getCpu().getUnit()); + + assertEquals(2, platform1SuccessfulMetrics.getMemory().getNumberOfDataPointsForAverage()); + assertEquals(2, platform1SuccessfulMetrics.getMemory().getMinimum()); + assertEquals(5.5, platform1SuccessfulMetrics.getMemory().getMaximum()); + assertEquals(3.75, platform1SuccessfulMetrics.getMemory().getAverage()); + assertNotNull(platform1SuccessfulMetrics.getMemory().getUnit()); + + assertEquals(2, platform1SuccessfulMetrics.getCost().getNumberOfDataPointsForAverage()); + assertEquals(2, platform1SuccessfulMetrics.getCost().getMinimum()); + assertEquals(2, platform1SuccessfulMetrics.getCost().getMaximum()); + assertEquals(2, platform1SuccessfulMetrics.getCost().getAverage()); + assertNotNull(platform1SuccessfulMetrics.getCost().getUnit()); + + assertEquals(2, platform1SuccessfulMetrics.getExecutionTime().getNumberOfDataPointsForAverage()); + assertEquals(11, platform1SuccessfulMetrics.getExecutionTime().getMinimum()); + assertEquals(300, platform1SuccessfulMetrics.getExecutionTime().getMaximum()); + 
assertEquals(155.5, platform1SuccessfulMetrics.getExecutionTime().getAverage()); + assertNotNull(platform1SuccessfulMetrics.getExecutionTime().getUnit()); testOverallAggregatedMetrics(version, validatorToolVersion1, validatorToolVersion2, platform1Metrics); } @@ -297,33 +382,62 @@ private static void assertAggregatedMetricsForPlatform(String platform, Workflow // Verify that the aggregated metrics are the same as the single execution for the platform assertEquals(1, platformMetrics.getExecutionStatusCount().getNumberOfSuccessfulExecutions()); assertEquals(0, platformMetrics.getExecutionStatusCount().getNumberOfFailedExecutions()); - assertEquals(1, platformMetrics.getExecutionStatusCount().getCount().get(SUCCESSFUL.name())); + assertEquals(1, platformMetrics.getExecutionStatusCount().getCount().get(ALL.name()).getExecutionStatusCount()); + assertEquals(1, platformMetrics.getExecutionStatusCount().getCount().get(SUCCESSFUL.name()).getExecutionStatusCount()); assertFalse(platformMetrics.getExecutionStatusCount().getCount().containsKey(FAILED_RUNTIME_INVALID.name())); assertFalse(platformMetrics.getExecutionStatusCount().getCount().containsKey(FAILED_SEMANTIC_INVALID.name())); - assertEquals(1, platformMetrics.getCpu().getNumberOfDataPointsForAverage()); - assertEquals(2, platformMetrics.getCpu().getMinimum()); - assertEquals(2, platformMetrics.getCpu().getMaximum()); - assertEquals(2, platformMetrics.getCpu().getAverage()); - assertNull(platformMetrics.getCpu().getUnit()); - - assertEquals(1, platformMetrics.getMemory().getNumberOfDataPointsForAverage()); - assertEquals(2, platformMetrics.getMemory().getMinimum()); - assertEquals(2, platformMetrics.getMemory().getMaximum()); - assertEquals(2, platformMetrics.getMemory().getAverage()); - assertNotNull(platformMetrics.getMemory().getUnit()); - - assertEquals(1, platformMetrics.getCost().getNumberOfDataPointsForAverage()); - assertEquals(2, platformMetrics.getCost().getMinimum()); - assertEquals(2, platformMetrics.getCost().getMaximum()); - assertEquals(2, platformMetrics.getCost().getAverage()); - assertNotNull(platformMetrics.getCost().getUnit()); - - assertEquals(1, platformMetrics.getExecutionTime().getNumberOfDataPointsForAverage()); - assertEquals(300, platformMetrics.getExecutionTime().getMinimum()); - assertEquals(300, platformMetrics.getExecutionTime().getMaximum()); - assertEquals(300, platformMetrics.getExecutionTime().getAverage()); - assertNotNull(platformMetrics.getExecutionTime().getUnit()); + // Check for ALL statuses + MetricsByStatus platformAllStatusesMetrics = platformMetrics.getExecutionStatusCount().getCount().get(ALL.name()); + assertEquals(1, platformAllStatusesMetrics.getCpu().getNumberOfDataPointsForAverage()); + assertEquals(2, platformAllStatusesMetrics.getCpu().getMinimum()); + assertEquals(2, platformAllStatusesMetrics.getCpu().getMaximum()); + assertEquals(2, platformAllStatusesMetrics.getCpu().getAverage()); + assertNull(platformAllStatusesMetrics.getCpu().getUnit()); + + assertEquals(1, platformAllStatusesMetrics.getMemory().getNumberOfDataPointsForAverage()); + assertEquals(2, platformAllStatusesMetrics.getMemory().getMinimum()); + assertEquals(2, platformAllStatusesMetrics.getMemory().getMaximum()); + assertEquals(2, platformAllStatusesMetrics.getMemory().getAverage()); + assertNotNull(platformAllStatusesMetrics.getMemory().getUnit()); + + assertEquals(1, platformAllStatusesMetrics.getCost().getNumberOfDataPointsForAverage()); + assertEquals(2, platformAllStatusesMetrics.getCost().getMinimum()); + 
assertEquals(2, platformAllStatusesMetrics.getCost().getMaximum()); + assertEquals(2, platformAllStatusesMetrics.getCost().getAverage()); + assertNotNull(platformAllStatusesMetrics.getCost().getUnit()); + + assertEquals(1, platformAllStatusesMetrics.getExecutionTime().getNumberOfDataPointsForAverage()); + assertEquals(300, platformAllStatusesMetrics.getExecutionTime().getMinimum()); + assertEquals(300, platformAllStatusesMetrics.getExecutionTime().getMaximum()); + assertEquals(300, platformAllStatusesMetrics.getExecutionTime().getAverage()); + assertNotNull(platformAllStatusesMetrics.getExecutionTime().getUnit()); + + // Check SUCCESSFUL status metrics + MetricsByStatus platformSuccessfulMetrics = platformMetrics.getExecutionStatusCount().getCount().get(SUCCESSFUL.name()); + assertEquals(1, platformSuccessfulMetrics.getCpu().getNumberOfDataPointsForAverage()); + assertEquals(2, platformSuccessfulMetrics.getCpu().getMinimum()); + assertEquals(2, platformSuccessfulMetrics.getCpu().getMaximum()); + assertEquals(2, platformSuccessfulMetrics.getCpu().getAverage()); + assertNull(platformSuccessfulMetrics.getCpu().getUnit()); + + assertEquals(1, platformSuccessfulMetrics.getMemory().getNumberOfDataPointsForAverage()); + assertEquals(2, platformSuccessfulMetrics.getMemory().getMinimum()); + assertEquals(2, platformSuccessfulMetrics.getMemory().getMaximum()); + assertEquals(2, platformSuccessfulMetrics.getMemory().getAverage()); + assertNotNull(platformSuccessfulMetrics.getMemory().getUnit()); + + assertEquals(1, platformSuccessfulMetrics.getCost().getNumberOfDataPointsForAverage()); + assertEquals(2, platformSuccessfulMetrics.getCost().getMinimum()); + assertEquals(2, platformSuccessfulMetrics.getCost().getMaximum()); + assertEquals(2, platformSuccessfulMetrics.getCost().getAverage()); + assertNotNull(platformSuccessfulMetrics.getCost().getUnit()); + + assertEquals(1, platformSuccessfulMetrics.getExecutionTime().getNumberOfDataPointsForAverage()); + assertEquals(300, platformSuccessfulMetrics.getExecutionTime().getMinimum()); + assertEquals(300, platformSuccessfulMetrics.getExecutionTime().getMaximum()); + assertEquals(300, platformSuccessfulMetrics.getExecutionTime().getAverage()); + assertNotNull(platformSuccessfulMetrics.getExecutionTime().getUnit()); assertEquals(1, platformMetrics.getValidationStatus().getValidatorTools().size()); final String expectedValidatorTool = submittedValidationExecution.getValidatorTool().toString(); @@ -354,30 +468,70 @@ private static void testOverallAggregatedMetrics(WorkflowVersion version, String assertNotNull(overallMetrics); assertEquals(3, overallMetrics.getExecutionStatusCount().getNumberOfSuccessfulExecutions()); assertEquals(1, overallMetrics.getExecutionStatusCount().getNumberOfFailedExecutions()); - assertEquals(3, overallMetrics.getExecutionStatusCount().getCount().get(SUCCESSFUL.name())); - assertEquals(1, platform1Metrics.getExecutionStatusCount().getCount().get(FAILED_RUNTIME_INVALID.name())); + assertEquals(4, overallMetrics.getExecutionStatusCount().getCount().get(ALL.name()).getExecutionStatusCount()); + assertEquals(3, overallMetrics.getExecutionStatusCount().getCount().get(SUCCESSFUL.name()).getExecutionStatusCount()); + assertEquals(1, platform1Metrics.getExecutionStatusCount().getCount().get(FAILED_RUNTIME_INVALID.name()).getExecutionStatusCount()); assertFalse(overallMetrics.getExecutionStatusCount().getCount().containsKey(FAILED_SEMANTIC_INVALID.name())); - // The CPU values submitted were 2, 2, 4, 6 - assertEquals(4, 
overallMetrics.getCpu().getNumberOfDataPointsForAverage()); - assertEquals(2, overallMetrics.getCpu().getMinimum()); - assertEquals(6, overallMetrics.getCpu().getMaximum()); - assertEquals(3.5, overallMetrics.getCpu().getAverage()); - assertNull(overallMetrics.getCpu().getUnit()); - - // The memory values submitted were 2, 2, 4.5, 5.5 - assertEquals(4, overallMetrics.getMemory().getNumberOfDataPointsForAverage()); - assertEquals(2, overallMetrics.getMemory().getMinimum()); - assertEquals(5.5, overallMetrics.getMemory().getMaximum()); - assertEquals(3.5, overallMetrics.getMemory().getAverage()); - assertNotNull(overallMetrics.getMemory().getUnit()); - - // The execution times submitted were PT5M, PT5M, PT1S, PT11S - assertEquals(4, overallMetrics.getExecutionTime().getNumberOfDataPointsForAverage()); - assertEquals(1, overallMetrics.getExecutionTime().getMinimum()); - assertEquals(300, overallMetrics.getExecutionTime().getMaximum()); - assertEquals(153, overallMetrics.getExecutionTime().getAverage()); - assertNotNull(overallMetrics.getExecutionTime().getUnit()); + // The CPU values submitted were: + // SUCCESSFUL: 2, 2, 6 + // FAILED_RUNTIME_INVALID: 4 + MetricsByStatus overallAllStatusesMetrics = overallMetrics.getExecutionStatusCount().getCount().get(ALL.name()); + MetricsByStatus overallSuccessfulMetrics = overallMetrics.getExecutionStatusCount().getCount().get(SUCCESSFUL.name()); + MetricsByStatus overallFailedRuntimeInvalidMetrics = overallMetrics.getExecutionStatusCount().getCount().get(FAILED_RUNTIME_INVALID.name()); + assertEquals(4, overallAllStatusesMetrics.getCpu().getNumberOfDataPointsForAverage()); + assertEquals(2, overallAllStatusesMetrics.getCpu().getMinimum()); + assertEquals(6, overallAllStatusesMetrics.getCpu().getMaximum()); + assertEquals(3.5, overallAllStatusesMetrics.getCpu().getAverage()); + assertNull(overallAllStatusesMetrics.getCpu().getUnit()); + assertEquals(3, overallSuccessfulMetrics.getCpu().getNumberOfDataPointsForAverage()); + assertEquals(2, overallSuccessfulMetrics.getCpu().getMinimum()); + assertEquals(6, overallSuccessfulMetrics.getCpu().getMaximum()); + assertEquals(3.333333333333333, overallSuccessfulMetrics.getCpu().getAverage()); + assertNull(overallSuccessfulMetrics.getCpu().getUnit()); + assertEquals(1, overallFailedRuntimeInvalidMetrics.getCpu().getNumberOfDataPointsForAverage()); + assertEquals(4, overallFailedRuntimeInvalidMetrics.getCpu().getMinimum()); + assertEquals(4, overallFailedRuntimeInvalidMetrics.getCpu().getMaximum()); + assertEquals(4, overallFailedRuntimeInvalidMetrics.getCpu().getAverage()); + assertNull(overallFailedRuntimeInvalidMetrics.getCpu().getUnit()); + + // The memory values submitted were: + // SUCCESSFUL: 2, 2, 5.5 + // FAILED_RUNTIME_INVALID: 4.5 + assertEquals(4, overallAllStatusesMetrics.getMemory().getNumberOfDataPointsForAverage()); + assertEquals(2, overallAllStatusesMetrics.getMemory().getMinimum()); + assertEquals(5.5, overallAllStatusesMetrics.getMemory().getMaximum()); + assertEquals(3.5, overallAllStatusesMetrics.getMemory().getAverage()); + assertNotNull(overallAllStatusesMetrics.getMemory().getUnit()); + assertEquals(3, overallSuccessfulMetrics.getMemory().getNumberOfDataPointsForAverage()); + assertEquals(2, overallSuccessfulMetrics.getMemory().getMinimum()); + assertEquals(5.5, overallSuccessfulMetrics.getMemory().getMaximum()); + assertEquals(3.1666666666666665, overallSuccessfulMetrics.getMemory().getAverage()); + assertNotNull(overallSuccessfulMetrics.getMemory().getUnit()); + assertEquals(1, 
overallFailedRuntimeInvalidMetrics.getMemory().getNumberOfDataPointsForAverage()); + assertEquals(4.5, overallFailedRuntimeInvalidMetrics.getMemory().getMinimum()); + assertEquals(4.5, overallFailedRuntimeInvalidMetrics.getMemory().getMaximum()); + assertEquals(4.5, overallFailedRuntimeInvalidMetrics.getMemory().getAverage()); + assertNotNull(overallFailedRuntimeInvalidMetrics.getMemory().getUnit()); + + // The execution times submitted were: + // SUCCESSFUL: PT5M, PT5M, PT11S + // FAILED_RUNTIME_INVALID: PT1S + assertEquals(4, overallAllStatusesMetrics.getExecutionTime().getNumberOfDataPointsForAverage()); + assertEquals(1, overallAllStatusesMetrics.getExecutionTime().getMinimum()); + assertEquals(300, overallAllStatusesMetrics.getExecutionTime().getMaximum()); + assertEquals(153, overallAllStatusesMetrics.getExecutionTime().getAverage()); + assertNotNull(overallAllStatusesMetrics.getExecutionTime().getUnit()); + assertEquals(3, overallSuccessfulMetrics.getExecutionTime().getNumberOfDataPointsForAverage()); + assertEquals(11, overallSuccessfulMetrics.getExecutionTime().getMinimum()); + assertEquals(300, overallSuccessfulMetrics.getExecutionTime().getMaximum()); + assertEquals(203.66666666666666, overallSuccessfulMetrics.getExecutionTime().getAverage()); + assertNotNull(overallSuccessfulMetrics.getExecutionTime().getUnit()); + assertEquals(1, overallFailedRuntimeInvalidMetrics.getExecutionTime().getNumberOfDataPointsForAverage()); + assertEquals(1, overallFailedRuntimeInvalidMetrics.getExecutionTime().getMinimum()); + assertEquals(1, overallFailedRuntimeInvalidMetrics.getExecutionTime().getMaximum()); + assertEquals(1, overallFailedRuntimeInvalidMetrics.getExecutionTime().getAverage()); + assertNotNull(overallFailedRuntimeInvalidMetrics.getExecutionTime().getUnit()); assertEquals(2, overallMetrics.getValidationStatus().getValidatorTools().size()); validationInfo = overallMetrics.getValidationStatus().getValidatorTools().get(MINIWDL.toString()); @@ -441,8 +595,14 @@ void testAggregateExecutionsWithDuplicateIds() { validationExecution.setExecutionId(executionId); // Check if metric is aggregated from aggregated execution by checking memory metric AggregatedExecution aggregatedExecution = new AggregatedExecution().executionId(executionId); - aggregatedExecution.executionStatusCount(new ExecutionStatusMetric().count(Map.of(SUCCESSFUL.name(), 1))); // required metric - aggregatedExecution.memory(new MemoryMetric().minimum(1.0).maximum(1.0).average(1.0).numberOfDataPointsForAverage(1)); + aggregatedExecution.executionStatusCount(new ExecutionStatusMetric().putCountItem(SUCCESSFUL.name(), + new MetricsByStatus() + .executionStatusCount(1) + .memory(new MemoryMetric() + .minimum(1.0) + .maximum(1.0) + .average(1.0) + .numberOfDataPointsForAverage(1)))); // Try to send all of them in one POST. 
Should fail because the webservice validates that one submission does not include duplicate IDs assertThrows(ApiException.class, () -> extendedGa4GhApi.executionMetricsPost(new ExecutionsRequestBody() @@ -465,9 +625,10 @@ void testAggregateExecutionsWithDuplicateIds() { Metrics metrics = version.getMetricsByPlatform().get(platform); assertNotNull(metrics); // Should be aggregated from aggregatedExecution because it was submitted last - assertNotNull(metrics.getMemory()); - assertNull(metrics.getExecutionTime()); // Verify that the metric from workflow execution wasn't used - assertNull(metrics.getCpu()); // Verify that the metric from task executions weren't used + MetricsByStatus successfulMetrics = metrics.getExecutionStatusCount().getCount().get(SUCCESSFUL.name()); + assertNotNull(successfulMetrics.getMemory()); + assertNull(successfulMetrics.getExecutionTime()); // Verify that the metric from the workflow execution wasn't used + assertNull(successfulMetrics.getCpu()); // Verify that the metrics from task executions weren't used assertNull(metrics.getValidationStatus()); // Verify that the metric from validation execution wasn't used // Submit a workflow execution. The metric should be from the latest workflow execution. @@ -480,11 +641,12 @@ void testAggregateExecutionsWithDuplicateIds() { metrics = version.getMetricsByPlatform().get(platform); assertNotNull(metrics); // Should be aggregated from aggregatedExecution because it was submitted last - assertNotNull(metrics.getExecutionTime()); - assertEquals(0, metrics.getExecutionTime().getMinimum()); // Verify that the execution time is from the second workflow execution - assertNull(metrics.getCpu()); // Verify that the metric from task executions weren't used + successfulMetrics = metrics.getExecutionStatusCount().getCount().get(SUCCESSFUL.name()); + assertNotNull(successfulMetrics.getExecutionTime()); + assertEquals(0, successfulMetrics.getExecutionTime().getMinimum()); // Verify that the execution time is from the second workflow execution + assertNull(successfulMetrics.getCpu()); // Verify that the metrics from task executions weren't used assertNull(metrics.getValidationStatus()); // Verify that the metric from validation execution wasn't used - assertNull(metrics.getMemory()); // Verify that the metric from aggregated execution wasn't used + assertNull(successfulMetrics.getMemory()); // Verify that the metric from aggregated execution wasn't used } @Test diff --git a/metricsaggregator/src/test/java/io/dockstore/metricsaggregator/helper/AggregationHelperTest.java b/metricsaggregator/src/test/java/io/dockstore/metricsaggregator/helper/AggregationHelperTest.java index 24f05965..1e2a29a7 100644 --- a/metricsaggregator/src/test/java/io/dockstore/metricsaggregator/helper/AggregationHelperTest.java +++ b/metricsaggregator/src/test/java/io/dockstore/metricsaggregator/helper/AggregationHelperTest.java @@ -1,9 +1,6 @@ package io.dockstore.metricsaggregator.helper; import static io.dockstore.metricsaggregator.common.TestUtilities.createValidationExecution; -import static io.dockstore.openapi.client.model.RunExecution.ExecutionStatusEnum.FAILED_RUNTIME_INVALID; -import static io.dockstore.openapi.client.model.RunExecution.ExecutionStatusEnum.FAILED_SEMANTIC_INVALID; -import static io.dockstore.openapi.client.model.RunExecution.ExecutionStatusEnum.SUCCESSFUL; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -11,15
+8,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import io.dockstore.openapi.client.model.AggregatedExecution; -import io.dockstore.openapi.client.model.Cost; -import io.dockstore.openapi.client.model.CostMetric; -import io.dockstore.openapi.client.model.CpuMetric; -import io.dockstore.openapi.client.model.ExecutionStatusMetric; -import io.dockstore.openapi.client.model.ExecutionTimeMetric; import io.dockstore.openapi.client.model.ExecutionsRequestBody; -import io.dockstore.openapi.client.model.MemoryMetric; -import io.dockstore.openapi.client.model.RunExecution; -import io.dockstore.openapi.client.model.TaskExecutions; import io.dockstore.openapi.client.model.ValidationExecution; import io.dockstore.openapi.client.model.ValidationStatusMetric; import io.dockstore.openapi.client.model.ValidatorInfo; @@ -33,284 +22,6 @@ class AggregationHelperTest { - @Test - void testGetAggregatedExecutionStatus() { - ExecutionStatusAggregator executionStatusAggregator = new ExecutionStatusAggregator(); - ExecutionsRequestBody allSubmissions = new ExecutionsRequestBody(); - Optional executionStatusMetric = executionStatusAggregator.getAggregatedMetricFromAllSubmissions(allSubmissions); - assertTrue(executionStatusMetric.isEmpty()); - - RunExecution submittedRunExecution = new RunExecution().executionStatus(SUCCESSFUL); - allSubmissions = new ExecutionsRequestBody().runExecutions(List.of(submittedRunExecution)); - executionStatusMetric = executionStatusAggregator.getAggregatedMetricFromAllSubmissions(allSubmissions); - assertTrue(executionStatusMetric.isPresent()); - assertEquals(1, executionStatusMetric.get().getCount().get(SUCCESSFUL.toString())); - - // Aggregate submissions containing run executions and aggregated metrics - AggregatedExecution submittedAggregatedMetrics = new AggregatedExecution(); - submittedAggregatedMetrics.executionStatusCount( - new ExecutionStatusMetric().count( - Map.of(SUCCESSFUL.toString(), 10, FAILED_RUNTIME_INVALID.toString(), 1))); - allSubmissions = new ExecutionsRequestBody().runExecutions(List.of(submittedRunExecution)).aggregatedExecutions(List.of(submittedAggregatedMetrics)); - executionStatusMetric = executionStatusAggregator.getAggregatedMetricFromAllSubmissions(allSubmissions); - assertTrue(executionStatusMetric.isPresent()); - assertEquals(11, executionStatusMetric.get().getCount().get(SUCCESSFUL.toString())); - assertEquals(1, executionStatusMetric.get().getCount().get(FAILED_RUNTIME_INVALID.toString())); - assertNull(executionStatusMetric.get().getCount().get(FAILED_SEMANTIC_INVALID.toString()), "Should be null because the key doesn't exist in the count map"); - - // Aggregate submissions containing workflow run executions and task executions - submittedRunExecution = new RunExecution().executionStatus(SUCCESSFUL); - TaskExecutions taskExecutionsForOneWorkflowRun = new TaskExecutions().taskExecutions(List.of(submittedRunExecution)); - allSubmissions = new ExecutionsRequestBody().runExecutions(List.of(submittedRunExecution)).taskExecutions(List.of(taskExecutionsForOneWorkflowRun)); - executionStatusMetric = executionStatusAggregator.getAggregatedMetricFromAllSubmissions(allSubmissions); - assertTrue(executionStatusMetric.isPresent()); - assertEquals(2, executionStatusMetric.get().getCount().get(SUCCESSFUL.toString())); - // Submit one successful workflow execution and a list of task executions where the single task failed - taskExecutionsForOneWorkflowRun = new TaskExecutions().taskExecutions(List.of(new 
RunExecution().executionStatus(FAILED_RUNTIME_INVALID))); - allSubmissions = new ExecutionsRequestBody().runExecutions(List.of(submittedRunExecution)).taskExecutions(List.of(taskExecutionsForOneWorkflowRun)); - executionStatusMetric = executionStatusAggregator.getAggregatedMetricFromAllSubmissions(allSubmissions); - assertTrue(executionStatusMetric.isPresent()); - assertEquals(1, executionStatusMetric.get().getCount().get(SUCCESSFUL.toString())); - assertEquals(1, executionStatusMetric.get().getCount().get(FAILED_RUNTIME_INVALID.toString())); - // Submit one successful workflow execution and a list of task executions where there are two tasks that failed due to different reasons - taskExecutionsForOneWorkflowRun = new TaskExecutions().taskExecutions( - List.of(new RunExecution().executionStatus(FAILED_RUNTIME_INVALID), - new RunExecution().executionStatus(FAILED_SEMANTIC_INVALID))); - allSubmissions = new ExecutionsRequestBody().runExecutions(List.of(submittedRunExecution)).taskExecutions(List.of(taskExecutionsForOneWorkflowRun)); - executionStatusMetric = executionStatusAggregator.getAggregatedMetricFromAllSubmissions(allSubmissions); - assertTrue(executionStatusMetric.isPresent()); - assertEquals(1, executionStatusMetric.get().getCount().get(SUCCESSFUL.toString())); - // There should be 1 of either FAILED_RUNTIME_INVALID or FAILED_SEMANTIC_INVALID. - assertTrue(executionStatusMetric.get().getCount().containsKey(FAILED_RUNTIME_INVALID.toString()) || executionStatusMetric.get().getCount().containsKey(FAILED_SEMANTIC_INVALID.toString())); - // Submit one successful workflow execution and a list of task executions where there are 3 tasks that failed due to different reasons - taskExecutionsForOneWorkflowRun = new TaskExecutions().taskExecutions( - List.of(new RunExecution().executionStatus(FAILED_RUNTIME_INVALID), - new RunExecution().executionStatus(FAILED_RUNTIME_INVALID), - new RunExecution().executionStatus(FAILED_SEMANTIC_INVALID))); - allSubmissions = new ExecutionsRequestBody().runExecutions(List.of(submittedRunExecution)).taskExecutions(List.of(taskExecutionsForOneWorkflowRun)); - executionStatusMetric = executionStatusAggregator.getAggregatedMetricFromAllSubmissions(allSubmissions); - assertTrue(executionStatusMetric.isPresent()); - assertEquals(1, executionStatusMetric.get().getCount().get(SUCCESSFUL.toString())); - // There should be one count of FAILED_RUNTIME_INVALID because it's the most frequent failed status in the list of task executions - assertEquals(1, executionStatusMetric.get().getCount().get(FAILED_RUNTIME_INVALID.toString())); - } - - @Test - void testGetAggregatedExecutionTime() { - ExecutionTimeAggregator executionTimeAggregator = new ExecutionTimeAggregator(); - List badExecutions = new ArrayList<>(); - Optional executionTimeMetric = executionTimeAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(badExecutions)); - assertTrue(executionTimeMetric.isEmpty()); - - // Add an execution that doesn't have execution time data - badExecutions.add(new RunExecution().executionStatus(SUCCESSFUL)); - executionTimeMetric = executionTimeAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(badExecutions)); - assertTrue(executionTimeMetric.isEmpty()); - - // Add an execution with malformed execution time data - badExecutions.add(new RunExecution().executionStatus(SUCCESSFUL).executionTime("1 second")); - executionTimeMetric = executionTimeAggregator.getAggregatedMetricFromAllSubmissions(new 
ExecutionsRequestBody().runExecutions(badExecutions)); - assertTrue(executionTimeMetric.isEmpty()); - - // Add an execution with execution time - final int timeInSeconds = 10; - List executions = List.of(new RunExecution().executionStatus(SUCCESSFUL).executionTime(String.format("PT%dS", timeInSeconds))); - executionTimeMetric = executionTimeAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions)); - assertTrue(executionTimeMetric.isPresent()); - assertEquals(timeInSeconds, executionTimeMetric.get().getMinimum()); - assertEquals(timeInSeconds, executionTimeMetric.get().getMaximum()); - assertEquals(timeInSeconds, executionTimeMetric.get().getAverage()); - assertEquals(1, executionTimeMetric.get().getNumberOfDataPointsForAverage()); - - // Aggregate submissions containing run executions and aggregated metrics - AggregatedExecution submittedAggregatedMetrics = new AggregatedExecution(); - submittedAggregatedMetrics.executionTime(new ExecutionTimeMetric() - .minimum(2.0) - .maximum(6.0) - .average(4.0) - .numberOfDataPointsForAverage(2)); - executionTimeMetric = executionTimeAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions).aggregatedExecutions(List.of(submittedAggregatedMetrics))); - assertTrue(executionTimeMetric.isPresent()); - assertEquals(2.0, executionTimeMetric.get().getMinimum()); - assertEquals(10.0, executionTimeMetric.get().getMaximum()); - assertEquals(6, executionTimeMetric.get().getAverage()); - assertEquals(3, executionTimeMetric.get().getNumberOfDataPointsForAverage()); - - // Aggregate submissions containing workflow run executions and task executions - // Submit a single workflow execution that took 10s and a single task that took 10s - executions = List.of(new RunExecution().executionStatus(SUCCESSFUL).executionTime(String.format("PT%dS", timeInSeconds))); - executionTimeMetric = executionTimeAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions).taskExecutions(List.of(new TaskExecutions().taskExecutions(executions)))); - assertTrue(executionTimeMetric.isPresent()); - assertEquals(timeInSeconds, executionTimeMetric.get().getMinimum()); - assertEquals(timeInSeconds, executionTimeMetric.get().getMaximum()); - assertEquals(timeInSeconds, executionTimeMetric.get().getAverage()); - assertEquals(2, executionTimeMetric.get().getNumberOfDataPointsForAverage()); // There should be 2 data points: 1 for the workflow execution and 1 for the list of tasks - // Submit a single workflow execution that took 10s and two tasks that took 10 seconds. This time, dateExecuted is provided - RunExecution task1 = new RunExecution().executionStatus(SUCCESSFUL).executionTime(String.format("PT%dS", timeInSeconds)); - RunExecution task2 = new RunExecution().executionStatus(SUCCESSFUL).executionTime(String.format("PT%dS", timeInSeconds)); - // The time difference between these two tasks is 10 seconds. 
When there is more than one task, the duration will be calculated from the dates executed, plus the duration of the last task, which is 10s - task1.setDateExecuted("2023-11-09T21:54:10.571285905Z"); - task2.setDateExecuted("2023-11-09T21:54:20.571285905Z"); - executionTimeMetric = executionTimeAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions).taskExecutions(List.of(new TaskExecutions().taskExecutions(List.of(task1, task2))))); - assertTrue(executionTimeMetric.isPresent()); - assertEquals(10, executionTimeMetric.get().getMinimum(), "The minimum is from the workflow execution"); - assertEquals(20, executionTimeMetric.get().getMaximum(), "The maximum is from the workflow execution calculated from the two tasks"); - assertEquals(15, executionTimeMetric.get().getAverage()); - assertEquals(2, executionTimeMetric.get().getNumberOfDataPointsForAverage()); // There should be 2 data points: 1 for the workflow execution and 1 for the list of tasks - } - - @Test - void testGetAggregatedCpu() { - CpuAggregator cpuAggregator = new CpuAggregator(); - List executions = new ArrayList<>(); - Optional cpuMetric = cpuAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions)); - assertTrue(cpuMetric.isEmpty()); - - // Add an execution that doesn't have cpu data - executions.add(new RunExecution().executionStatus(SUCCESSFUL)); - cpuMetric = cpuAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions)); - assertTrue(cpuMetric.isEmpty()); - - // Add an execution with cpu data - final int cpu = 1; - executions.add(new RunExecution().executionStatus(SUCCESSFUL).cpuRequirements(cpu)); - cpuMetric = cpuAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions)); - assertTrue(cpuMetric.isPresent()); - assertEquals(cpu, cpuMetric.get().getMinimum()); - assertEquals(cpu, cpuMetric.get().getMaximum()); - assertEquals(cpu, cpuMetric.get().getAverage()); - assertEquals(1, cpuMetric.get().getNumberOfDataPointsForAverage()); - - // Aggregate submissions containing run executions and aggregated metrics - AggregatedExecution submittedAggregatedMetrics = new AggregatedExecution(); - submittedAggregatedMetrics.cpu(new CpuMetric() - .minimum(2.0) - .maximum(6.0) - .average(4.0) - .numberOfDataPointsForAverage(2)); - cpuMetric = cpuAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions).aggregatedExecutions(List.of(submittedAggregatedMetrics))); - assertTrue(cpuMetric.isPresent()); - assertEquals(1.0, cpuMetric.get().getMinimum()); - assertEquals(6.0, cpuMetric.get().getMaximum()); - assertEquals(3, cpuMetric.get().getAverage()); - assertEquals(3, cpuMetric.get().getNumberOfDataPointsForAverage()); - - // Aggregate submissions containing workflow run executions and task executions - executions = List.of(new RunExecution().executionStatus(SUCCESSFUL).cpuRequirements(cpu)); - // Two task executions with different CPU requirements. 
The workflow execution calculated from these tasks should take the highest cpuRequirement from the tasks - TaskExecutions taskExecutions = new TaskExecutions().taskExecutions(List.of( - new RunExecution().executionStatus(SUCCESSFUL).cpuRequirements(1), - new RunExecution().executionStatus(SUCCESSFUL).cpuRequirements(4))); - cpuMetric = cpuAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions).taskExecutions(List.of(taskExecutions))); - assertTrue(cpuMetric.isPresent()); - assertEquals(1.0, cpuMetric.get().getMinimum()); - assertEquals(4.0, cpuMetric.get().getMaximum()); - assertEquals(2.5, cpuMetric.get().getAverage()); - assertEquals(2, cpuMetric.get().getNumberOfDataPointsForAverage()); // Two data points: 1 from the workflow execution and 1 for the list of tasks - } - - @Test - void testGetAggregatedMemory() { - MemoryAggregator memoryAggregator = new MemoryAggregator(); - List executions = new ArrayList<>(); - Optional memoryMetric = memoryAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions)); - assertTrue(memoryMetric.isEmpty()); - - // Add an execution that doesn't have memory data - executions.add(new RunExecution().executionStatus(SUCCESSFUL)); - memoryMetric = memoryAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions)); - assertTrue(memoryMetric.isEmpty()); - - // Add an execution with memory data - Double memoryInGB = 2.0; - executions.add(new RunExecution().executionStatus(SUCCESSFUL).memoryRequirementsGB(memoryInGB)); - memoryMetric = memoryAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions)); - assertTrue(memoryMetric.isPresent()); - assertEquals(memoryInGB, memoryMetric.get().getMinimum()); - assertEquals(memoryInGB, memoryMetric.get().getMaximum()); - assertEquals(memoryInGB, memoryMetric.get().getAverage()); - assertEquals(1, memoryMetric.get().getNumberOfDataPointsForAverage()); - - // Aggregate submissions containing run executions and aggregated metrics - AggregatedExecution submittedAggregatedMetrics = new AggregatedExecution(); - submittedAggregatedMetrics.memory(new MemoryMetric() - .minimum(2.0) - .maximum(6.0) - .average(4.0) - .numberOfDataPointsForAverage(2)); - memoryMetric = memoryAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions).aggregatedExecutions(List.of(submittedAggregatedMetrics))); - assertTrue(memoryMetric.isPresent()); - assertEquals(2.0, memoryMetric.get().getMinimum()); - assertEquals(6.0, memoryMetric.get().getMaximum()); - assertEquals(3.333333333333333, memoryMetric.get().getAverage()); - assertEquals(3, memoryMetric.get().getNumberOfDataPointsForAverage()); - - // Aggregate submissions containing workflow run executions and task executions - executions = List.of(new RunExecution().executionStatus(SUCCESSFUL).memoryRequirementsGB(2.0)); - // Two task executions with different memory requirements. 
The workflow execution calculated from these tasks should take the highest memoryRequirementsGB from the tasks - TaskExecutions taskExecutions = new TaskExecutions().taskExecutions(List.of( - new RunExecution().executionStatus(SUCCESSFUL).memoryRequirementsGB(2.0), - new RunExecution().executionStatus(SUCCESSFUL).memoryRequirementsGB(4.0))); - memoryMetric = memoryAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions).taskExecutions(List.of(taskExecutions))); - assertTrue(memoryMetric.isPresent()); - assertEquals(2.0, memoryMetric.get().getMinimum()); - assertEquals(4.0, memoryMetric.get().getMaximum()); - assertEquals(3.0, memoryMetric.get().getAverage()); - assertEquals(2, memoryMetric.get().getNumberOfDataPointsForAverage()); // Two data points: 1 from the workflow execution and 1 for the list of tasks - - } - - @Test - void testGetAggregatedCost() { - CostAggregator costAggregator = new CostAggregator(); - List executions = new ArrayList<>(); - Optional costMetric = costAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions)); - assertTrue(costMetric.isEmpty()); - - // Add an execution that doesn't have cost data - executions.add(new RunExecution().executionStatus(SUCCESSFUL)); - costMetric = costAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions)); - assertTrue(costMetric.isEmpty()); - - // Add an execution with cost data - Double costInUSD = 2.00; - executions.add(new RunExecution().executionStatus(SUCCESSFUL).cost(new Cost().value(costInUSD))); - costMetric = costAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions)); - assertTrue(costMetric.isPresent()); - assertEquals(costInUSD, costMetric.get().getMinimum()); - assertEquals(costInUSD, costMetric.get().getMaximum()); - assertEquals(costInUSD, costMetric.get().getAverage()); - assertEquals(1, costMetric.get().getNumberOfDataPointsForAverage()); - - // Aggregate submissions containing run executions and aggregated metrics - AggregatedExecution submittedAggregatedMetrics = new AggregatedExecution(); - submittedAggregatedMetrics.cost(new CostMetric() - .minimum(2.00) - .maximum(6.00) - .average(4.00) - .numberOfDataPointsForAverage(2)); - costMetric = costAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions).aggregatedExecutions(List.of(submittedAggregatedMetrics))); - assertTrue(costMetric.isPresent()); - assertEquals(2.0, costMetric.get().getMinimum()); - assertEquals(6.0, costMetric.get().getMaximum()); - assertEquals(3.333333333333333, costMetric.get().getAverage()); - assertEquals(3, costMetric.get().getNumberOfDataPointsForAverage()); - - // Aggregate submissions containing workflow run executions and task executions - executions = List.of(new RunExecution().executionStatus(SUCCESSFUL).cost(new Cost().value(2.00))); - // Two task executions with different memory requirements. 
The workflow execution calculated from these tasks should have the sum of the cost of all tasks - TaskExecutions taskExecutions = new TaskExecutions().taskExecutions(List.of( - new RunExecution().executionStatus(SUCCESSFUL).cost(new Cost().value(2.00)), - new RunExecution().executionStatus(SUCCESSFUL).cost(new Cost().value(4.00)) - )); - costMetric = costAggregator.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions).taskExecutions(List.of(taskExecutions))); - assertTrue(costMetric.isPresent()); - assertEquals(2.0, costMetric.get().getMinimum()); - assertEquals(6.0, costMetric.get().getMaximum()); // The max is the cost of the two tasks summed together - assertEquals(4.0, costMetric.get().getAverage()); - assertEquals(2, costMetric.get().getNumberOfDataPointsForAverage()); // Two data points: 1 from the workflow execution and 1 for the list of tasks - } - @Test void testGetAggregatedValidationStatus() { List executions = new ArrayList<>(); diff --git a/metricsaggregator/src/test/java/io/dockstore/metricsaggregator/helper/ExecutionStatusAggregatorTest.java b/metricsaggregator/src/test/java/io/dockstore/metricsaggregator/helper/ExecutionStatusAggregatorTest.java index 6f6971ea..cafd324b 100644 --- a/metricsaggregator/src/test/java/io/dockstore/metricsaggregator/helper/ExecutionStatusAggregatorTest.java +++ b/metricsaggregator/src/test/java/io/dockstore/metricsaggregator/helper/ExecutionStatusAggregatorTest.java @@ -5,12 +5,24 @@ import static io.dockstore.openapi.client.model.RunExecution.ExecutionStatusEnum.SUCCESSFUL; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; +import io.dockstore.openapi.client.model.AggregatedExecution; +import io.dockstore.openapi.client.model.Cost; +import io.dockstore.openapi.client.model.CostMetric; +import io.dockstore.openapi.client.model.CpuMetric; import io.dockstore.openapi.client.model.ExecutionStatusMetric; +import io.dockstore.openapi.client.model.ExecutionTimeMetric; +import io.dockstore.openapi.client.model.ExecutionsRequestBody; +import io.dockstore.openapi.client.model.MemoryMetric; +import io.dockstore.openapi.client.model.MetricsByStatus; import io.dockstore.openapi.client.model.RunExecution; import io.dockstore.openapi.client.model.TaskExecutions; +import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.Optional; import org.junit.jupiter.api.Test; @@ -78,7 +90,7 @@ void testGetAggregatedMetricFromWorkflowExecutions() { List workflowExecutions = List.of(new RunExecution().executionStatus(SUCCESSFUL)); executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromExecutions(workflowExecutions); assertTrue(executionStatusMetric.isPresent()); - assertEquals(1, executionStatusMetric.get().getCount().get(SUCCESSFUL.toString())); + assertEquals(1, executionStatusMetric.get().getCount().get(SUCCESSFUL.toString()).getExecutionStatusCount()); assertFalse(executionStatusMetric.get().getCount().containsKey(FAILED_SEMANTIC_INVALID.toString())); assertFalse(executionStatusMetric.get().getCount().containsKey(FAILED_RUNTIME_INVALID.toString())); @@ -90,8 +102,310 @@ void testGetAggregatedMetricFromWorkflowExecutions() { ); executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromExecutions(workflowExecutions); 
assertTrue(executionStatusMetric.isPresent()); - assertEquals(1, executionStatusMetric.get().getCount().get(SUCCESSFUL.toString())); - assertEquals(1, executionStatusMetric.get().getCount().get(FAILED_SEMANTIC_INVALID.toString())); - assertEquals(1, executionStatusMetric.get().getCount().get(FAILED_RUNTIME_INVALID.toString())); + assertEquals(1, executionStatusMetric.get().getCount().get(SUCCESSFUL.toString()).getExecutionStatusCount()); + assertEquals(1, executionStatusMetric.get().getCount().get(FAILED_SEMANTIC_INVALID.toString()).getExecutionStatusCount()); + assertEquals(1, executionStatusMetric.get().getCount().get(FAILED_RUNTIME_INVALID.toString()).getExecutionStatusCount()); + } + + @Test + void testGetAggregatedExecutionStatus() { + ExecutionStatusAggregator executionStatusAggregator = new ExecutionStatusAggregator(); + ExecutionsRequestBody allSubmissions = new ExecutionsRequestBody(); + Optional executionStatusMetric = executionStatusAggregator.getAggregatedMetricFromAllSubmissions(allSubmissions); + assertTrue(executionStatusMetric.isEmpty()); + + RunExecution submittedRunExecution = new RunExecution().executionStatus(SUCCESSFUL); + allSubmissions = new ExecutionsRequestBody().runExecutions(List.of(submittedRunExecution)); + executionStatusMetric = executionStatusAggregator.getAggregatedMetricFromAllSubmissions(allSubmissions); + assertTrue(executionStatusMetric.isPresent()); + assertEquals(1, executionStatusMetric.get().getCount().get(SUCCESSFUL.toString()).getExecutionStatusCount()); + + // Aggregate submissions containing run executions and aggregated metrics + AggregatedExecution submittedAggregatedMetrics = new AggregatedExecution(); + submittedAggregatedMetrics.executionStatusCount( + new ExecutionStatusMetric().count( + Map.of(SUCCESSFUL.toString(), new MetricsByStatus().executionStatusCount(10), + FAILED_RUNTIME_INVALID.toString(), new MetricsByStatus().executionStatusCount(1)))); + allSubmissions = new ExecutionsRequestBody().runExecutions(List.of(submittedRunExecution)).aggregatedExecutions(List.of(submittedAggregatedMetrics)); + executionStatusMetric = executionStatusAggregator.getAggregatedMetricFromAllSubmissions(allSubmissions); + assertTrue(executionStatusMetric.isPresent()); + assertEquals(11, executionStatusMetric.get().getCount().get(SUCCESSFUL.toString()).getExecutionStatusCount()); + assertEquals(1, executionStatusMetric.get().getCount().get(FAILED_RUNTIME_INVALID.toString()).getExecutionStatusCount()); + assertNull(executionStatusMetric.get().getCount().get(FAILED_SEMANTIC_INVALID.toString()), "Should be null because the key doesn't exist in the count map"); + + // Aggregate submissions containing workflow run executions and task executions + submittedRunExecution = new RunExecution().executionStatus(SUCCESSFUL); + TaskExecutions taskExecutionsForOneWorkflowRun = new TaskExecutions().taskExecutions(List.of(submittedRunExecution)); + allSubmissions = new ExecutionsRequestBody().runExecutions(List.of(submittedRunExecution)).taskExecutions(List.of(taskExecutionsForOneWorkflowRun)); + executionStatusMetric = executionStatusAggregator.getAggregatedMetricFromAllSubmissions(allSubmissions); + assertTrue(executionStatusMetric.isPresent()); + assertEquals(2, executionStatusMetric.get().getCount().get(SUCCESSFUL.toString()).getExecutionStatusCount()); + // Submit one successful workflow execution and a list of task executions where the single task failed + taskExecutionsForOneWorkflowRun = new TaskExecutions().taskExecutions(List.of(new 
RunExecution().executionStatus(FAILED_RUNTIME_INVALID))); + allSubmissions = new ExecutionsRequestBody().runExecutions(List.of(submittedRunExecution)).taskExecutions(List.of(taskExecutionsForOneWorkflowRun)); + executionStatusMetric = executionStatusAggregator.getAggregatedMetricFromAllSubmissions(allSubmissions); + assertTrue(executionStatusMetric.isPresent()); + assertEquals(1, executionStatusMetric.get().getCount().get(SUCCESSFUL.toString()).getExecutionStatusCount()); + assertEquals(1, executionStatusMetric.get().getCount().get(FAILED_RUNTIME_INVALID.toString()).getExecutionStatusCount()); + // Submit one successful workflow execution and a list of task executions where there are two tasks that failed due to different reasons + taskExecutionsForOneWorkflowRun = new TaskExecutions().taskExecutions( + List.of(new RunExecution().executionStatus(FAILED_RUNTIME_INVALID), + new RunExecution().executionStatus(FAILED_SEMANTIC_INVALID))); + allSubmissions = new ExecutionsRequestBody().runExecutions(List.of(submittedRunExecution)).taskExecutions(List.of(taskExecutionsForOneWorkflowRun)); + executionStatusMetric = executionStatusAggregator.getAggregatedMetricFromAllSubmissions(allSubmissions); + assertTrue(executionStatusMetric.isPresent()); + assertEquals(1, executionStatusMetric.get().getCount().get(SUCCESSFUL.toString()).getExecutionStatusCount()); + // There should be 1 of either FAILED_RUNTIME_INVALID or FAILED_SEMANTIC_INVALID. + assertTrue(executionStatusMetric.get().getCount().containsKey(FAILED_RUNTIME_INVALID.toString()) || executionStatusMetric.get().getCount().containsKey(FAILED_SEMANTIC_INVALID.toString())); + // Submit one successful workflow execution and a list of task executions where there are 3 tasks that failed due to different reasons + taskExecutionsForOneWorkflowRun = new TaskExecutions().taskExecutions( + List.of(new RunExecution().executionStatus(FAILED_RUNTIME_INVALID), + new RunExecution().executionStatus(FAILED_RUNTIME_INVALID), + new RunExecution().executionStatus(FAILED_SEMANTIC_INVALID))); + allSubmissions = new ExecutionsRequestBody().runExecutions(List.of(submittedRunExecution)).taskExecutions(List.of(taskExecutionsForOneWorkflowRun)); + executionStatusMetric = executionStatusAggregator.getAggregatedMetricFromAllSubmissions(allSubmissions); + assertTrue(executionStatusMetric.isPresent()); + assertEquals(1, executionStatusMetric.get().getCount().get(SUCCESSFUL.toString()).getExecutionStatusCount()); + // There should be one count of FAILED_RUNTIME_INVALID because it's the most frequent failed status in the list of task executions + assertEquals(1, executionStatusMetric.get().getCount().get(FAILED_RUNTIME_INVALID.toString()).getExecutionStatusCount()); + } + + @Test + void testGetAggregatedExecutionTime() { + List badExecutions = new ArrayList<>(); + + // Add an execution that doesn't have execution time data + badExecutions.add(new RunExecution().executionStatus(SUCCESSFUL)); + Optional executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(badExecutions)); + assertTrue(executionStatusMetric.isPresent()); + ExecutionTimeMetric executionTimeMetric = executionStatusMetric.get().getCount().get(SUCCESSFUL.name()).getExecutionTime(); + assertNull(executionTimeMetric); + + // Add an execution with malformed execution time data + badExecutions.add(new RunExecution().executionStatus(SUCCESSFUL).executionTime("1 second")); + executionStatusMetric = 
EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(badExecutions)); + executionTimeMetric = executionStatusMetric.get().getCount().get(SUCCESSFUL.name()).getExecutionTime(); + assertNull(executionTimeMetric); + + // Add an execution with execution time + final int timeInSeconds = 10; + List executions = List.of(new RunExecution().executionStatus(SUCCESSFUL).executionTime(String.format("PT%dS", timeInSeconds))); + executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions)); + executionTimeMetric = executionStatusMetric.get().getCount().get(SUCCESSFUL.name()).getExecutionTime(); + assertNotNull(executionTimeMetric); + assertEquals(timeInSeconds, executionTimeMetric.getMinimum()); + assertEquals(timeInSeconds, executionTimeMetric.getMaximum()); + assertEquals(timeInSeconds, executionTimeMetric.getAverage()); + assertEquals(1, executionTimeMetric.getNumberOfDataPointsForAverage()); + + // Aggregate submissions containing run executions and aggregated metrics + AggregatedExecution submittedAggregatedMetrics = new AggregatedExecution(); + ExecutionStatusMetric submittedExecutionStatusMetric = new ExecutionStatusMetric().putCountItem(SUCCESSFUL.name(), + new MetricsByStatus() + .executionStatusCount(1) + .executionTime(new ExecutionTimeMetric() + .minimum(2.0) + .maximum(6.0) + .average(4.0) + .numberOfDataPointsForAverage(2))); + submittedAggregatedMetrics.executionStatusCount(submittedExecutionStatusMetric); + executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions).aggregatedExecutions(List.of(submittedAggregatedMetrics))); + executionTimeMetric = executionStatusMetric.get().getCount().get(SUCCESSFUL.name()).getExecutionTime(); + assertNotNull(executionTimeMetric); + assertEquals(2.0, executionTimeMetric.getMinimum()); + assertEquals(10.0, executionTimeMetric.getMaximum()); + assertEquals(6, executionTimeMetric.getAverage()); + assertEquals(3, executionTimeMetric.getNumberOfDataPointsForAverage()); + + // Aggregate submissions containing workflow run executions and task executions + // Submit a single workflow execution that took 10s and a single task that took 10s + executions = List.of(new RunExecution().executionStatus(SUCCESSFUL).executionTime(String.format("PT%dS", timeInSeconds))); + executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions).taskExecutions(List.of(new TaskExecutions().taskExecutions(executions)))); + executionTimeMetric = executionStatusMetric.get().getCount().get(SUCCESSFUL.name()).getExecutionTime(); + assertNotNull(executionTimeMetric); + assertEquals(timeInSeconds, executionTimeMetric.getMinimum()); + assertEquals(timeInSeconds, executionTimeMetric.getMaximum()); + assertEquals(timeInSeconds, executionTimeMetric.getAverage()); + assertEquals(2, executionTimeMetric.getNumberOfDataPointsForAverage()); // There should be 2 data points: 1 for the workflow execution and 1 for the list of tasks + // Submit a single workflow execution that took 10s and two tasks that took 10 seconds. 
This time, dateExecuted is provided + RunExecution task1 = new RunExecution().executionStatus(SUCCESSFUL).executionTime(String.format("PT%dS", timeInSeconds)); + RunExecution task2 = new RunExecution().executionStatus(SUCCESSFUL).executionTime(String.format("PT%dS", timeInSeconds)); + // The time difference between these two tasks is 10 seconds. When there is more than one task, the duration will be calculated from the dates executed, plus the duration of the last task, which is 10s + task1.setDateExecuted("2023-11-09T21:54:10.571285905Z"); + task2.setDateExecuted("2023-11-09T21:54:20.571285905Z"); + executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions).taskExecutions(List.of(new TaskExecutions().taskExecutions(List.of(task1, task2))))); + executionTimeMetric = executionStatusMetric.get().getCount().get(SUCCESSFUL.name()).getExecutionTime(); + assertNotNull(executionTimeMetric); + assertEquals(10, executionTimeMetric.getMinimum(), "The minimum is from the workflow execution"); + assertEquals(20, executionTimeMetric.getMaximum(), "The maximum is from the workflow execution calculated from the two tasks"); + assertEquals(15, executionTimeMetric.getAverage()); + assertEquals(2, executionTimeMetric.getNumberOfDataPointsForAverage()); // There should be 2 data points: 1 for the workflow execution and 1 for the list of tasks + } + + @Test + void testGetAggregatedCpu() { + List executions = new ArrayList<>(); + + // Add an execution that doesn't have cpu data + executions.add(new RunExecution().executionStatus(SUCCESSFUL)); + Optional executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions)); + CpuMetric cpuMetric = executionStatusMetric.get().getCount().get(SUCCESSFUL.name()).getCpu(); + assertNull(cpuMetric); + + // Add an execution with cpu data + final int cpu = 1; + executions.add(new RunExecution().executionStatus(SUCCESSFUL).cpuRequirements(cpu)); + executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions)); + cpuMetric = executionStatusMetric.get().getCount().get(SUCCESSFUL.name()).getCpu(); + assertNotNull(cpuMetric); + assertEquals(cpu, cpuMetric.getMinimum()); + assertEquals(cpu, cpuMetric.getMaximum()); + assertEquals(cpu, cpuMetric.getAverage()); + assertEquals(1, cpuMetric.getNumberOfDataPointsForAverage()); + + // Aggregate submissions containing run executions and aggregated metrics + AggregatedExecution submittedAggregatedMetrics = new AggregatedExecution(); + ExecutionStatusMetric submittedExecutionStatusMetric = new ExecutionStatusMetric().putCountItem(SUCCESSFUL.name(), + new MetricsByStatus() + .executionStatusCount(1) + .cpu(new CpuMetric() + .minimum(2.0) + .maximum(6.0) + .average(4.0) + .numberOfDataPointsForAverage(2))); + submittedAggregatedMetrics.executionStatusCount(submittedExecutionStatusMetric); + executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions).aggregatedExecutions(List.of(submittedAggregatedMetrics))); + cpuMetric = executionStatusMetric.get().getCount().get(SUCCESSFUL.name()).getCpu(); + assertNotNull(cpuMetric); + assertEquals(1.0, cpuMetric.getMinimum()); + assertEquals(6.0, cpuMetric.getMaximum()); + assertEquals(3, cpuMetric.getAverage()); + assertEquals(3, cpuMetric.getNumberOfDataPointsForAverage()); + + 
// Aggregate submissions containing workflow run executions and task executions + executions = List.of(new RunExecution().executionStatus(SUCCESSFUL).cpuRequirements(cpu)); + // Two task executions with different CPU requirements. The workflow execution calculated from these tasks should take the highest cpuRequirement from the tasks + TaskExecutions taskExecutions = new TaskExecutions().taskExecutions(List.of( + new RunExecution().executionStatus(SUCCESSFUL).cpuRequirements(1), + new RunExecution().executionStatus(SUCCESSFUL).cpuRequirements(4))); + executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions).taskExecutions(List.of(taskExecutions))); + cpuMetric = executionStatusMetric.get().getCount().get(SUCCESSFUL.name()).getCpu(); + assertNotNull(cpuMetric); + assertEquals(1.0, cpuMetric.getMinimum()); + assertEquals(4.0, cpuMetric.getMaximum()); + assertEquals(2.5, cpuMetric.getAverage()); + assertEquals(2, cpuMetric.getNumberOfDataPointsForAverage()); // Two data points: 1 from the workflow execution and 1 for the list of tasks + } + + @Test + void testGetAggregatedMemory() { + List executions = new ArrayList<>(); + + // Add an execution that doesn't have memory data + executions.add(new RunExecution().executionStatus(SUCCESSFUL)); + Optional executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions)); + MemoryMetric memoryMetric = executionStatusMetric.get().getCount().get(SUCCESSFUL.name()).getMemory(); + assertNull(memoryMetric); + + // Add an execution with memory data + Double memoryInGB = 2.0; + executions.add(new RunExecution().executionStatus(SUCCESSFUL).memoryRequirementsGB(memoryInGB)); + executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions)); + memoryMetric = executionStatusMetric.get().getCount().get(SUCCESSFUL.name()).getMemory(); + assertNotNull(memoryMetric); + assertEquals(memoryInGB, memoryMetric.getMinimum()); + assertEquals(memoryInGB, memoryMetric.getMaximum()); + assertEquals(memoryInGB, memoryMetric.getAverage()); + assertEquals(1, memoryMetric.getNumberOfDataPointsForAverage()); + + // Aggregate submissions containing run executions and aggregated metrics + AggregatedExecution submittedAggregatedMetrics = new AggregatedExecution(); + ExecutionStatusMetric submittedExecutionStatusMetric = new ExecutionStatusMetric().putCountItem(SUCCESSFUL.name(), + new MetricsByStatus() + .executionStatusCount(1) + .memory(new MemoryMetric() + .minimum(2.0) + .maximum(6.0) + .average(4.0) + .numberOfDataPointsForAverage(2))); + submittedAggregatedMetrics.executionStatusCount(submittedExecutionStatusMetric); + executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions).aggregatedExecutions(List.of(submittedAggregatedMetrics))); + memoryMetric = executionStatusMetric.get().getCount().get(SUCCESSFUL.name()).getMemory(); + assertNotNull(memoryMetric); + assertEquals(2.0, memoryMetric.getMinimum()); + assertEquals(6.0, memoryMetric.getMaximum()); + assertEquals(3.333333333333333, memoryMetric.getAverage()); + assertEquals(3, memoryMetric.getNumberOfDataPointsForAverage()); + + // Aggregate submissions containing workflow run executions and task executions + executions = List.of(new 
RunExecution().executionStatus(SUCCESSFUL).memoryRequirementsGB(2.0)); + // Two task executions with different memory requirements. The workflow execution calculated from these tasks should take the highest memoryRequirementsGB from the tasks + TaskExecutions taskExecutions = new TaskExecutions().taskExecutions(List.of( + new RunExecution().executionStatus(SUCCESSFUL).memoryRequirementsGB(2.0), + new RunExecution().executionStatus(SUCCESSFUL).memoryRequirementsGB(4.0))); + executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions).taskExecutions(List.of(taskExecutions))); + memoryMetric = executionStatusMetric.get().getCount().get(SUCCESSFUL.name()).getMemory(); + assertNotNull(memoryMetric); + assertEquals(2.0, memoryMetric.getMinimum()); + assertEquals(4.0, memoryMetric.getMaximum()); + assertEquals(3.0, memoryMetric.getAverage()); + assertEquals(2, memoryMetric.getNumberOfDataPointsForAverage()); // Two data points: 1 from the workflow execution and 1 for the list of tasks + } + + @Test + void testGetAggregatedCost() { + List executions = new ArrayList<>(); + + // Add an execution that doesn't have cost data + executions.add(new RunExecution().executionStatus(SUCCESSFUL)); + Optional executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions)); + assertTrue(executionStatusMetric.isPresent()); + CostMetric costMetric = executionStatusMetric.get().getCount().get(SUCCESSFUL.name()).getCost(); + assertNull(costMetric); + + // Add an execution with cost data + Double costInUSD = 2.00; + executions.add(new RunExecution().executionStatus(SUCCESSFUL).cost(new Cost().value(costInUSD))); + executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions)); + costMetric = executionStatusMetric.get().getCount().get(SUCCESSFUL.name()).getCost(); + assertNotNull(costMetric); + assertEquals(costInUSD, costMetric.getMinimum()); + assertEquals(costInUSD, costMetric.getMaximum()); + assertEquals(costInUSD, costMetric.getAverage()); + assertEquals(1, costMetric.getNumberOfDataPointsForAverage()); + + // Aggregate submissions containing run executions and aggregated metrics + AggregatedExecution submittedAggregatedMetrics = new AggregatedExecution(); + ExecutionStatusMetric submittedExecutionStatusMetric = new ExecutionStatusMetric().putCountItem(SUCCESSFUL.name(), + new MetricsByStatus() + .executionStatusCount(1) + .cost(new CostMetric() + .minimum(2.0) + .maximum(6.0) + .average(4.0) + .numberOfDataPointsForAverage(2))); + submittedAggregatedMetrics.executionStatusCount(submittedExecutionStatusMetric); + executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions).aggregatedExecutions(List.of(submittedAggregatedMetrics))); + costMetric = executionStatusMetric.get().getCount().get(SUCCESSFUL.name()).getCost(); + assertNotNull(costMetric); + assertEquals(2.0, costMetric.getMinimum()); + assertEquals(6.0, costMetric.getMaximum()); + assertEquals(3.333333333333333, costMetric.getAverage()); + assertEquals(3, costMetric.getNumberOfDataPointsForAverage()); + + // Aggregate submissions containing workflow run executions and task executions + executions = List.of(new RunExecution().executionStatus(SUCCESSFUL).cost(new Cost().value(2.00))); + // Two task executions with different costs.
The workflow execution calculated from these tasks should have the sum of the cost of all tasks + TaskExecutions taskExecutions = new TaskExecutions().taskExecutions(List.of( + new RunExecution().executionStatus(SUCCESSFUL).cost(new Cost().value(2.00)), + new RunExecution().executionStatus(SUCCESSFUL).cost(new Cost().value(4.00)) + )); + executionStatusMetric = EXECUTION_STATUS_AGGREGATOR.getAggregatedMetricFromAllSubmissions(new ExecutionsRequestBody().runExecutions(executions).taskExecutions(List.of(taskExecutions))); + costMetric = executionStatusMetric.get().getCount().get(SUCCESSFUL.name()).getCost(); + assertNotNull(costMetric); + assertEquals(2.0, costMetric.getMinimum()); + assertEquals(6.0, costMetric.getMaximum()); // The max is the cost of the two tasks summed together + assertEquals(4.0, costMetric.getAverage()); + assertEquals(2, costMetric.getNumberOfDataPointsForAverage()); // Two data points: 1 from the workflow execution and 1 for the list of tasks } } diff --git a/pom.xml b/pom.xml index 79e132cd..ff6581a8 100644 --- a/pom.xml +++ b/pom.xml @@ -38,7 +38,7 @@ scm:git:git@github.com:dockstore/dockstore-support.git UTF-8 - 1.15.0-rc.0 + 1.15.0-rc.2 3.0.0-M5 2.22.2 false
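
Note on the new read path exercised by the tests above: per-metric getters are no longer read from Metrics directly (metrics.getCpu(), metrics.getMemory(), ...) but from a per-status MetricsByStatus entry keyed by execution status name, with an extra ALL entry for the aggregate across all statuses. A minimal sketch of that lookup, using only model classes and accessors that appear in this diff; the helper class, method name, and null handling below are illustrative and not part of the change itself.

    import io.dockstore.openapi.client.model.Metrics;
    import io.dockstore.openapi.client.model.MetricsByStatus;

    public final class MetricsByStatusReadSketch {

        // Sketch: read the aggregated CPU average for a given status key
        // (e.g. SUCCESSFUL.name() or ALL.name(), as used in the tests above).
        // Returns null when that status or metric was never aggregated,
        // mirroring the assertNull(...) checks in the updated assertions.
        public static Double cpuAverageForStatus(Metrics metrics, String status) {
            MetricsByStatus byStatus = metrics.getExecutionStatusCount().getCount().get(status);
            if (byStatus == null || byStatus.getCpu() == null) {
                return null;
            }
            return byStatus.getCpu().getAverage();
        }
    }

Called as cpuAverageForStatus(metrics, SUCCESSFUL.name()), this performs the same lookup the updated assertions do inline.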
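The execution-time assertions above (minimum 10, maximum 20) lean on the rule stated in the test comment: when a workflow submission consists of more than one task, its duration is taken as the span between the tasks' dateExecuted timestamps plus the duration of the last task. A small self-contained sketch of that arithmetic under those stated assumptions; the Task record and method name here are hypothetical and are not the aggregator's actual API.

    import java.time.Duration;
    import java.time.Instant;
    import java.util.Comparator;
    import java.util.List;

    public final class TaskDurationSketch {

        // Hypothetical stand-in for a task execution: when it started and how long it ran.
        record Task(Instant dateExecuted, Duration executionTime) {}

        // Span from the earliest task start to the latest task start, plus the last task's duration.
        static Duration workflowDurationFromTasks(List<Task> tasks) {
            Instant first = tasks.stream().map(Task::dateExecuted).min(Comparator.naturalOrder()).orElseThrow();
            Task last = tasks.stream().max(Comparator.comparing(Task::dateExecuted)).orElseThrow();
            return Duration.between(first, last.dateExecuted()).plus(last.executionTime());
        }

        public static void main(String[] args) {
            // The two tasks from the test: both run PT10S, started 10 seconds apart.
            Task task1 = new Task(Instant.parse("2023-11-09T21:54:10.571285905Z"), Duration.parse("PT10S"));
            Task task2 = new Task(Instant.parse("2023-11-09T21:54:20.571285905Z"), Duration.parse("PT10S"));
            // Prints PT20S, matching the asserted maximum of 20 seconds.
            System.out.println(workflowDurationFromTasks(List.of(task1, task2)));
        }
    }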