diff --git a/argus/backend/controller/view_api.py b/argus/backend/controller/view_api.py
index cd9cc139..d1dca588 100644
--- a/argus/backend/controller/view_api.py
+++ b/argus/backend/controller/view_api.py
@@ -6,6 +6,7 @@
request,
)
from argus.backend.controller.views_widgets.highlights import bp as highlights_bp
+from argus.backend.controller.views_widgets.summary import bp as summary_bp
from argus.backend.error_handlers import handle_api_exception
from argus.backend.models.web import User
from argus.backend.service.stats import ViewStatsCollector
@@ -16,6 +17,7 @@
bp = Blueprint('view_api', __name__, url_prefix='/views')
LOGGER = logging.getLogger(__name__)
bp.register_blueprint(highlights_bp)
+bp.register_blueprint(summary_bp)
bp.register_error_handler(Exception, handle_api_exception)
@@ -161,4 +163,4 @@ def view_resolve(view_id: str):
return {
"status": "ok",
"response": res
- }
\ No newline at end of file
+ }
diff --git a/argus/backend/controller/views_widgets/summary.py b/argus/backend/controller/views_widgets/summary.py
new file mode 100644
index 00000000..86beed1d
--- /dev/null
+++ b/argus/backend/controller/views_widgets/summary.py
@@ -0,0 +1,42 @@
+from uuid import UUID
+
+from flask import Blueprint, request, g
+
+from argus.backend.models.web import ArgusUserView
+from argus.backend.service.results_service import ResultsService
+from argus.backend.service.user import api_login_required
+from argus.backend.util.common import get_payload
+
+bp = Blueprint("summary", __name__, url_prefix="/widgets")
+
+
+@bp.route("/summary/versioned_runs", methods=["GET"])
+@api_login_required
+def get_versioned_runs():
+ view_id = UUID(request.args.get("view_id"))
+ view: ArgusUserView = ArgusUserView.get(id=view_id)
+ service = ResultsService()
+ versioned_runs = service.get_tests_by_version("scylla-server", view.tests)
+ return {
+ "status": "ok",
+ "response": versioned_runs,
+ }
+
+@bp.route("/summary/runs_results", methods=["POST"])
+@api_login_required
+def get_runs_results():
+ versioned_runs = get_payload(request)
+ service = ResultsService()
+ response = {}
+ for test_id, test_methods in versioned_runs.items():
+ response[test_id] = {}
+ for method, run in test_methods.items():
+ response[test_id][method] = {}
+ run_id = run['run_id']
+ response[test_id][method][run_id] = service.get_run_results(UUID(test_id), UUID(run_id), key_metrics=[
+ "P99 read", "P99 write", "duration", "Throughput write", "Throughput read", "allocs_per_op",
+ "cpu_cycles_per_op", "instructions_per_op", "logallocs_per_op"])
+ return {
+ "status": "ok",
+ "response": response,
+ }
diff --git a/argus/backend/service/results_service.py b/argus/backend/service/results_service.py
index 029efa9a..a3f2b348 100644
--- a/argus/backend/service/results_service.py
+++ b/argus/backend/service/results_service.py
@@ -108,33 +108,33 @@ class RunsDetails:
}
colors = [
- 'rgba(220, 53, 69, 1.0)', # Soft Red
- 'rgba(40, 167, 69, 1.0)', # Soft Green
- 'rgba(0, 123, 255, 1.0)', # Soft Blue
- 'rgba(23, 162, 184, 1.0)', # Soft Cyan
+ 'rgba(220, 53, 69, 1.0)', # Soft Red
+ 'rgba(40, 167, 69, 1.0)', # Soft Green
+ 'rgba(0, 123, 255, 1.0)', # Soft Blue
+ 'rgba(23, 162, 184, 1.0)', # Soft Cyan
'rgba(108, 117, 125, 1.0)', # Soft Magenta
- 'rgba(255, 193, 7, 1.0)', # Soft Yellow
- 'rgba(255, 133, 27, 1.0)', # Soft Orange
- 'rgba(102, 16, 242, 1.0)', # Soft Purple
+ 'rgba(255, 193, 7, 1.0)', # Soft Yellow
+ 'rgba(255, 133, 27, 1.0)', # Soft Orange
+ 'rgba(102, 16, 242, 1.0)', # Soft Purple
'rgba(111, 207, 151, 1.0)', # Soft Lime
'rgba(255, 182, 193, 1.0)', # Soft Pink
- 'rgba(32, 201, 151, 1.0)', # Soft Teal
- 'rgba(134, 83, 78, 1.0)', # Soft Brown
- 'rgba(0, 84, 153, 1.0)', # Soft Navy
- 'rgba(128, 128, 0, 1.0)', # Soft Olive
- 'rgba(255, 159, 80, 1.0)' # Soft Coral
+ 'rgba(32, 201, 151, 1.0)', # Soft Teal
+ 'rgba(134, 83, 78, 1.0)', # Soft Brown
+ 'rgba(0, 84, 153, 1.0)', # Soft Navy
+ 'rgba(128, 128, 0, 1.0)', # Soft Olive
+ 'rgba(255, 159, 80, 1.0)' # Soft Coral
]
shapes = ["circle", "triangle", "rect", "star", "dash", "crossRot", "line"]
def get_sorted_data_for_column_and_row(data: List[ArgusGenericResultData], column: str, row: str,
runs_details: RunsDetails, main_package: str) -> List[Dict[str, Any]]:
- points = sorted([{"x": entry.sut_timestamp.strftime('%Y-%m-%dT%H:%M:%SZ'),
- "y": entry.value,
- "id": entry.run_id,
- }
- for entry in data if entry.column == column and entry.row == row],
- key=lambda point: point["x"])
+ points = sorted([{"x": entry.sut_timestamp.strftime('%Y-%m-%dT%H:%M:%SZ'),
+ "y": entry.value,
+ "id": entry.run_id,
+ }
+ for entry in data if entry.column == column and entry.row == row],
+ key=lambda point: point["x"])
if not points:
return points
packages = runs_details.packages
@@ -159,6 +159,7 @@ def get_sorted_data_for_column_and_row(data: List[ArgusGenericResultData], colum
prev_versions = current_versions
return points
+
def get_min_max_y(datasets: List[Dict[str, Any]]) -> (float, float):
"""0.5 - 1.5 of min/max of 50% results"""
y = [entry['y'] for dataset in datasets for entry in dataset['data']]
@@ -216,7 +217,7 @@ def calculate_limits(points: List[dict], best_results: List, validation_rules_li
def create_datasets_for_column(table: ArgusGenericResultMetadata, data: list[ArgusGenericResultData],
best_results: dict[str, List[BestResult]], releases_map: ReleasesMap, column: ColumnMetadata,
- runs_details: RunsDetails, main_package:str) -> List[Dict]:
+ runs_details: RunsDetails, main_package: str) -> List[Dict]:
"""
Create datasets (series) for a specific column, splitting by version and showing limit lines.
"""
@@ -325,7 +326,6 @@ def calculate_graph_ticks(graphs: List[Dict]) -> dict[str, str]:
return {"min": min_x[:10], "max": max_x[:10]}
-
def _identify_most_changed_package(packages_list: list[PackageVersion]) -> str:
version_date_changes: dict[str, set[tuple[str, str]]] = defaultdict(set)
@@ -420,27 +420,73 @@ def get_table_metadata(self, test_id: UUID, table_name: str) -> ArgusGenericResu
table_meta = self.cluster.session.execute(query=query, parameters=(test_id, table_name))
return [ArgusGenericResultMetadata(**table) for table in table_meta][0] if table_meta else None
- def get_run_results(self, test_id: UUID, run_id: UUID) -> list[dict]:
+ def get_run_results(self, test_id: UUID, run_id: UUID, key_metrics: list[str] | None = None) -> list:
query_fields = ["column", "row", "value", "value_text", "status"]
- raw_query = (f"SELECT {','.join(query_fields)},WRITETIME(status) as ordering"
- f" FROM generic_result_data_v1 WHERE test_id = ? AND run_id = ? AND name = ?")
+ raw_query = (f"SELECT {','.join(query_fields)}, WRITETIME(status) as ordering "
+ f"FROM generic_result_data_v1 WHERE test_id = ? AND run_id = ? AND name = ?")
query = self.cluster.prepare(raw_query)
tables_meta = self._get_tables_metadata(test_id=test_id)
- tables = []
+ table_entries = []
for table in tables_meta:
cells = self.cluster.session.execute(query=query, parameters=(test_id, run_id, table.name))
+ cells = [dict(cell.items()) for cell in cells]
+ if key_metrics:
+ cells = [cell for cell in cells if cell['column'] in key_metrics]
if not cells:
continue
- cells = [dict(cell.items()) for cell in cells]
- tables.append({'meta': {
- 'name': table.name,
- 'description': table.description,
- 'columns_meta': table.columns_meta,
- 'rows_meta': table.rows_meta,
- },
- 'cells': [{k: v for k, v in cell.items() if k in query_fields} for cell in cells],
- 'order': min([cell['ordering'] for cell in cells] or [0])})
- return sorted(tables, key=lambda x: x['order'])
+
+ table_name = table.name
+ table_description = table.description
+ column_types_map = {col_meta.name: col_meta.type for col_meta in table.columns_meta}
+ column_names = [col_meta.name for col_meta in table.columns_meta]
+
+ table_data = {
+ 'description': table_description,
+ 'table_data': {},
+ 'columns': [],
+ 'rows': [],
+ 'table_status': 'PASS',
+ }
+
+ present_columns = {cell['column'] for cell in cells}
+ present_rows = {cell['row'] for cell in cells}
+
+        # Keep only the columns and rows that actually appear in the returned cells
+ table_data['columns'] = [
+ col_meta for col_meta in table.columns_meta if col_meta.name in present_columns
+ ]
+ table_data['rows'] = [
+ row for row in table.rows_meta if row in present_rows
+ ]
+
+ for row in table_data['rows']:
+ table_data['table_data'][row] = {}
+
+ for cell in cells:
+ column = cell['column']
+ row = cell['row']
+ value = cell.get('value') or cell.get('value_text')
+ status = cell['status']
+
+ if column in column_names and row in table_data['rows']:
+ table_data['table_data'][row][column] = {
+ 'value': value,
+ 'status': status,
+ 'type': column_types_map.get(column)
+ }
+
+ if status not in ["UNSET", "PASS"] and table_data['table_status'] != "ERROR":
+ table_data['table_status'] = status
+
+ table_entries.append({
+ 'table_name': table_name,
+ 'table_data': table_data,
+ 'ordering': cells[0]['ordering']
+ })
+
+ table_entries.sort(key=lambda x: x['ordering'])
+
+ return [{entry['table_name']: entry['table_data']} for entry in table_entries]
def get_test_graphs(self, test_id: UUID, start_date: datetime | None = None, end_date: datetime | None = None):
runs_details = self._get_runs_details(test_id)
@@ -455,7 +501,8 @@ def get_test_graphs(self, test_id: UUID, start_date: datetime | None = None, end
best_results = self.get_best_results(test_id=test_id, name=table.name)
main_package = _identify_most_changed_package([pkg for sublist in runs_details.packages.values() for pkg in sublist])
releases_map = _split_results_by_release(runs_details.packages, main_package=main_package)
- graphs.extend(create_chartjs(table, data, best_results, releases_map=releases_map, runs_details=runs_details, main_package=main_package))
+ graphs.extend(
+ create_chartjs(table, data, best_results, releases_map=releases_map, runs_details=runs_details, main_package=main_package))
releases_filters.update(releases_map.keys())
ticks = calculate_graph_ticks(graphs)
return graphs, ticks, list(releases_filters)
@@ -499,3 +546,67 @@ def update_best_results(self, test_id: UUID, table_name: str, cells: list[Cell],
ArgusBestResultData(test_id=test_id, name=table_name, key=key, value=cell.value, result_date=result_date,
run_id=run_id).save()
return best_results
+
+ def _exclude_disabled_tests(self, test_ids: list[UUID]) -> list[UUID]:
+ is_enabled_query = self.cluster.prepare("SELECT id, enabled FROM argus_test_v2 WHERE id = ?")
+ return [test_id for test_id in test_ids if self.cluster.session.execute(is_enabled_query, parameters=(test_id,)).one()['enabled']]
+
+ def get_tests_by_version(self, sut_package_name: str, test_ids: list[UUID]) -> dict:
+ """
+ Get the latest run details for each test method, excluding ignored runs.
+ Returns:
+ {
+ 'versions': {version: {test_id: {test_method: {'run_id': run_id, 'status': status}}}},
+ 'test_info': {test_id: {'name': test_name, 'build_id': build_id}}
+ }
+ Currently works only with scylla-cluster-tests plugin (due to test_method field requirement)
+ """
+ plugin = TestRunService().get_plugin("scylla-cluster-tests")
+ result = defaultdict(lambda: defaultdict(dict))
+ test_info = {}
+ test_ids = self._exclude_disabled_tests(test_ids)
+ for test_id in test_ids:
+ runs_details_query = self.cluster.prepare(
+ f"""
+ SELECT id, status, investigation_status, test_name, build_id, packages, test_method, started_by
+ FROM {plugin.model.table_name()}
+ WHERE test_id = ? LIMIT 10
+ """
+ )
+ rows = self.cluster.session.execute(runs_details_query, parameters=(test_id,)).all()
+ for row in rows:
+ if row["investigation_status"].lower() == "ignored":
+ continue
+ packages = row['packages']
+ test_method = row['test_method']
+ if not test_method:
+ continue
+ sut_version = next(
+ (f"{pkg.version}-{pkg.date}-{pkg.revision_id}" for pkg in packages if pkg.name == f"{sut_package_name}-upgraded"),
+ None
+ ) or next(
+ (f"{pkg.version}-{pkg.date}-{pkg.revision_id}" for pkg in packages if pkg.name.startswith(sut_package_name)),
+ None
+ )
+
+ if sut_version is None:
+ continue
+ method_name = test_method.rsplit('.', 1)[-1]
+
+ if method_name not in result[sut_version][str(test_id)]:
+ result[sut_version][str(test_id)][method_name] = {
+ 'run_id': str(row['id']),
+ 'status': row['status'],
+ 'started_by': row['started_by']
+ }
+
+ if str(test_id) not in test_info:
+ test_info[str(test_id)] = {
+ 'name': row['test_name'],
+ 'build_id': row['build_id']
+ }
+
+ return {
+ 'versions': {version: dict(tests) for version, tests in result.items()},
+ 'test_info': test_info
+ }
diff --git a/argus/backend/tests/results_service/test_results_service.py b/argus/backend/tests/results_service/test_results_service.py
index ac518401..c451f77b 100644
--- a/argus/backend/tests/results_service/test_results_service.py
+++ b/argus/backend/tests/results_service/test_results_service.py
@@ -3,8 +3,11 @@
import pytest
from argus.backend.models.result import ArgusGenericResultMetadata, ArgusGenericResultData, ColumnMetadata
+from argus.backend.plugins.sct.testrun import SCTTestRun
+from argus.backend.plugins.sct.udt import PackageVersion
from argus.backend.service.results_service import ResultsService
+
@pytest.fixture
def setup_data(argus_db):
test_id = uuid4()
@@ -70,6 +73,7 @@ def test_results_service_should_return_results_within_date_range(setup_data):
assert len(filtered_data) == 1
assert filtered_data[0].value == 150.0
+
def test_results_service_should_return_no_results_outside_date_range(setup_data):
test_id, table, data = setup_data
service = ResultsService()
@@ -87,6 +91,7 @@ def test_results_service_should_return_no_results_outside_date_range(setup_data)
assert len(filtered_data) == 0
+
def test_results_service_should_return_all_results_with_no_date_range(setup_data):
test_id, table, data = setup_data
service = ResultsService()
@@ -98,3 +103,70 @@ def test_results_service_should_return_all_results_with_no_date_range(setup_data
)
assert len(filtered_data) == 3
+
+
+def test_get_tests_by_version_groups_runs_correctly(argus_db):
+ test_id1 = uuid4()
+ test_id2 = uuid4()
+ run_id1 = uuid4()
+ run_id2 = uuid4()
+ run_id3 = uuid4()
+ run_id4 = uuid4()
+ pkg_v4_0 = PackageVersion(name='scylla', version='4.0', date='2021-01-01', revision_id='', build_id='')
+ pkg_v4_1 = PackageVersion(name='scylla', version='4.1', date='2021-02-01', revision_id='', build_id='')
+
+ SCTTestRun(
+ id=run_id1,
+ build_id='build_id1',
+ test_id=test_id1,
+        test_method='test_method1',  # test_method is required by get_tests_by_version
+ investigation_status='',
+ packages=[pkg_v4_0]
+ ).save()
+ SCTTestRun(
+ id=run_id2,
+ build_id='build_id1',
+ test_id=test_id1,
+        test_method='test_method2',  # test_method is required by get_tests_by_version
+ investigation_status='ignored',
+ packages=[pkg_v4_0]
+ ).save()
+ SCTTestRun(
+ id=run_id3,
+ build_id='build_id1',
+ test_id=test_id2,
+        test_method='test_method1',  # test_method is required by get_tests_by_version
+ investigation_status='',
+ packages=[pkg_v4_0]
+ ).save()
+ SCTTestRun(
+ id=run_id4,
+ build_id='build_id1',
+ test_id=test_id2,
+        test_method='test_method1',  # test_method is required by get_tests_by_version
+ investigation_status='',
+ packages=[pkg_v4_1]
+ ).save()
+
+ sut_package_name = 'scylla'
+ test_ids = [test_id1, test_id2]
+ service = ResultsService()
+ service._exclude_disabled_tests = lambda x: x
+ result = service.get_tests_by_version(sut_package_name, test_ids)
+
+ expected_result = {'test_info': {str(test_id1): {'build_id': 'build_id1',
+ 'name': None},
+ str(test_id2): {'build_id': 'build_id1',
+ 'name': None}},
+ 'versions': {'4.0-2021-01-01-': {
+ str(test_id1): {'test_method1': {'run_id': str(run_id1),
+ 'started_by': None,
+ 'status': 'created'}},
+ str(test_id2): {'test_method1': {'run_id': str(run_id3),
+ 'started_by': None,
+ 'status': 'created'}}},
+ '4.1-2021-02-01-': {str(test_id2): {
+ 'test_method1': {'run_id': str(run_id4),
+ 'started_by': None,
+ 'status': 'created'}}}}}
+ assert result == expected_result
diff --git a/argus/backend/tests/results_service/test_validation_rules.py b/argus/backend/tests/results_service/test_validation_rules.py
index 6400bce3..6d1144b5 100644
--- a/argus/backend/tests/results_service/test_validation_rules.py
+++ b/argus/backend/tests/results_service/test_validation_rules.py
@@ -37,15 +37,18 @@ class SampleCell:
def results_to_dict(results):
- return {
- cell['column']: {
- cell['row']: {
- 'value': cell['value'] if cell['value'] is not None else cell['value_text'],
- 'status': cell['status']
+ actual_cells = {}
+ table_data = results['Test Table Name']['table_data']
+
+ for row_key, row_data in table_data.items():
+ for col_name, col_data in row_data.items():
+ if col_name not in actual_cells:
+ actual_cells[col_name] = {}
+ actual_cells[col_name][row_key] = {
+ 'value': col_data['value'],
+ 'status': col_data['status']
}
- }
- for cell in results['cells']
- }
+ return actual_cells
def test_can_track_validation_rules_changes(fake_test, client_service, results_service, release, group):
diff --git a/frontend/Common/ViewTypes.js b/frontend/Common/ViewTypes.js
index 7561ddbb..f0fcd042 100644
--- a/frontend/Common/ViewTypes.js
+++ b/frontend/Common/ViewTypes.js
@@ -9,6 +9,7 @@ import {TestStatus} from "./TestStatus";
import {subUnderscores, titleCase} from "./TextUtils";
import ViewHighlights from "../Views/Widgets/ViewHighlights/ViewHighlights.svelte";
import IntegerValue from "../Views/WidgetSettingTypes/IntegerValue.svelte";
+import SummaryWidget from "../Views/Widgets/SummaryWidget/SummaryWidget.svelte";
export class Widget {
constructor(position = -1, type = "testDashboard", settings = {}) {
@@ -107,6 +108,18 @@ export const WIDGET_TYPES = {
}
},
},
+ summary: {
+ type: SummaryWidget,
+ friendlyName: "Per version summary for specified release",
+ settingDefinitions: {
+ packageName: {
+ type: StringValue,
+ default: "scylla-server",
+ help: "Package name (from Packages tab) to monitor",
+ displayName: "Package Name"
+ }
+ },
+ },
};
diff --git a/frontend/Github/GithubIssues.svelte b/frontend/Github/GithubIssues.svelte
index d860720e..c7fc165c 100644
--- a/frontend/Github/GithubIssues.svelte
+++ b/frontend/Github/GithubIssues.svelte
@@ -4,12 +4,10 @@
import { newIssueDestinations } from "../Common/IssueDestinations";
import GithubIssue from "./GithubIssue.svelte";
import { sendMessage } from "../Stores/AlertStore";
- import { faChevronDown, faChevronUp, faClipboard, faCopy } from "@fortawesome/free-solid-svg-icons";
+ import { faChevronDown, faChevronUp, faCopy } from "@fortawesome/free-solid-svg-icons";
import Fa from "svelte-fa";
import Color from "color";
- import ModalWindow from "../Common/ModalWindow.svelte";
- import { titleCase } from "../Common/TextUtils";
- import { faHtml5, faMarkdown } from "@fortawesome/free-brands-svg-icons";
+ import GithubIssuesCopyModal from "./GithubIssuesCopyModal.svelte";
export let id = "";
export let testId;
export let pluginName;
@@ -47,7 +45,6 @@
let currentPage = 0;
let PAGE_SIZE = 10;
let filterString = "";
- let issueCopy = false;
let availableLabels = [];
let selectedLabels = [];
const stateFilter = {
@@ -197,92 +194,12 @@
$: sortedIssues = paginateIssues(issues, sortCriteria, reverseSort, filterString, selectedLabels, stateFilter, PAGE_SIZE);
- const copyIssueTableAsMarkdown = function() {
- const issues = sortedIssues[currentPage] ?? [];
- let issueFormattedList = issues
- .sort((a, b) => a.issue_number - b.issue_number)
- .map(val => `|${val.state ? `${val.state.state.toUpperCase()} ` : " "}|${val.url}|${ val.state ? val.state.labels.map(v => v.name).join("\t") : ""}|${val.state && val.state.assignee ? val.state.assignee.login : ""}|`);
- navigator.clipboard.writeText(`Current Issues ${selectedLabels.length > 0 ? selectedLabels.map(label => `[${label.name}]`).join(" ") : ""}\n|State|Issue|Tags|Assignee|\n|---|---|---|---|\n${issueFormattedList.join("\n")}`);
- };
-
- const copyIssueTableAsText = function() {
- const issues = sortedIssues[currentPage] ?? [];
-
- const lines = issues.map(i => ` * ${i.url} (${i?.state?.state ?? ""}) [${i?.state?.assignee?.login ?? "Nobody"}]`);
- navigator.clipboard.writeText(`Issues\n${lines.join("\n")}`);
- };
-
- const copyIssueTableAsHTML = async function() {
- const table = document.querySelector("div#modalTableIssueView");
-
- const data = table.innerHTML;
- // Baseline: June 2024
- // eslint-disable-next-line no-undef
- const clipboardItem = new ClipboardItem({
- "text/html": new Blob([data], { type: "text/html" }),
- "text/plain": new Blob([data], { type: "text/plain" })
- });
-
- await navigator.clipboard.write([clipboardItem]);
- };
onMount(() => {
fetchIssues();
});
-{#if issueCopy}
-
-
-
- State
- Issue
- Title
- Tags
- Assignee
-
-
- {#each sortedIssues[currentPage] ?? [] as issue (issue.id)}
-
-
- {/each}
-
- {titleCase(issue?.state?.state ?? "")}
-
- {issue.owner}/{issue.repo}#{issue.issue_number}
-
-
- {issue.title}
-
-
- {#if issue.state}
- {#each issue.state.labels as label}
- {label.name}
-
- {/each}
- {/if}
-
- {#if issue.state.assignee}
- @{issue.state.assignee.login}
- {:else}
- None
- {/if}
-
-
State | +Issue | +Title | +Tags | +Assignee | + + + {#each sortedIssues[currentPage] ?? [] as issue (issue.id)} +
---|---|---|---|---|
{titleCase(issue?.state?.state ?? issue?.last_status + " (?)")} | ++ {issue.owner}/{issue.repo}#{issue.issue_number} + | ++ {issue.title} + | +
+ {#if issue.state}
+ {#each issue.state.labels as label}
+ {label.name} + {/each} + {:else} + Unknown + {/if} + |
+ + {#if issue.state} + {#if issue.state.assignee} + @{issue.state.assignee.login} + {:else} + None + {/if} + {:else} + Unknown + {/if} + | +
{result.description}
+ {/if} ++ + | + {#each result.columns as col} +{col.name} {col.unit ? `[${col.unit}]` : ''} | + {/each} +
---|---|
{row} | + {#each result.columns as col} + {#key result.table_data[row][col.name]} +
+ |
+ {/key}
+ {/each}
+