", unsafe_allow_html=True)
- _, button_column = st.columns([0.2, 0.8])
- with button_column:
- run_now_commands_tab, cli_commands_tab = st.tabs(["Test Suite Actions", "View CLI Commands"])
-
- with cli_commands_tab:
- if st.button(
- "Test Generation Command",
- help="Shows the run-test-generation CLI command",
- use_container_width=True,
- ):
- generate_tests_cli_dialog(selected)
-
- if st.button(
- "Test Execution Command",
- help="Shows the run-tests CLI command",
- use_container_width=True,
- ):
- run_tests_cli_dialog(project_code, selected)
-
- if st.button(
- "Observability Export Command",
- help="Shows the export-observability CLI command",
- use_container_width=True,
- ):
- observability_export_cli_dialog(selected)
-
- with run_now_commands_tab:
- if st.button("Run Test Generation", help="Run Test Generation", use_container_width=True):
- generate_tests_dialog(selected)
-
- if st.button("Run Test Execution", help="Run the tests", use_container_width=True):
- run_tests_dialog(project_code, selected)
-
- if st.button(
- "Run Observability Export",
- help="Exports test results to Observability for the current Test Suite",
- use_container_width=True,
- ):
- observability_export_dialog(selected)
-
-
-@st.dialog(title="Generate Tests")
-def generate_tests_dialog(selected_test_suite):
- container = st.empty()
- with container:
- st.markdown(":green[**Execute Test Generation for the Test Suite**]")
-
- warning_container = st.container()
- options_container = st.container()
- button_container = st.empty()
- status_container = st.empty()
-
- test_ct, unlocked_test_ct, unlocked_edits_ct = test_suite_service.get_test_suite_refresh_warning(
- selected_test_suite["id"]
- )
- if test_ct:
- warning_msg = ""
- counts_msg = f"\n\nAuto-Generated Tests: {test_ct}, Unlocked: {unlocked_test_ct}, Edited Unlocked: {unlocked_edits_ct}"
- if unlocked_edits_ct > 0:
- if unlocked_edits_ct > 1:
- warning_msg = "Manual changes have been made to auto-generated tests in this Test Suite that have not been locked. "
- else:
- warning_msg = "A manual change has been made to an auto-generated test in this Test Suite that has not been locked. "
- elif unlocked_test_ct > 0:
- warning_msg = "Auto-generated tests are present in this Test Suite that have not been locked. "
- warning_msg = f"{warning_msg}Generating tests now will overwrite unlocked tests subject to auto-generation based on the latest profiling.{counts_msg}"
- with warning_container:
- st.warning(warning_msg)
- if unlocked_edits_ct > 0:
- lock_edits_button = st.button("Lock Edited Tests")
- if lock_edits_button:
- edits_locked = test_suite_service.lock_edited_tests(selected_test_suite["id"])
- if edits_locked:
- st.info("Edited tests have been successfully locked.")
-
- with options_container:
- lst_generation_sets = test_suite_service.get_generation_set_choices()
- if lst_generation_sets:
- lst_generation_sets.insert(0, "(All Test Types)")
- str_generation_set = st.selectbox("Generation Set", lst_generation_sets)
- if str_generation_set == "(All Test Types)":
- str_generation_set = ""
- else:
- str_generation_set = ""
-
- with button_container:
- start_process_button_message = "Start"
- test_generation_button = st.button(start_process_button_message)
-
- if test_generation_button:
- button_container.empty()
-
- table_group_id = selected_test_suite["table_groups_id"]
- test_suite_key = selected_test_suite["test_suite"]
- status_container.info("Executing Test Generation...")
-
- try:
- run_test_gen_queries(table_group_id, test_suite_key, str_generation_set)
- except Exception as e:
- status_container.empty()
- status_container.error(f"Process had errors: {e!s}.")
-
- status_container.empty()
- status_container.success("Process has successfully finished.")
-
+ df = test_suite_service.get_by_table_group(project_code, table_group_id)
-@st.dialog(title="Delete Test Suite")
-def delete_test_suite_dialog(selected):
- selected_test_suite = selected[0]
- test_suite_id = selected_test_suite["id"]
- can_be_deleted = test_suite_service.cascade_delete([test_suite_id], dry_run=True)
+ for _, test_suite in df.iterrows():
+ subtitle = f"{connection['connection_name']} > {table_group['table_groups_name']}"
+ with testgen.card(title=test_suite["test_suite"], subtitle=subtitle) as test_suite_card:
+ with test_suite_card.actions:
+ testgen.button(
+ type="icon",
+ icon="output",
+ tooltip="Export results to observability",
+ tooltip_position="right",
+ on_click=lambda: observability_export_dialog(test_suite),
+ key=f"test_suite:keys:export:{test_suite['id']}",
+ )
+ testgen.button(
+ type="icon",
+ icon="edit",
+ tooltip="Edit test suite",
+ tooltip_position="right",
+ on_click=lambda: edit_test_suite_dialog(project_code, connection, table_group, test_suite),
+ key=f"test_suite:keys:edit:{test_suite['id']}",
+ )
+ testgen.button(
+ type="icon",
+ icon="delete",
+ tooltip="Delete test suite",
+ tooltip_position="right",
+ on_click=lambda: delete_test_suite_dialog(test_suite),
+ key=f"test_suite:keys:delete:{test_suite['id']}",
+ )
+
+ main_section, latest_run_section, actions_section = st.columns([.4, .4, .2])
+
+ with main_section:
+ testgen.link(
+ label=f"{test_suite['test_ct']} tests definitions",
+ href="test-definitions",
+ right_icon="chevron_right",
+ key=f"test_suite:keys:go-to-definitions:{test_suite['id']}",
+ )
+
+ st.html(f"""
+
+
Description
+
{test_suite['test_suite_description']}
+
+ """)
+
+ if (latest_run_start := test_suite['latest_run_start']) and not pd.isnull(latest_run_start):
+ with latest_run_section:
+ st.html('')
+ st.html('
Latest Run
')
+ testgen.link(
+ label=latest_run_start.strftime("%B %d, %H:%M %p"),
+ href="test-runs",
+ right_icon="chevron_right",
+ style="margin-bottom: 8px;",
+ height=29,
+ key=f"test_suite:keys:go-to-runs:{test_suite['id']}",
+ )
+ testgen.summary_bar(
+ items=[
+ { "label": "Passed", "value": int(test_suite["last_run_passed_ct"]), "color": "green" },
+ { "label": "Warnings", "value": int(test_suite["last_run_warning_ct"]), "color": "yellow" },
+ { "label": "Failed", "value": int(test_suite["last_run_failed_ct"]), "color": "red" },
+ { "label": "Errors", "value": int(test_suite["last_run_error_ct"]), "color": "grey" },
+ ],
+ height=30,
+ width=100,
+ key=f"test_suite:keys:run-rummary:{test_suite['id']}",
+ )
+
+ with actions_section:
+ testgen.button(
+ type="stroked",
+ label="Run Tests",
+ on_click=lambda: run_tests_dialog(project_code, test_suite),
+ key=f"test_suite:keys:runtests:{test_suite['id']}",
+ )
+ testgen.button(
+ type="stroked",
+ label="Generate Tests",
+ on_click=lambda: generate_tests_dialog(test_suite),
+ key=f"test_suite:keys:generatetests:{test_suite['id']}",
+ )
- fm.render_html_list(
- selected_test_suite,
- [
- "id",
- "test_suite",
- "test_suite_description",
- ],
- "Test Suite Information",
- int_data_width=700,
- )
- if not can_be_deleted:
- st.markdown(
- ":orange[This Test Suite has related data, which includes test definitions and may include test results. If you proceed, all related data will be permanently deleted. Are you sure you want to proceed?]",
- unsafe_allow_html=True,
- )
- accept_cascade_delete = st.toggle("I accept deletion of this Test Suite and all related TestGen data.")
+@st.dialog(title="Add Test Suite")
+def add_test_suite_dialog(project_code, connection, table_group):
+ show_test_suite("add", project_code, connection, table_group)
- with st.form("Delete Test Suite", clear_on_submit=True):
- disable_delete_button = authentication_service.current_user_has_read_role() or (
- not can_be_deleted and not accept_cascade_delete
- )
- delete = st.form_submit_button("Delete", disabled=disable_delete_button, type="primary")
- if delete:
- if test_suite_service.are_test_suites_in_use([test_suite_id]):
- st.error("This Test Suite is in use by a running process and cannot be deleted.")
- else:
- test_suite_service.cascade_delete([test_suite_id])
- success_message = f"Test Suite {selected_test_suite['test_suite']} has been deleted. "
- st.success(success_message)
- time.sleep(1)
- st.rerun()
+@st.dialog(title="Edit Test Suite")
+def edit_test_suite_dialog(project_code, connection, table_group, selected):
+ show_test_suite("edit", project_code, connection, table_group, selected)
def show_test_suite(mode, project_code, connection, table_group, selected=None):
connection_id = connection["connection_id"]
table_group_id = table_group["id"]
severity_options = ["Inherit", "Failed", "Warning"]
-
- selected_test_suite = selected[0] if mode == "edit" else None
+ selected_test_suite = selected if mode == "edit" else None
if mode == "edit" and not selected_test_suite["severity"]:
selected_test_suite["severity"] = severity_options[0]
@@ -307,7 +190,7 @@ def show_test_suite(mode, project_code, connection, table_group, selected=None):
with expander:
expander_left_column, expander_right_column = st.columns([0.50, 0.50])
- with st.form("Test Suite Add / Edit", clear_on_submit=True):
+ with st.form("Test Suite Add / Edit", clear_on_submit=True, border=False):
entity = {
"id": test_suite_id,
"project_code": project_code,
@@ -351,10 +234,13 @@ def show_test_suite(mode, project_code, connection, table_group, selected=None):
),
}
- submit_button_text = "Save" if mode == "edit" else "Add"
- submit = st.form_submit_button(
- submit_button_text, disabled=authentication_service.current_user_has_read_role()
- )
+ _, button_column = st.columns([.85, .15])
+ with button_column:
+ submit = st.form_submit_button(
+ "Save" if mode == "edit" else "Add",
+ use_container_width=True,
+ disabled=authentication_service.current_user_has_read_role(),
+ )
if submit:
if " " in entity["test_suite"]:
@@ -377,33 +263,82 @@ def show_test_suite(mode, project_code, connection, table_group, selected=None):
st.rerun()
-@st.dialog(title="Add Test Suite")
-def add_test_suite_dialog(project_code, connection, table_group):
- show_test_suite("add", project_code, connection, table_group)
+@st.dialog(title="Delete Test Suite")
+def delete_test_suite_dialog(selected_test_suite):
+ test_suite_id = selected_test_suite["id"]
+ test_suite_name = selected_test_suite["test_suite"]
+ can_be_deleted = test_suite_service.cascade_delete([test_suite_id], dry_run=True)
+ fm.render_html_list(
+ selected_test_suite,
+ [
+ "id",
+ "test_suite",
+ "test_suite_description",
+ ],
+ "Test Suite Information",
+ int_data_width=700,
+ )
-@st.dialog(title="Edit Test Suite")
-def edit_test_suite_dialog(project_code, connection, table_group, selected):
- show_test_suite("edit", project_code, connection, table_group, selected)
+ if not can_be_deleted:
+ st.markdown(
+ ":orange[This Test Suite has related data, which includes test definitions and may include test results. If you proceed, all related data will be permanently deleted. Are you sure you want to proceed?]",
+ unsafe_allow_html=True,
+ )
+ accept_cascade_delete = st.toggle("I accept deletion of this Test Suite and all related TestGen data.")
+
+ with st.form("Delete Test Suite", clear_on_submit=True, border=False):
+ disable_delete_button = authentication_service.current_user_has_read_role() or (
+ not can_be_deleted and not accept_cascade_delete
+ )
+
+ delete = False
+ _, button_column = st.columns([.85, .15])
+ with button_column:
+ delete = st.form_submit_button(
+ "Delete",
+ type="primary",
+ disabled=disable_delete_button,
+ use_container_width=True,
+ )
+
+ if delete:
+ if test_suite_service.are_test_suites_in_use([test_suite_id]):
+ st.error("This Test Suite is in use by a running process and cannot be deleted.")
+ else:
+ test_suite_service.cascade_delete([test_suite_id])
+ success_message = f"Test Suite {test_suite_name} has been deleted. "
+ st.success(success_message)
+ time.sleep(1)
+ st.rerun()
@st.dialog(title="Run Tests")
def run_tests_dialog(project_code, selected_test_suite):
- container = st.empty()
- with container:
- st.markdown(":green[**Run Tests for the Test Suite**]")
+ test_suite_key = selected_test_suite["test_suite"]
+ start_process_button_message = "Start"
+
+ with st.container():
+ st.markdown(f"Run tests for the test suite :green[{test_suite_key}]?")
+
+ if testgen.expander_toggle(expand_label="Show CLI command", key="test_suite:keys:run-tests-show-cli"):
+ st.code(
+ f"testgen run-tests --project-key {project_code} --test-suite-key {selected_test_suite['test_suite']}",
+ language="shellSession"
+ )
button_container = st.empty()
status_container = st.empty()
+ run_test_button = None
with button_container:
- start_process_button_message = "Start"
- run_test_button = st.button(start_process_button_message)
+ _, button_column = st.columns([.85, .15])
+ with button_column:
+ run_test_button = st.button(start_process_button_message, use_container_width=True)
if run_test_button:
button_container.empty()
- test_suite_key = selected_test_suite["test_suite"]
status_container.info(f"Running tests for test suite {test_suite_key}")
try:
@@ -418,41 +353,106 @@ def run_tests_dialog(project_code, selected_test_suite):
)
-@st.dialog(title="Run Tests CLI Command")
-def run_tests_cli_dialog(project_code, selected_test_suite):
- test_suite_name = selected_test_suite["test_suite"]
- command = f"testgen run-tests --project-key {project_code} --test-suite-key {test_suite_name}"
- st.code(command, language="shellSession")
-
-
-@st.dialog(title="Generate Tests CLI Command")
-def generate_tests_cli_dialog(selected_test_suite):
+@st.dialog(title="Generate Tests")
+def generate_tests_dialog(selected_test_suite):
+ test_suite_id = selected_test_suite["id"]
test_suite_key = selected_test_suite["test_suite"]
table_group_id = selected_test_suite["table_groups_id"]
- command = f"testgen run-test-generation --table-group-id {table_group_id} --test-suite-key {test_suite_key}"
- st.code(command, language="shellSession")
+ start_process_button_message = "Start"
+ with st.container():
+ st.markdown(f"Execute the test generation for test suite :green[{test_suite_key}]?")
-@st.dialog(title="Observability Export CLI Command")
-def observability_export_cli_dialog(selected_test_suite):
- test_suite_key = selected_test_suite["test_suite"]
- project_key = selected_test_suite["project_code"]
- command = f"testgen export-observability --project-key {project_key} --test-suite-key {test_suite_key}"
- st.code(command, language="shellSession")
+ warning_container = st.container()
+ options_container = st.container()
+
+ if testgen.expander_toggle(expand_label="Show CLI command", key="test_suite:keys:generate-tests-show-cli"):
+ st.code(
+ f"testgen run-test-generation --table-group-id {table_group_id} --test-suite-key {test_suite_key}",
+ language="shellSession",
+ )
+
+ button_container = st.empty()
+ status_container = st.empty()
+
+ test_ct, unlocked_test_ct, unlocked_edits_ct = test_suite_service.get_test_suite_refresh_warning(test_suite_id)
+ if test_ct:
+ warning_msg = ""
+ counts_msg = f"\n\nAuto-Generated Tests: {test_ct}, Unlocked: {unlocked_test_ct}, Edited Unlocked: {unlocked_edits_ct}"
+ if unlocked_edits_ct > 0:
+ if unlocked_edits_ct > 1:
+
+ warning_msg = "Manual changes have been made to auto-generated tests in this Test Suite that have not been locked. "
+ else:
+ warning_msg = "A manual change has been made to an auto-generated test in this Test Suite that has not been locked. "
+ elif unlocked_test_ct > 0:
+ warning_msg = "Auto-generated tests are present in this Test Suite that have not been locked. "
+ warning_msg = f"{warning_msg}Generating tests now will overwrite unlocked tests subject to auto-generation based on the latest profiling.{counts_msg}"
+ with warning_container:
+ st.warning(warning_msg)
+ if unlocked_edits_ct > 0:
+ lock_edits_button = st.button("Lock Edited Tests")
+ if lock_edits_button:
+ edits_locked = test_suite_service.lock_edited_tests(test_suite_id)
+ if edits_locked:
+ st.info("Edited tests have been successfully locked.")
+
+ with options_container:
+ lst_generation_sets = test_suite_service.get_generation_set_choices()
+ if lst_generation_sets:
+ lst_generation_sets.insert(0, "(All Test Types)")
+ str_generation_set = st.selectbox("Generation Set", lst_generation_sets)
+ if str_generation_set == "(All Test Types)":
+ str_generation_set = ""
+ else:
+ str_generation_set = ""
+
+ test_generation_button = None
+ with button_container:
+ _, button_column = st.columns([.85, .15])
+ with button_column:
+ test_generation_button = st.button(start_process_button_message, use_container_width=True)
+
+ if test_generation_button:
+ button_container.empty()
+
+ table_group_id = selected_test_suite["table_groups_id"]
+ test_suite_key = selected_test_suite["test_suite"]
+ status_container.info("Executing Test Generation...")
+
+ try:
+ run_test_gen_queries(table_group_id, test_suite_key, str_generation_set)
+ except Exception as e:
+ status_container.empty()
+ status_container.error(f"Process had errors: {e!s}.")
+
+ status_container.empty()
+ status_container.success("Process has successfully finished.")
@st.dialog(title="Export to Observability")
def observability_export_dialog(selected_test_suite):
- container = st.empty()
- with container:
- st.markdown(":green[**Execute the test export for the current Test Suite**]")
+ project_key = selected_test_suite["project_code"]
+ test_suite_key = selected_test_suite["test_suite"]
+ start_process_button_message = "Start"
+
+ with st.container():
+ st.markdown(f"Execute the test export for test suite :green[{test_suite_key}]?")
+
+ if testgen.expander_toggle(expand_label="Show CLI command", key="test_suite:keys:export-tests-show-cli"):
+ st.code(
+ f"testgen export-observability --project-key {project_key} --test-suite-key {test_suite_key}",
+ language="shellSession"
+ )
button_container = st.empty()
status_container = st.empty()
+ test_generation_button = None
with button_container:
- start_process_button_message = "Start"
- test_generation_button = st.button(start_process_button_message)
+ _, button_column = st.columns([.85, .15])
+ with button_column:
+ test_generation_button = st.button(start_process_button_message, use_container_width=True)
if test_generation_button:
button_container.empty()
From e1f899eecb31345b46fd100a055b5e95377507ce Mon Sep 17 00:00:00 2001
From: Luis Trinidad
Date: Thu, 29 Aug 2024 21:59:51 -0400
Subject: [PATCH 32/78] fix(ui): count of test definitions for each test suite
card
---
testgen/ui/queries/test_suite_queries.py | 17 ++++++++++-------
testgen/ui/views/test_suites.py | 11 ++++++-----
2 files changed, 16 insertions(+), 12 deletions(-)
diff --git a/testgen/ui/queries/test_suite_queries.py b/testgen/ui/queries/test_suite_queries.py
index a406635..7293ecb 100644
--- a/testgen/ui/queries/test_suite_queries.py
+++ b/testgen/ui/queries/test_suite_queries.py
@@ -21,18 +21,21 @@ def get_by_table_group(schema, project_code, table_group_id):
suites.component_key,
suites.component_type,
suites.component_name,
- COALESCE(last_run.test_ct, 0) as test_ct,
- last_run.test_starttime as latest_run_start,
- last_run.passed_ct as last_run_passed_ct,
- last_run.warning_ct as last_run_warning_ct,
- last_run.failed_ct as last_run_failed_ct,
- last_run.error_ct as last_run_error_ct
+ COUNT(definitions.id) as test_ct,
+ MAX(last_run.test_starttime) as latest_run_start,
+ MAX(last_run.passed_ct) as last_run_passed_ct,
+ MAX(last_run.warning_ct) as last_run_warning_ct,
+ MAX(last_run.failed_ct) as last_run_failed_ct,
+ MAX(last_run.error_ct) as last_run_error_ct
FROM {schema}.test_suites as suites
LEFT OUTER JOIN (
SELECT * FROM {schema}.test_runs ORDER BY test_starttime DESC LIMIT 1
- ) AS last_run ON (last_run.project_code = suites.project_code AND last_run.test_suite = suites.test_suite)
+ ) AS last_run ON (last_run.test_suite_id = suites.id)
+ LEFT OUTER JOIN {schema}.test_definitions AS definitions
+ ON (definitions.test_suite_id = suites.id)
WHERE suites.project_code = '{project_code}'
AND suites.table_groups_id = '{table_group_id}'
+ GROUP BY suites.id
ORDER BY suites.test_suite;
"""
return db.retrieve_data(sql)
diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py
index abc26c7..5e2f9b7 100644
--- a/testgen/ui/views/test_suites.py
+++ b/testgen/ui/views/test_suites.py
@@ -1,5 +1,6 @@
import time
import typing
+from functools import partial
import pandas as pd
import streamlit as st
@@ -76,7 +77,7 @@ def render(self, connection_id: str | None = None, table_group_id: str | None =
icon="output",
tooltip="Export results to observability",
tooltip_position="right",
- on_click=lambda: observability_export_dialog(test_suite),
+ on_click=partial(observability_export_dialog, test_suite),
key=f"test_suite:keys:export:{test_suite['id']}",
)
testgen.button(
@@ -84,7 +85,7 @@ def render(self, connection_id: str | None = None, table_group_id: str | None =
icon="edit",
tooltip="Edit test suite",
tooltip_position="right",
- on_click=lambda: edit_test_suite_dialog(project_code, connection, table_group, test_suite),
+ on_click=partial(edit_test_suite_dialog, project_code, connection, table_group, test_suite),
key=f"test_suite:keys:edit:{test_suite['id']}",
)
testgen.button(
@@ -92,7 +93,7 @@ def render(self, connection_id: str | None = None, table_group_id: str | None =
icon="delete",
tooltip="Delete test suite",
tooltip_position="right",
- on_click=lambda: delete_test_suite_dialog(test_suite),
+ on_click=partial(delete_test_suite_dialog, test_suite),
key=f"test_suite:keys:delete:{test_suite['id']}",
)
@@ -141,13 +142,13 @@ def render(self, connection_id: str | None = None, table_group_id: str | None =
testgen.button(
type="stroked",
label="Run Tests",
- on_click=lambda: run_tests_dialog(project_code, test_suite),
+ on_click=partial(run_tests_dialog, project_code, test_suite),
key=f"test_suite:keys:runtests:{test_suite['id']}",
)
testgen.button(
type="stroked",
label="Generate Tests",
- on_click=lambda: generate_tests_dialog(test_suite),
+ on_click=partial(generate_tests_dialog, test_suite),
key=f"test_suite:keys:generatetests:{test_suite['id']}",
)
From ce86ab302d3c5af41d6f267f44c0e58785116e29 Mon Sep 17 00:00:00 2001
From: Luis Trinidad
Date: Fri, 30 Aug 2024 10:38:59 -0400
Subject: [PATCH 33/78] misc: address linting issues
---
testgen/ui/components/widgets/__init__.py | 6 +++---
testgen/ui/components/widgets/button.py | 8 ++++----
testgen/ui/components/widgets/card.py | 2 +-
testgen/ui/components/widgets/link.py | 4 ++--
testgen/ui/views/test_suites.py | 14 +++++++-------
5 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/testgen/ui/components/widgets/__init__.py b/testgen/ui/components/widgets/__init__.py
index 4c40b33..ee16b62 100644
--- a/testgen/ui/components/widgets/__init__.py
+++ b/testgen/ui/components/widgets/__init__.py
@@ -1,9 +1,9 @@
# ruff: noqa: F401
from testgen.ui.components.widgets.breadcrumbs import breadcrumbs
+from testgen.ui.components.widgets.button import button
+from testgen.ui.components.widgets.card import card
from testgen.ui.components.widgets.expander_toggle import expander_toggle
+from testgen.ui.components.widgets.link import link
from testgen.ui.components.widgets.sidebar import sidebar
from testgen.ui.components.widgets.summary_bar import summary_bar
-from testgen.ui.components.widgets.card import card
-from testgen.ui.components.widgets.link import link
-from testgen.ui.components.widgets.button import button
diff --git a/testgen/ui/components/widgets/button.py b/testgen/ui/components/widgets/button.py
index da28825..c248981 100644
--- a/testgen/ui/components/widgets/button.py
+++ b/testgen/ui/components/widgets/button.py
@@ -7,7 +7,7 @@
def button(
- type: ButtonType = "basic",
+ type_: ButtonType = "basic",
label: str | None = None,
icon: str | None = None,
tooltip: str | None = None,
@@ -24,10 +24,10 @@ def button(
:param on_click: click handler for this button
"""
- props = {"type": type}
- if type != "icon":
+ props = {"type": type_}
+ if type_ != "icon":
if not label:
- raise ValueError(f"A label is required for {type} buttons")
+ raise ValueError(f"A label is required for {type_} buttons")
props.update({"label": label})
if icon:
diff --git a/testgen/ui/components/widgets/card.py b/testgen/ui/components/widgets/card.py
index d43fc3c..afcd9ed 100644
--- a/testgen/ui/components/widgets/card.py
+++ b/testgen/ui/components/widgets/card.py
@@ -30,7 +30,7 @@ def card(
header_html += f'
{title}
'
if subtitle:
header_html += f'{subtitle}'
- header_html += ''
+ header_html += ""
st.html(header_html)
actions_column.html(f'')
diff --git a/testgen/ui/components/widgets/link.py b/testgen/ui/components/widgets/link.py
index 8f2dc95..0685c3f 100644
--- a/testgen/ui/components/widgets/link.py
+++ b/testgen/ui/components/widgets/link.py
@@ -1,7 +1,7 @@
import random
-from testgen.ui.navigation.router import Router
from testgen.ui.components.utils.component import component
+from testgen.ui.navigation.router import Router
def link(
@@ -18,7 +18,7 @@ def link(
key: str | None = None,
) -> None:
if not key:
- key = f"testgen:link:{round(random.random() * 10_000)}"
+ key = f"testgen:link:{round(random.random() * 10_000)}" # noqa: S311
props = {
"href": href,
diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py
index 5e2f9b7..8663b7c 100644
--- a/testgen/ui/views/test_suites.py
+++ b/testgen/ui/views/test_suites.py
@@ -11,11 +11,11 @@
from testgen.commands.run_execute_tests import run_execution_steps_in_background
from testgen.commands.run_generate_tests import run_test_gen_queries
from testgen.commands.run_observability_exporter import export_test_results
+from testgen.ui.components import widgets as testgen
from testgen.ui.navigation.page import Page
from testgen.ui.services import connection_service, table_group_service
from testgen.ui.services.string_service import empty_if_null
from testgen.ui.session import session
-from testgen.ui.components import widgets as testgen
class TestSuitesPage(Page):
@@ -73,7 +73,7 @@ def render(self, connection_id: str | None = None, table_group_id: str | None =
with testgen.card(title=test_suite["test_suite"], subtitle=subtitle) as test_suite_card:
with test_suite_card.actions:
testgen.button(
- type="icon",
+ type_="icon",
icon="output",
tooltip="Export results to observability",
tooltip_position="right",
@@ -81,7 +81,7 @@ def render(self, connection_id: str | None = None, table_group_id: str | None =
key=f"test_suite:keys:export:{test_suite['id']}",
)
testgen.button(
- type="icon",
+ type_="icon",
icon="edit",
tooltip="Edit test suite",
tooltip_position="right",
@@ -89,7 +89,7 @@ def render(self, connection_id: str | None = None, table_group_id: str | None =
key=f"test_suite:keys:edit:{test_suite['id']}",
)
testgen.button(
- type="icon",
+ type_="icon",
icon="delete",
tooltip="Delete test suite",
tooltip_position="right",
@@ -114,7 +114,7 @@ def render(self, connection_id: str | None = None, table_group_id: str | None =
""")
- if (latest_run_start := test_suite['latest_run_start']) and not pd.isnull(latest_run_start):
+ if (latest_run_start := test_suite["latest_run_start"]) and not pd.isnull(latest_run_start):
with latest_run_section:
st.html('')
st.html('
Latest Run
')
@@ -140,13 +140,13 @@ def render(self, connection_id: str | None = None, table_group_id: str | None =
with actions_section:
testgen.button(
- type="stroked",
+ type_="stroked",
label="Run Tests",
on_click=partial(run_tests_dialog, project_code, test_suite),
key=f"test_suite:keys:runtests:{test_suite['id']}",
)
testgen.button(
- type="stroked",
+ type_="stroked",
label="Generate Tests",
on_click=partial(generate_tests_dialog, test_suite),
key=f"test_suite:keys:generatetests:{test_suite['id']}",
From a0700c1332430abeb725331a9bf4f7acd4ee3470 Mon Sep 17 00:00:00 2001
From: Ricardo Boni
Date: Fri, 30 Aug 2024 11:36:56 -0400
Subject: [PATCH 34/78] refactor(test_suites): Addressing code review feedback
---
.../030_initialize_new_schema_structure.sql | 21 ++-
.../dbsetup/060_create_standard_views.sql | 27 ++++
.../dbupgrade/0108_incremental_upgrade.sql | 70 +--------
.../dbupgrade/0109_incremental_upgrade.sql | 148 +-----------------
.../dbupgrade/0111_incremental_upgrade.sql | 13 ++
5 files changed, 61 insertions(+), 218 deletions(-)
create mode 100644 testgen/template/dbupgrade/0111_incremental_upgrade.sql
diff --git a/testgen/template/dbsetup/030_initialize_new_schema_structure.sql b/testgen/template/dbsetup/030_initialize_new_schema_structure.sql
index db24128..cf631bf 100644
--- a/testgen/template/dbsetup/030_initialize_new_schema_structure.sql
+++ b/testgen/template/dbsetup/030_initialize_new_schema_structure.sql
@@ -511,9 +511,7 @@ CREATE TABLE working_agg_cat_tests (
test_measures TEXT,
test_conditions TEXT,
CONSTRAINT working_agg_cat_tests_trid_sn_tn_cs
- PRIMARY KEY (test_run_id, schema_name, table_name, cat_sequence),
- CONSTRAINT working_agg_cat_tests_test_runs_fk
- FOREIGN KEY (test_run_id) REFERENCES test_runs
+ PRIMARY KEY (test_run_id, schema_name, table_name, cat_sequence)
);
CREATE TABLE working_agg_cat_results (
@@ -524,9 +522,7 @@ CREATE TABLE working_agg_cat_results (
measure_results TEXT,
test_results TEXT,
CONSTRAINT working_agg_cat_results_tri_sn_tn_cs
- PRIMARY KEY (test_run_id, schema_name, table_name, cat_sequence),
- CONSTRAINT working_agg_cat_results_test_runs_fk
- FOREIGN KEY (test_run_id) REFERENCES test_runs
+ PRIMARY KEY (test_run_id, schema_name, table_name, cat_sequence)
);
CREATE TABLE cat_test_conditions (
@@ -592,6 +588,10 @@ CREATE TABLE tg_revision (
revision INTEGER
);
+-- Index working table - ORIGINAL
+CREATE INDEX working_agg_cat_tests_test_run_id_index
+ ON working_agg_cat_tests(test_run_id);
+
-- Index Connections
CREATE UNIQUE INDEX uix_con_id
ON connections(id);
@@ -617,6 +617,9 @@ CREATE INDEX ix_ts_con
ON test_suites(connection_id);
-- Index test_definitions
+CREATE INDEX ix_td_ts_fk
+ ON test_definitions(test_suite_id);
+
CREATE INDEX ix_td_pc_stc_tst
ON test_definitions(test_suite_id, schema_name, table_name, column_name, test_type);
@@ -630,6 +633,9 @@ CREATE INDEX ix_td_ts_tc
ON test_definitions(test_suite_id, table_name, column_name, test_type);
-- Index test_runs
+CREATE INDEX ix_trun_ts_fk
+ ON test_runs(test_suite_id);
+
CREATE INDEX ix_trun_pc_ts_time
ON test_runs(test_suite_id, test_starttime);
@@ -640,6 +646,9 @@ CREATE INDEX ix_trun_time
CREATE UNIQUE INDEX uix_tr_id
ON test_results(id);
+CREATE INDEX ix_tr_pc_ts
+ ON test_results(test_suite_id);
+
CREATE INDEX ix_tr_trun
ON test_results(test_run_id);
diff --git a/testgen/template/dbsetup/060_create_standard_views.sql b/testgen/template/dbsetup/060_create_standard_views.sql
index f0b451b..2984bcf 100644
--- a/testgen/template/dbsetup/060_create_standard_views.sql
+++ b/testgen/template/dbsetup/060_create_standard_views.sql
@@ -80,6 +80,33 @@ GROUP BY r.id, r.project_code, cc.connection_name, r.connection_id,
r.profiling_starttime, r.profiling_endtime, r.status;
+DROP VIEW IF EXISTS v_test_runs;
+
+CREATE VIEW v_test_runs
+ AS
+SELECT r.id as test_run_id,
+ p.project_code,
+ p.project_name,
+ ts.test_suite,
+ r.test_starttime,
+ TO_CHAR(r.test_endtime - r.test_starttime, 'HH24:MI:SS') as duration,
+ r.status, r.log_message,
+ COUNT(*) as test_ct,
+ SUM(result_code) as passed_ct,
+ COALESCE(SUM(CASE WHEN tr.result_status = 'Failed' THEN 1 END), 0) as failed_ct,
+ COALESCE(SUM(CASE WHEN tr.result_status = 'Warning' THEN 1 END), 0) as warning_ct,
+ r.process_id
+ FROM test_runs r
+INNER JOIN test_suites ts
+ ON (r.test_suite_id = ts.id)
+INNER JOIN projects p
+ ON (ts.project_code = p.project_code)
+INNER JOIN test_results tr
+ ON (r.id = tr.test_run_id)
+GROUP BY r.id, p.project_code, ts.test_suite, r.test_starttime, r.test_endtime,
+ r.process_id, r.status, r.log_message, p.project_name;
+
+
DROP VIEW IF EXISTS v_test_results;
CREATE VIEW v_test_results
diff --git a/testgen/template/dbupgrade/0108_incremental_upgrade.sql b/testgen/template/dbupgrade/0108_incremental_upgrade.sql
index 39f0ed3..df8b7cb 100644
--- a/testgen/template/dbupgrade/0108_incremental_upgrade.sql
+++ b/testgen/template/dbupgrade/0108_incremental_upgrade.sql
@@ -3,7 +3,7 @@ SET SEARCH_PATH TO {SCHEMA_NAME};
-- Step 1: Drop everything that depends on the current state
DROP TABLE execution_queue;
-DROP VIEW v_test_results;
+DROP VIEW IF EXISTS v_test_results;
ALTER TABLE test_definitions DROP CONSTRAINT test_definitions_test_suites_project_code_test_suite_fk;
ALTER TABLE test_results DROP CONSTRAINT test_results_test_suites_project_code_test_suite_fk;
ALTER TABLE test_suites DROP CONSTRAINT test_suites_project_code_test_suite_pk;
@@ -37,71 +37,3 @@ ALTER TABLE test_definitions DROP COLUMN project_code;
CREATE INDEX ix_td_pc_stc_tst
ON test_definitions(test_suite_id, schema_name, table_name, column_name, test_type);
-
-CREATE VIEW v_test_results AS
- SELECT p.project_name,
- ts.test_suite,
- tg.table_groups_name,
- cn.connection_name, cn.project_host, cn.sql_flavor,
- tt.dq_dimension,
- r.schema_name, r.table_name, r.column_names,
- r.test_time as test_date,
- r.test_type, tt.id as test_type_id, tt.test_name_short, tt.test_name_long,
- r.test_description,
- tt.measure_uom, tt.measure_uom_description,
- c.test_operator,
- r.threshold_value::NUMERIC(16, 5) as threshold_value,
- r.result_measure::NUMERIC(16, 5),
- r.result_status,
- r.input_parameters,
- r.result_message,
- CASE WHEN result_code <> 1 THEN r.severity END as severity,
- CASE
- WHEN result_code <> 1 THEN r.disposition
- ELSE 'Passed'
- END AS disposition,
- r.result_code as passed_ct,
- (1 - r.result_code)::INTEGER as exception_ct,
- CASE
- WHEN result_status = 'Warning'
- AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1
- END::INTEGER as warning_ct,
- CASE
- WHEN result_status = 'Failed'
- AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1
- END::INTEGER as failed_ct,
- CASE
- WHEN result_message ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1
- END AS execution_error_ct,
- r.project_code,
- r.table_groups_id,
- r.id as test_result_id, c.id as connection_id,
- r.test_suite_id,
- r.test_definition_id as test_definition_id_runtime,
- CASE
- WHEN r.auto_gen = TRUE THEN d.id
- ELSE r.test_definition_id
- END as test_definition_id_current,
- r.test_run_id as test_run_id,
- r.auto_gen
- FROM test_results r
- INNER JOIN test_types tt
- ON r.test_type = tt.test_type
- LEFT JOIN test_definitions d
- ON r.test_suite_id = d.test_suite_id
- AND r.table_name = d.table_name
- AND r.column_names = COALESCE(d.column_name, 'N/A')
- AND r.test_type = d.test_type
- AND r.auto_gen = TRUE
- AND d.last_auto_gen_date IS NOT NULL
- INNER JOIN test_suites ts
- ON r.test_suite_id = ts.id
- INNER JOIN projects p
- ON r.project_code = p.project_code
- INNER JOIN table_groups tg
- ON r.table_groups_id = tg.id
- INNER JOIN connections cn
- ON tg.connection_id = cn.connection_id
- LEFT JOIN cat_test_conditions c
- ON cn.sql_flavor = c.sql_flavor
- AND r.test_type = c.test_type;
diff --git a/testgen/template/dbupgrade/0109_incremental_upgrade.sql b/testgen/template/dbupgrade/0109_incremental_upgrade.sql
index 1a74941..028dcc4 100644
--- a/testgen/template/dbupgrade/0109_incremental_upgrade.sql
+++ b/testgen/template/dbupgrade/0109_incremental_upgrade.sql
@@ -2,14 +2,14 @@ SET SEARCH_PATH TO {SCHEMA_NAME};
-- Step 1: Drop everything that depends on the current state
-DROP VIEW v_test_runs; -- Not needed, unused
-DROP VIEW v_test_results;
-DROP VIEW v_queued_observability_results;
+DROP VIEW IF EXISTS v_test_runs;
+DROP VIEW IF EXISTS v_test_results;
+DROP VIEW IF EXISTS v_queued_observability_results;
DROP INDEX cix_tr_pc_ts;
-DROP INDEX ix_tr_pc_ts; -- Not needed, replaced by a FK
+DROP INDEX ix_tr_pc_ts;
DROP INDEX ix_tr_pc_sctc_tt;
DROP INDEX ix_trun_pc_ts_time;
-DROP INDEX working_agg_cat_tests_test_run_id_index; -- Not needed, given the column is a FK
+DROP INDEX working_agg_cat_tests_test_run_id_index;
-- Step 2: Adjust the tables
@@ -74,141 +74,3 @@ CREATE INDEX cix_tr_pc_ts
CREATE INDEX ix_trun_pc_ts_time
ON test_runs(test_suite_id, test_starttime);
-
-CREATE VIEW v_test_results
-AS
-SELECT p.project_name,
- ts.test_suite,
- tg.table_groups_name,
- cn.connection_name, cn.project_host, cn.sql_flavor,
- tt.dq_dimension,
- r.schema_name, r.table_name, r.column_names,
- r.test_time as test_date,
- r.test_type, tt.id as test_type_id, tt.test_name_short, tt.test_name_long,
- r.test_description,
- tt.measure_uom, tt.measure_uom_description,
- c.test_operator,
- r.threshold_value::NUMERIC(16, 5) as threshold_value,
- r.result_measure::NUMERIC(16, 5),
- r.result_status,
- r.input_parameters,
- r.result_message,
- CASE WHEN result_code <> 1 THEN r.severity END as severity,
- CASE
- WHEN result_code <> 1 THEN r.disposition
- ELSE 'Passed'
- END as disposition,
- r.result_code as passed_ct,
- (1 - r.result_code)::INTEGER as exception_ct,
- CASE
- WHEN result_status = 'Warning'
- AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1
- END::INTEGER as warning_ct,
- CASE
- WHEN result_status = 'Failed'
- AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1
- END::INTEGER as failed_ct,
- CASE
- WHEN result_message ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1
- END as execution_error_ct,
- p.project_code,
- r.table_groups_id,
- r.id as test_result_id, c.id as connection_id,
- r.test_suite_id,
- r.test_definition_id as test_definition_id_runtime,
- CASE
- WHEN r.auto_gen = TRUE THEN d.id
- ELSE r.test_definition_id
- END as test_definition_id_current,
- r.test_run_id as test_run_id,
- r.auto_gen
- FROM test_results r
-INNER JOIN test_types tt
- ON (r.test_type = tt.test_type)
-LEFT JOIN test_definitions d
- ON (r.test_suite_id = d.test_suite_id
- AND r.table_name = d.table_name
- AND r.column_names = COALESCE(d.column_name, 'N/A')
- AND r.test_type = d.test_type
- AND r.auto_gen = TRUE
- AND d.last_auto_gen_date IS NOT NULL)
-INNER JOIN test_suites ts
- ON (r.test_suite_id = ts.id)
-INNER JOIN projects p
- ON (ts.project_code = p.project_code)
-INNER JOIN table_groups tg
- ON (r.table_groups_id = tg.id)
-INNER JOIN connections cn
- ON (tg.connection_id = cn.connection_id)
-LEFT JOIN cat_test_conditions c
- ON (cn.sql_flavor = c.sql_flavor
- AND r.test_type = c.test_type);
-
-CREATE VIEW v_queued_observability_results
- AS
-SELECT
- p.project_name,
- cn.sql_flavor as component_tool,
- ts.test_suite_schema as schema,
- cn.connection_name,
- cn.project_db,
-
- CASE
- WHEN tg.profile_use_sampling = 'Y' THEN tg.profile_sample_min_count
- END as sample_min_count,
- tg.id as group_id,
- tg.profile_use_sampling = 'Y' as uses_sampling,
- ts.project_code,
- CASE
- WHEN tg.profile_use_sampling = 'Y' THEN tg.profile_sample_percent
- END as sample_percentage,
-
- tg.profiling_table_set,
- tg.profiling_include_mask,
- tg.profiling_exclude_mask,
-
- COALESCE(ts.component_type, 'dataset') as component_type,
- COALESCE(ts.component_key, tg.id::VARCHAR) as component_key,
- COALESCE(ts.component_name, tg.table_groups_name) as component_name,
-
- r.column_names,
- r.table_name,
- ts.test_suite,
- ts.id AS test_suite_id,
- r.input_parameters,
- r.test_definition_id,
- tt.test_name_short as type,
- CASE
- WHEN c.test_operator IN ('>', '>=') THEN d.threshold_value
- END as min_threshold,
- CASE
- WHEN c.test_operator IN ('<', '<=') THEN d.threshold_value
- END as max_threshold,
- tt.test_name_long as name,
- tt.test_description as description,
- r.test_time as start_time,
- r.test_time as end_time,
- r.result_message as result_message,
- tt.dq_dimension,
- r.result_status,
- r.result_id,
- r.result_measure as metric_value,
- tt.measure_uom,
- tt.measure_uom_description
- FROM test_results r
-INNER JOIN test_types tt
- ON (r.test_type = tt.test_type)
-INNER JOIN test_definitions d
- ON (r.test_definition_id = d.id)
-INNER JOIN test_suites ts
- ON r.test_suite_id = ts.id
-INNER JOIN table_groups tg
- ON (d.table_groups_id = tg.id)
-INNER JOIN connections cn
- ON (tg.connection_id = cn.connection_id)
-INNER JOIN projects p
- ON (ts.project_code = p.project_code)
-INNER JOIN cat_test_conditions c
- ON (cn.sql_flavor = c.sql_flavor
- AND d.test_type = c.test_type)
-WHERE r.observability_status = 'Queued';
diff --git a/testgen/template/dbupgrade/0111_incremental_upgrade.sql b/testgen/template/dbupgrade/0111_incremental_upgrade.sql
new file mode 100644
index 0000000..b53065a
--- /dev/null
+++ b/testgen/template/dbupgrade/0111_incremental_upgrade.sql
@@ -0,0 +1,13 @@
+SET SEARCH_PATH TO {SCHEMA_NAME};
+
+CREATE INDEX working_agg_cat_tests_test_run_id_index
+ ON working_agg_cat_tests(test_run_id);
+
+CREATE INDEX ix_td_ts_fk
+ ON test_definitions(test_suite_id);
+
+CREATE INDEX ix_trun_ts_fk
+ ON test_runs(test_suite_id);
+
+CREATE INDEX ix_tr_pc_ts
+ ON test_results(test_suite_id);
From 6f49f82c8c2329737850ab897b3b8f2684af43f7 Mon Sep 17 00:00:00 2001
From: Aarthy Adityan
Date: Thu, 5 Sep 2024 02:25:18 -0400
Subject: [PATCH 35/78] feat(ui): add query parameters to all pages
---
testgen/common/date_service.py | 4 +-
testgen/ui/app.py | 20 +-
testgen/ui/assets/style.css | 25 +-
testgen/ui/bootstrap.py | 3 +-
.../frontend/js/components/breadcrumbs.js | 7 +-
.../components/frontend/js/components/link.js | 7 +-
.../frontend/js/components/summary_bar.js | 14 +-
testgen/ui/components/widgets/__init__.py | 8 +
testgen/ui/components/widgets/breadcrumbs.py | 10 +-
testgen/ui/components/widgets/card.py | 2 +-
testgen/ui/components/widgets/link.py | 11 +-
testgen/ui/components/widgets/page.py | 98 +++++
testgen/ui/components/widgets/sidebar.py | 3 +-
testgen/ui/components/widgets/summary_bar.py | 3 +-
testgen/ui/navigation/page.py | 15 +-
testgen/ui/navigation/router.py | 8 +
testgen/ui/queries/profiling_queries.py | 4 +-
testgen/ui/queries/test_suite_queries.py | 44 ++-
testgen/ui/services/form_service.py | 41 +-
testgen/ui/services/project_service.py | 27 ++
testgen/ui/services/table_group_service.py | 2 +-
.../ui/services/test_definition_service.py | 4 +-
testgen/ui/services/test_suite_service.py | 9 +-
testgen/ui/services/toolbar_service.py | 77 ----
testgen/ui/views/connections.py | 41 +-
testgen/ui/views/login.py | 2 +-
testgen/ui/views/overview.py | 6 +-
testgen/ui/views/profiling_anomalies.py | 352 +++++++++---------
testgen/ui/views/profiling_results.py | 296 +++++++--------
testgen/ui/views/profiling_summary.py | 78 ++--
testgen/ui/views/project_settings.py | 23 +-
testgen/ui/views/table_groups.py | 55 ++-
testgen/ui/views/test_definitions.py | 328 +++++-----------
testgen/ui/views/test_results.py | 218 +++++------
testgen/ui/views/test_runs.py | 109 +++---
testgen/ui/views/test_suites.py | 101 ++---
36 files changed, 969 insertions(+), 1086 deletions(-)
create mode 100644 testgen/ui/components/widgets/page.py
create mode 100644 testgen/ui/services/project_service.py
delete mode 100644 testgen/ui/services/toolbar_service.py
diff --git a/testgen/common/date_service.py b/testgen/common/date_service.py
index 510bbdc..e0e37f2 100644
--- a/testgen/common/date_service.py
+++ b/testgen/common/date_service.py
@@ -52,13 +52,13 @@ def create_timezoned_column_in_dataframe(streamlit_session, df, new_column_name,
)
-def get_timezoned_timestamp(streamlit_session, value):
+def get_timezoned_timestamp(streamlit_session, value, dateformat="%b %-d, %-I:%M %p"):
ret = None
if value and "browser_timezone" in streamlit_session:
data = {"value": [value]}
df = pd.DataFrame(data)
timezone = streamlit_session["browser_timezone"]
- df["value"] = df["value"].dt.tz_localize("UTC").dt.tz_convert(timezone).dt.strftime("%Y-%m-%d %H:%M:%S")
+ df["value"] = df["value"].dt.tz_localize("UTC").dt.tz_convert(timezone).dt.strftime(dateformat)
ret = df.iloc[0, 0]
return ret
diff --git a/testgen/ui/app.py b/testgen/ui/app.py
index 437f483..4308d40 100644
--- a/testgen/ui/app.py
+++ b/testgen/ui/app.py
@@ -8,9 +8,8 @@
from testgen.common.docker_service import check_basic_configuration
from testgen.ui import bootstrap
from testgen.ui.components import widgets as testgen
-from testgen.ui.queries import project_queries
from testgen.ui.services import database_service as db
-from testgen.ui.services import javascript_service, user_session_service
+from testgen.ui.services import javascript_service, project_service, user_session_service
from testgen.ui.session import session
@@ -33,9 +32,9 @@ def render(log_level: int = logging.INFO):
session.dbschema = db.get_schema()
- projects = get_projects()
+ projects = project_service.get_projects()
if not session.project and len(projects) > 0:
- set_current_project(projects[0]["code"])
+ project_service.set_current_project(projects[0]["code"])
if session.authentication_status is None and not session.logging_out:
user_session_service.load_user_session()
@@ -68,19 +67,6 @@ def set_locale():
st.session_state["browser_timezone"] = timezone
-@st.cache_data(show_spinner=False)
-def get_projects():
- projects = project_queries.get_projects()
- projects = [
- {"code": project["project_code"], "name": project["project_name"]} for project in projects.to_dict("records")
- ]
-
- return projects
-
-def set_current_project(project_code: str) -> None:
- session.project = project_code
-
-
def get_image_path(path: str) -> str:
return str(Path(__file__).parent / path)
diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css
index 61396ae..c3a39c5 100644
--- a/testgen/ui/assets/style.css
+++ b/testgen/ui/assets/style.css
@@ -59,6 +59,14 @@ section[data-testid="stSidebar"] {
section.main > :nth-child(1 of div).block-container {
padding: 24px;
}
+
+div[data-testid="stVerticalBlock"] {
+ gap: 0.5rem;
+}
+
+div[data-testid="collapsedControl"] {
+ top: 0.5rem;
+}
/* */
/* Dialog - sets the width of all st.dialog */
@@ -139,19 +147,26 @@ button[title="Show password text"] {
color: var(--caption-text-color);
font-style: italic;
}
+/* ... */
-[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.testgen_card-actions) [data-testid="stVerticalBlock"] {
+[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.flex-row) [data-testid="stVerticalBlock"] {
width: 100%;
flex-direction: row;
- justify-content: flex-end;
}
-[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.testgen_card-actions) [data-testid="stVerticalBlock"] > div[data-testid="element-container"],
-[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.testgen_card-actions) [data-testid="stVerticalBlock"] > div[data-testid="element-container"] > div[data-testid] {
+[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.flex-row) [data-testid="stVerticalBlock"] > div[data-testid="element-container"],
+[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.flex-row) [data-testid="stVerticalBlock"] > div[data-testid="element-container"] > div[data-testid] {
width: auto !important;
max-height: 40px;
}
-/* ... */
+
+[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.flex-start) [data-testid="stVerticalBlock"] {
+ justify-content: flex-start;
+}
+
+[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.flex-end) [data-testid="stVerticalBlock"] {
+ justify-content: flex-end;
+}
[data-testid="stVerticalBlock"]:has(> div.element-container > div.stHtml > i.no-flex-gap) {
gap: unset;
diff --git a/testgen/ui/bootstrap.py b/testgen/ui/bootstrap.py
index 1f9a8f2..03a95f5 100644
--- a/testgen/ui/bootstrap.py
+++ b/testgen/ui/bootstrap.py
@@ -20,7 +20,7 @@
from testgen.ui.views.profiling_summary import DataProfilingPage
from testgen.ui.views.project_settings import ProjectSettingsPage
from testgen.ui.views.table_groups import TableGroupsPage
-from testgen.ui.views.test_definitions import TestDefinitionsPage, TestDefinitionsPageFromSuite
+from testgen.ui.views.test_definitions import TestDefinitionsPage
from testgen.ui.views.test_results import TestResultsPage
from testgen.ui.views.test_runs import TestRunsPage
from testgen.ui.views.test_suites import TestSuitesPage
@@ -38,7 +38,6 @@
TableGroupsPage,
TestSuitesPage,
TestDefinitionsPage,
- TestDefinitionsPageFromSuite,
ProjectSettingsPage,
]
diff --git a/testgen/ui/components/frontend/js/components/breadcrumbs.js b/testgen/ui/components/frontend/js/components/breadcrumbs.js
index e8ba99e..d6976c8 100644
--- a/testgen/ui/components/frontend/js/components/breadcrumbs.js
+++ b/testgen/ui/components/frontend/js/components/breadcrumbs.js
@@ -2,6 +2,7 @@
* @typedef Breadcrumb
* @type {object}
* @property {string} path
+ * @property {object} params
* @property {string} label
*
* @typedef Properties
@@ -30,7 +31,7 @@ const Breadcrumbs = (/** @type Properties */ props) => {
{ class: 'tg-breadcrumbs' },
breadcrumbs.reduce((items, b, idx) => {
const isLastItem = idx === breadcrumbs.length - 1;
- items.push(a({ class: `tg-breadcrumbs--${ isLastItem ? 'current' : 'active'}`, href: `#/${b.path}`, onclick: () => navigate(b.path) }, b.label))
+ items.push(a({ class: `tg-breadcrumbs--${ isLastItem ? 'current' : 'active'}`, href: `#/${b.path}`, onclick: () => navigate(b.path, b.params) }, b.label))
if (!isLastItem) {
items.push(span({class: 'tg-breadcrumbs--arrow'}, '>'));
}
@@ -41,8 +42,8 @@ const Breadcrumbs = (/** @type Properties */ props) => {
)
};
-function navigate(/** @type string */ path) {
- Streamlit.sendData(path);
+function navigate(/** @type string */ path, /** @type object */ params) {
+ Streamlit.sendData({ path, params });
return false;
}
diff --git a/testgen/ui/components/frontend/js/components/link.js b/testgen/ui/components/frontend/js/components/link.js
index 09500ff..17463d4 100644
--- a/testgen/ui/components/frontend/js/components/link.js
+++ b/testgen/ui/components/frontend/js/components/link.js
@@ -2,6 +2,7 @@
* @typedef Properties
* @type {object}
* @property {string} href
+ * @property {object} params
* @property {string} label
* @property {boolean} underline
* @property {string?} left_icon
@@ -28,7 +29,7 @@ const Link = (/** @type Properties */ props) => {
{
class: `tg-link ${props.underline.val ? 'tg-link--underline' : ''}`,
style: props.style,
- onclick: () => navigate(props.href.val),
+ onclick: () => navigate(props.href.val, props.params.val),
},
div(
{class: 'tg-link--wrapper'},
@@ -50,8 +51,8 @@ const LinkIcon = (
);
};
-function navigate(href) {
- Streamlit.sendData({ href });
+function navigate(href, params) {
+ Streamlit.sendData({ href, params });
}
const stylesheet = new CSSStyleSheet();
diff --git a/testgen/ui/components/frontend/js/components/summary_bar.js b/testgen/ui/components/frontend/js/components/summary_bar.js
index 6049a2b..ec67e01 100644
--- a/testgen/ui/components/frontend/js/components/summary_bar.js
+++ b/testgen/ui/components/frontend/js/components/summary_bar.js
@@ -8,6 +8,7 @@
* @typedef Properties
* @type {object}
* @property {Array.} items
+ * @property {string} label
* @property {number} height
* @property {number} width
*/
@@ -30,9 +31,10 @@ const SummaryBar = (/** @type Properties */ props) => {
const height = props.height.val || 24;
const width = props.width.val;
const summaryItems = props.items.val;
+ const label = props.label.val;
const total = summaryItems.reduce((sum, item) => sum + item.value, 0);
- Streamlit.setFrameHeight(height + 24);
+ Streamlit.setFrameHeight(height + 24 + (label ? 24 : 0));
if (!window.testgen.loadedStylesheets.summaryBar) {
document.adoptedStyleSheets.push(stylesheet);
@@ -41,6 +43,12 @@ const SummaryBar = (/** @type Properties */ props) => {
return div(
{ class: 'tg-summary-bar-wrapper' },
+ () => {
+ return label ? div(
+ { class: 'tg-summary-bar--label' },
+ label,
+ ) : null;
+ },
div(
{
class: 'tg-summary-bar',
@@ -62,6 +70,10 @@ const SummaryBar = (/** @type Properties */ props) => {
const stylesheet = new CSSStyleSheet();
stylesheet.replace(`
+.tg-summary-bar--label {
+ margin-bottom: 4px;
+}
+
.tg-summary-bar {
height: 100%;
display: flex;
diff --git a/testgen/ui/components/widgets/__init__.py b/testgen/ui/components/widgets/__init__.py
index ee16b62..7c25862 100644
--- a/testgen/ui/components/widgets/__init__.py
+++ b/testgen/ui/components/widgets/__init__.py
@@ -5,5 +5,13 @@
from testgen.ui.components.widgets.card import card
from testgen.ui.components.widgets.expander_toggle import expander_toggle
from testgen.ui.components.widgets.link import link
+from testgen.ui.components.widgets.page import (
+ flex_row_end,
+ flex_row_start,
+ no_flex_gap,
+ page_header,
+ toolbar_select,
+ whitespace,
+)
from testgen.ui.components.widgets.sidebar import sidebar
from testgen.ui.components.widgets.summary_bar import summary_bar
diff --git a/testgen/ui/components/widgets/breadcrumbs.py b/testgen/ui/components/widgets/breadcrumbs.py
index f997e9a..bb258d1 100644
--- a/testgen/ui/components/widgets/breadcrumbs.py
+++ b/testgen/ui/components/widgets/breadcrumbs.py
@@ -1,11 +1,8 @@
-import logging
import typing
from testgen.ui.components.utils.component import component
from testgen.ui.navigation.router import Router
-LOG = logging.getLogger("testgen")
-
def breadcrumbs(
key: str = "testgen:breadcrumbs",
@@ -20,14 +17,15 @@ def breadcrumbs(
:param breadcrumbs: list of dicts with label and path
"""
- path = component(
+ data = component(
id_="breadcrumbs",
key=key,
props={"breadcrumbs": breadcrumbs},
)
- if path:
- Router().navigate(to=path)
+ if data:
+ Router().navigate(to=data["path"], with_args=data["params"])
class Breadcrumb(typing.TypedDict):
path: str | None
+ params: dict
label: str
diff --git a/testgen/ui/components/widgets/card.py b/testgen/ui/components/widgets/card.py
index afcd9ed..97677c8 100644
--- a/testgen/ui/components/widgets/card.py
+++ b/testgen/ui/components/widgets/card.py
@@ -33,7 +33,7 @@ def card(
header_html += ""
st.html(header_html)
- actions_column.html(f'')
+ actions_column.html(f'')
yield CardContext(actions=actions_column)
diff --git a/testgen/ui/components/widgets/link.py b/testgen/ui/components/widgets/link.py
index 0685c3f..14511a8 100644
--- a/testgen/ui/components/widgets/link.py
+++ b/testgen/ui/components/widgets/link.py
@@ -1,5 +1,3 @@
-import random
-
from testgen.ui.components.utils.component import component
from testgen.ui.navigation.router import Router
@@ -8,6 +6,7 @@ def link(
href: str,
label: str,
*,
+ params: dict = {}, # noqa: B006
underline: bool = True,
left_icon: str | None = None,
left_icon_size: float = 20.0,
@@ -15,13 +14,11 @@ def link(
right_icon_size: float = 20.0,
height: float | None = 21.0,
style: str | None = None,
- key: str | None = None,
+ key: str = "testgen:link",
) -> None:
- if not key:
- key = f"testgen:link:{round(random.random() * 10_000)}" # noqa: S311
-
props = {
"href": href,
+ "params": params,
"label": label,
"height": height,
"underline": underline,
@@ -37,4 +34,4 @@ def link(
clicked = component(id_="link", key=key, props=props)
if clicked:
- Router().navigate(to=href, with_args={})
+ Router().navigate(to=href, with_args=params)
diff --git a/testgen/ui/components/widgets/page.py b/testgen/ui/components/widgets/page.py
new file mode 100644
index 0000000..2715aff
--- /dev/null
+++ b/testgen/ui/components/widgets/page.py
@@ -0,0 +1,98 @@
+import pandas as pd
+import streamlit as st
+from streamlit.delta_generator import DeltaGenerator
+from streamlit_extras.no_default_selectbox import selectbox
+
+from testgen.ui.components.widgets.breadcrumbs import Breadcrumb
+from testgen.ui.components.widgets.breadcrumbs import breadcrumbs as tg_breadcrumbs
+from testgen.ui.navigation.router import Router
+
+
+def page_header(
+ title: str,
+ help_link:str | None = None,
+ breadcrumbs: list["Breadcrumb"] | None = None,
+):
+ hcol1, hcol2 = st.columns([0.95, 0.05])
+ hcol1.subheader(title, anchor=False)
+ if help_link:
+ with hcol2:
+ whitespace(0.8)
+ st.page_link(help_link, label=" ", icon=":material/help:")
+
+ if breadcrumbs:
+ tg_breadcrumbs(breadcrumbs=breadcrumbs)
+
+ st.write(
+ '',
+ unsafe_allow_html=True,
+ )
+ if "last_page" in st.session_state:
+ if title != st.session_state["last_page"]:
+ st.cache_data.clear()
+ st.session_state["last_page"] = title
+
+
+def toolbar_select(
+ options: pd.DataFrame | list[str],
+ value_column: str | None = None,
+ display_column: str | None = None,
+ default_value = None,
+ required: bool = False,
+ bind_to_query: str | None = None,
+ **kwargs,
+):
+ kwargs = {**kwargs}
+
+ if isinstance(options, pd.DataFrame):
+ value_column = value_column or options.columns[0]
+ display_column = display_column or value_column
+ kwargs["options"] = options[display_column]
+ if default_value in options[value_column].values:
+ kwargs["index"] = int(options[options[value_column] == default_value].index[0]) + (0 if required else 1)
+ else:
+ kwargs["options"] = options
+ if default_value in options:
+ kwargs["index"] = options.index(default_value)
+
+ if bind_to_query:
+ kwargs["key"] = kwargs.get("key", f"toolbar_select_{bind_to_query}")
+
+ def update_query_params():
+ query_value = st.session_state[kwargs["key"]]
+ if isinstance(options, pd.DataFrame):
+ query_value = options.loc[options[display_column] == query_value, value_column].iloc[0] if query_value != "---" else None
+ Router().set_query_params({ bind_to_query: query_value })
+
+ kwargs["on_change"] = update_query_params
+
+ selected = st.selectbox(**kwargs) if required else selectbox(**kwargs)
+
+ if selected and isinstance(options, pd.DataFrame):
+ return options.loc[options[display_column] == selected, value_column].iloc[0]
+
+ return selected
+
+
+def whitespace(size: float, container: DeltaGenerator | None = None):
+ _apply_html(f'', container)
+
+
+def flex_row_start(container: DeltaGenerator | None = None):
+ _apply_html('', container)
+
+
+def flex_row_end(container: DeltaGenerator | None = None):
+ _apply_html('', container)
+
+
+def no_flex_gap(container: DeltaGenerator | None = None):
+ _apply_html('', container)
+
+
+def _apply_html(html: str, container: DeltaGenerator | None = None):
+ if container:
+ container.html(html)
+ else:
+ st.html(html)
diff --git a/testgen/ui/components/widgets/sidebar.py b/testgen/ui/components/widgets/sidebar.py
index 2a9e880..5644c98 100644
--- a/testgen/ui/components/widgets/sidebar.py
+++ b/testgen/ui/components/widgets/sidebar.py
@@ -32,7 +32,8 @@ def sidebar(
if session.page_pending_sidebar is not None:
path = session.page_pending_sidebar
session.page_pending_sidebar = None
- Router().navigate(to=path)
+ params = { "project_code": session.project } if path != "" else {}
+ Router().navigate(to=path, with_args=params)
component(
id_="sidebar",
diff --git a/testgen/ui/components/widgets/summary_bar.py b/testgen/ui/components/widgets/summary_bar.py
index ccc80f3..fe1576a 100644
--- a/testgen/ui/components/widgets/summary_bar.py
+++ b/testgen/ui/components/widgets/summary_bar.py
@@ -8,6 +8,7 @@
def summary_bar(
items: list["SummaryItem"],
+ label: str | None = None,
height: int | None = None,
width: int | None = None,
key: str = "testgen:summary_bar",
@@ -26,7 +27,7 @@ def summary_bar(
id_="summary_bar",
key=key,
default={},
- props={"items": items, "height": height, "width": width},
+ props={"items": items, "label": label, "height": height, "width": width},
)
diff --git a/testgen/ui/navigation/page.py b/testgen/ui/navigation/page.py
index c29f9c3..b7a53cc 100644
--- a/testgen/ui/navigation/page.py
+++ b/testgen/ui/navigation/page.py
@@ -8,6 +8,7 @@
import testgen.ui.navigation.router
from testgen.ui.navigation.menu import MenuItem
+from testgen.ui.services import project_service
from testgen.ui.session import session
CanActivateGuard = typing.Callable[[], bool | str]
@@ -37,7 +38,19 @@ def _navigate(self) -> None:
session.page_pending_login = self.path
return self.router.navigate(to="")
- self.render(**(session.current_page_args or {}))
+ session.current_page_args = session.current_page_args or {}
+ self._validate_project_query_param()
+
+ self.render(**session.current_page_args)
+
+ def _validate_project_query_param(self) -> None:
+ if self.path != "" and ":" not in self.path:
+ valid_project_codes = [ project["code"] for project in project_service.get_projects() ]
+ if session.current_page_args.get("project_code") not in valid_project_codes: # Ensure top-level pages have valid project_code
+ session.current_page_args.update({ "project_code": session.project})
+ self.router.set_query_params({ "project_code": session.project})
+ else:
+ session.current_page_args.pop("project_code", None)
@abc.abstractmethod
def render(self, **kwargs) -> None:
diff --git a/testgen/ui/navigation/router.py b/testgen/ui/navigation/router.py
index dc375c9..8480ec5 100644
--- a/testgen/ui/navigation/router.py
+++ b/testgen/ui/navigation/router.py
@@ -28,6 +28,7 @@ def run(self, hide_sidebar=False) -> None:
# The default [data-testid="stSidebarNav"] element seems to be needed to keep the sidebar DOM stable
# Otherwise anything custom in the sidebar randomly flickers on page navigation
current_page = st.navigation(streamlit_pages, position="hidden" if hide_sidebar else "sidebar")
+ session.current_page_args = st.query_params
# This hack is needed because the auth cookie is not retrieved on the first run
# We have to store the page and wait for the second run
@@ -64,3 +65,10 @@ def navigate(self, /, to: str, with_args: dict = {}) -> None: # noqa: B006
error_message = f"{to}: {e!s}"
st.error(error_message)
LOG.exception(error_message)
+
+
+ def set_query_params(self, with_args: dict = {}) -> None: # noqa: B006
+ params = st.query_params
+ params.update(with_args)
+ params = {k: v for k, v in params.items() if v not in [None, "None", ""]}
+ st.query_params.from_dict(params)
diff --git a/testgen/ui/queries/profiling_queries.py b/testgen/ui/queries/profiling_queries.py
index d33a7e0..8f6c089 100644
--- a/testgen/ui/queries/profiling_queries.py
+++ b/testgen/ui/queries/profiling_queries.py
@@ -67,7 +67,7 @@ def lookup_db_parentage_from_run(str_profile_run_id):
str_schema = st.session_state["dbschema"]
# Define the query
str_sql = f"""
- SELECT profiling_starttime as profile_run_date, g.table_groups_name
+ SELECT profiling_starttime as profile_run_date, table_groups_id, g.table_groups_name, g.project_code
FROM {str_schema}.profiling_runs pr
INNER JOIN {str_schema}.table_groups g
ON pr.table_groups_id = g.id
@@ -75,7 +75,7 @@ def lookup_db_parentage_from_run(str_profile_run_id):
"""
df = db.retrieve_data(str_sql)
if not df.empty:
- return df.at[0, "profile_run_date"], df.at[0, "table_groups_name"]
+ return df.at[0, "profile_run_date"], df.at[0, "table_groups_id"], df.at[0, "table_groups_name"], df.at[0, "project_code"]
@st.cache_data(show_spinner="Retrieving Data")
diff --git a/testgen/ui/queries/test_suite_queries.py b/testgen/ui/queries/test_suite_queries.py
index 7293ecb..57ffa16 100644
--- a/testgen/ui/queries/test_suite_queries.py
+++ b/testgen/ui/queries/test_suite_queries.py
@@ -5,14 +5,16 @@
@st.cache_data(show_spinner=False)
-def get_by_table_group(schema, project_code, table_group_id):
+def get_by_project(schema, project_code, table_group_id=None):
sql = f"""
SELECT
suites.id::VARCHAR(50),
suites.project_code,
suites.test_suite,
suites.connection_id::VARCHAR(50),
+ connections.connection_name,
suites.table_groups_id::VARCHAR(50),
+ groups.table_groups_name,
suites.test_suite_description,
suites.test_action,
CASE WHEN suites.severity IS NULL THEN 'Inherit' ELSE suites.severity END,
@@ -22,6 +24,7 @@ def get_by_table_group(schema, project_code, table_group_id):
suites.component_type,
suites.component_name,
COUNT(definitions.id) as test_ct,
+ last_run.id as latest_run_id,
MAX(last_run.test_starttime) as latest_run_start,
MAX(last_run.passed_ct) as last_run_passed_ct,
MAX(last_run.warning_ct) as last_run_warning_ct,
@@ -33,11 +36,46 @@ def get_by_table_group(schema, project_code, table_group_id):
) AS last_run ON (last_run.test_suite_id = suites.id)
LEFT OUTER JOIN {schema}.test_definitions AS definitions
ON (definitions.test_suite_id = suites.id)
+ LEFT OUTER JOIN {schema}.connections AS connections
+ ON (connections.connection_id = suites.connection_id)
+ LEFT OUTER JOIN {schema}.table_groups as groups
+ ON (groups.id = suites.table_groups_id)
WHERE suites.project_code = '{project_code}'
- AND suites.table_groups_id = '{table_group_id}'
- GROUP BY suites.id
+ """
+
+ if table_group_id:
+ sql += f"""
+ AND suites.table_groups_id = '{table_group_id}'
+ """
+
+ sql += """
+ GROUP BY suites.id, groups.table_groups_name, connections.connection_id, last_run.id
ORDER BY suites.test_suite;
"""
+
+ return db.retrieve_data(sql)
+
+
+@st.cache_data(show_spinner=False)
+def get_by_id(schema, test_suite_id):
+ sql = f"""
+ SELECT
+ suites.id::VARCHAR(50),
+ suites.project_code,
+ suites.test_suite,
+ suites.connection_id::VARCHAR(50),
+ suites.table_groups_id::VARCHAR(50),
+ suites.test_suite_description,
+ suites.test_action,
+ CASE WHEN suites.severity IS NULL THEN 'Inherit' ELSE suites.severity END,
+ suites.export_to_observability,
+ suites.test_suite_schema,
+ suites.component_key,
+ suites.component_type,
+ suites.component_name
+ FROM {schema}.test_suites as suites
+ WHERE suites.id = '{test_suite_id}';
+ """
return db.retrieve_data(sql)
diff --git a/testgen/ui/services/form_service.py b/testgen/ui/services/form_service.py
index 41d51f9..a1a56de 100644
--- a/testgen/ui/services/form_service.py
+++ b/testgen/ui/services/form_service.py
@@ -19,7 +19,6 @@
import testgen.common.date_service as date_service
import testgen.ui.services.authentication_service as authentication_service
import testgen.ui.services.database_service as db
-from testgen.ui.components import widgets as testgen
"""
Shared rendering of UI elements
@@ -260,7 +259,7 @@ def render_excel_export(
df, lst_export_columns, str_export_title=None, str_caption=None, lst_wrap_columns=None, lst_column_headers=None
):
- if st.button(label=":blue[**⤓**]", use_container_width=True):
+ if st.button(label=":material/download: Export", help="Download to Excel"):
download_excel(df, lst_export_columns, str_export_title, str_caption, lst_wrap_columns, lst_column_headers)
@@ -271,7 +270,7 @@ def download_excel(
st.write(f'**Are you sure you want to download "{str_export_title}.xlsx"?**')
st.download_button(
- label="Yes",
+ label="Download",
data=_generate_excel_export(
df, lst_export_columns, str_export_title, str_caption, lst_wrap_columns, lst_column_headers
),
@@ -279,10 +278,9 @@ def download_excel(
mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
)
-
def render_refresh_button(button_container):
with button_container:
- do_refresh = st.button(":blue[**⟳**]", help="Refresh page data", use_container_width=False)
+ do_refresh = st.button(":material/refresh:", help="Refresh page data", use_container_width=False)
if do_refresh:
reset_post_updates("Refreshing page", True, True)
@@ -400,39 +398,6 @@ def reset_post_updates(str_message=None, as_toast=False, clear_cache=True, lst_c
st.rerun()
-def render_page_header(
- str_page_title, str_help_link=None, str_description=None, lst_breadcrumbs=None, boo_show_refresh=False
-):
- hcol1, hcol2 = st.columns([9, 1])
- hcol1.subheader(str_page_title, anchor=False)
- if str_help_link:
- with hcol2:
- st.caption(" ")
- render_icon_link(str_help_link)
- st.write(
- '',
- unsafe_allow_html=True,
- )
- if str_description:
- st.caption(str_description)
-
- if "last_page" in st.session_state:
- if str_page_title != st.session_state["last_page"]:
- st.cache_data.clear()
- st.session_state["last_page"] = str_page_title
-
- if lst_breadcrumbs:
- if boo_show_refresh:
- bcol1, bcol2, bcol3, _ = st.columns([875, 60, 60, 5])
- render_refresh_button(bcol3)
- else:
- bcol1, bcol2, _ = st.columns([95, 4, 1])
- with bcol1:
- testgen.breadcrumbs(breadcrumbs=lst_breadcrumbs)
- return bcol2
-
-
def render_select(
str_label, df_options, str_show_column, str_return_column, boo_required=True, str_default=None, boo_disabled=False
):
diff --git a/testgen/ui/services/project_service.py b/testgen/ui/services/project_service.py
new file mode 100644
index 0000000..24a41ab
--- /dev/null
+++ b/testgen/ui/services/project_service.py
@@ -0,0 +1,27 @@
+import streamlit as st
+
+from testgen.ui.queries import project_queries
+from testgen.ui.services import query_service
+from testgen.ui.session import session
+
+
+@st.cache_data(show_spinner=False)
+def get_projects():
+ projects = project_queries.get_projects()
+ projects = [
+ {"code": project["project_code"], "name": project["project_name"]} for project in projects.to_dict("records")
+ ]
+
+ return projects
+
+
+def set_current_project(project_code: str) -> None:
+ session.project = project_code
+
+
+@st.cache_data(show_spinner=False)
+def get_project_by_code(code: str):
+ if not code:
+ return None
+ return query_service.get_project_by_code(session.dbschema, code)
+
diff --git a/testgen/ui/services/table_group_service.py b/testgen/ui/services/table_group_service.py
index c78f1b5..57ea6bd 100644
--- a/testgen/ui/services/table_group_service.py
+++ b/testgen/ui/services/table_group_service.py
@@ -8,7 +8,7 @@
def get_by_id(table_group_id: str):
schema = st.session_state["dbschema"]
- return table_group_queries.get_by_id(schema, table_group_id)
+ return table_group_queries.get_by_id(schema, table_group_id).iloc[0]
def get_by_connection(project_code, connection_id):
diff --git a/testgen/ui/services/test_definition_service.py b/testgen/ui/services/test_definition_service.py
index 036a7d7..3d7d64b 100644
--- a/testgen/ui/services/test_definition_service.py
+++ b/testgen/ui/services/test_definition_service.py
@@ -95,9 +95,9 @@ def validate_test(test_definition):
sql_query = sql_query.replace("{DATA_SCHEMA}", schema)
table_group_id = test_definition["table_groups_id"]
- table_group_df = table_group_service.get_by_id(table_group_id)
+ table_group = table_group_service.get_by_id(table_group_id)
- connection_id = table_group_df.iloc[0]["connection_id"]
+ connection_id = table_group["connection_id"]
connection = connection_service.get_by_id(connection_id, hide_passwords=False)
diff --git a/testgen/ui/services/test_suite_service.py b/testgen/ui/services/test_suite_service.py
index f71e125..720695e 100644
--- a/testgen/ui/services/test_suite_service.py
+++ b/testgen/ui/services/test_suite_service.py
@@ -4,9 +4,14 @@
import testgen.ui.services.test_definition_service as test_definition_service
-def get_by_table_group(project_code, table_group_id):
+def get_by_project(project_code, table_group_id=None):
schema = st.session_state["dbschema"]
- return test_suite_queries.get_by_table_group(schema, project_code, table_group_id)
+ return test_suite_queries.get_by_project(schema, project_code, table_group_id)
+
+
+def get_by_id(test_suite_id):
+ schema = st.session_state["dbschema"]
+ return test_suite_queries.get_by_id(schema, test_suite_id).iloc[0]
def edit(test_suite):
diff --git a/testgen/ui/services/toolbar_service.py b/testgen/ui/services/toolbar_service.py
deleted file mode 100644
index d89f4b4..0000000
--- a/testgen/ui/services/toolbar_service.py
+++ /dev/null
@@ -1,77 +0,0 @@
-from time import sleep
-
-import streamlit as st
-
-
-class ToolBar:
- slot_count = 5
- toolbar_prompt = None
- action_prompt = None
- help_link = "https://docs.datakitchen.io/article/dataops-testgen-help/dataops-testgen-help"
-
- long_slots = None
- short_slots = None
- button_slots = None
- status_bar = None
- action_container = None
-
- def __init__(self, long_slot_count=5, short_slot_count=0, button_slot_count=0, prompt=None, multiline=False):
- self.toolbar_prompt = prompt
-
- lst_slots_line2 = []
- slots_line2 = None
-
- # Initialize Toolbar Slots for widgets at right size ratio
- lst_slots_line1 = [10] * long_slot_count
- if multiline:
- lst_slots_line2 = [7] * short_slot_count
- lst_slots_line2 += [2] * button_slot_count
- else:
- lst_slots_line1 += [7] * short_slot_count
- lst_slots_line1 += [2] * button_slot_count
-
- slots_line1 = st.columns(lst_slots_line1)
- if multiline:
- slots_line2 = st.columns(lst_slots_line2)
-
- if long_slot_count > 0:
- self.long_slots = slots_line1[:long_slot_count]
- if multiline:
- if short_slot_count > 0:
- self.short_slots = slots_line2[0:short_slot_count]
- if button_slot_count > 0:
- self.button_slots = slots_line2[-1 * button_slot_count :]
- else:
- if short_slot_count > 0:
- self.short_slots = slots_line1[long_slot_count : long_slot_count + short_slot_count]
- if button_slot_count > 0:
- self.button_slots = slots_line1[-1 * button_slot_count :]
-
- # Add vertical space to short slots
- for i in range(short_slot_count):
- self.short_slots[i].markdown(" ", unsafe_allow_html=True)
-
- # Add vertical space to button slots
- for i in range(button_slot_count):
- self.button_slots[i].markdown(" ", unsafe_allow_html=True)
-
- self.status_bar = st.empty()
- self.set_prompt()
-
- def set_prompt(self, str_new_prompt=None):
- str_prompt = self.toolbar_prompt if str_new_prompt is None else str_new_prompt
- if str_prompt:
- self.toolbar_prompt = str_prompt
- self.status_bar.markdown(f":green[**{str_prompt}**]")
- else:
- self.status_bar.empty()
-
- def show_status(self, str_message, str_type):
- if str_type == "success":
- self.status_bar.success(str_message, icon="✅")
- elif str_type == "error":
- self.status_bar.error(str_message, icon="❌")
- elif str_type == "info":
- self.status_bar.info(str_message, icon="💡")
- sleep(2)
- self.set_prompt()
diff --git a/testgen/ui/views/connections.py b/testgen/ui/views/connections.py
index 99e0aa2..31e50b6 100644
--- a/testgen/ui/views/connections.py
+++ b/testgen/ui/views/connections.py
@@ -3,8 +3,7 @@
import streamlit as st
-import testgen.ui.services.form_service as fm
-import testgen.ui.services.toolbar_service as tb
+from testgen.ui.components import widgets as testgen
from testgen.ui.navigation.menu import MenuItem
from testgen.ui.navigation.page import Page
from testgen.ui.services import connection_service
@@ -21,21 +20,17 @@ class ConnectionsPage(Page):
]
menu_item = MenuItem(icon="database", label="Data Configuration", order=3)
- def render(self) -> None:
- fm.render_page_header(
+ def render(self, project_code: str, **_kwargs) -> None:
+ dataframe = connection_service.get_connections(project_code)
+ connection = dataframe.iloc[0]
+
+ testgen.page_header(
"Connection",
"https://docs.datakitchen.io/article/dataops-testgen-help/connect-your-database",
- lst_breadcrumbs=[
- {"label": "Overview", "path": "overview"},
- {"label": "Connection", "path": None},
- ],
)
- project_code = session.project
- dataframe = connection_service.get_connections(project_code)
- connection = dataframe.iloc[0]
-
- tool_bar = tb.ToolBar(long_slot_count=6, short_slot_count=0, button_slot_count=0, prompt=None)
+ _, actions_column = st.columns([.1, .9])
+ testgen.flex_row_end(actions_column)
enable_table_groups = connection["project_host"] and connection["project_db"] and connection["project_qc_schema"]
@@ -44,23 +39,17 @@ def render(self) -> None:
mode = "edit"
show_connection_form(connection, mode, project_code)
- if tool_bar.long_slots[-1].button(
+ if actions_column.button(
+ "Configure QC Utility Schema",
+ help="Creates the required Utility schema and related functions in the target database",
+ ):
+ create_qc_schema_dialog(connection)
+
+ if actions_column.button(
f":{'gray' if not enable_table_groups else 'green'}[Table Groups →]",
help="Create or edit Table Groups for the Connection",
- use_container_width=True,
):
- st.session_state["connection"] = connection.to_dict()
-
self.router.navigate(
"connections:table-groups",
{"connection_id": connection["connection_id"]},
)
-
- _, col2 = st.columns([70, 30])
-
- if col2.button(
- "Configure QC Utility Schema",
- help="Creates the required Utility schema and related functions in the target database",
- use_container_width=True,
- ):
- create_qc_schema_dialog(connection)
diff --git a/testgen/ui/views/login.py b/testgen/ui/views/login.py
index 728a214..13e08fa 100644
--- a/testgen/ui/views/login.py
+++ b/testgen/ui/views/login.py
@@ -17,7 +17,7 @@ class LoginPage(Page):
lambda: not session.authentication_status or session.logging_in or "overview",
]
- def render(self) -> None:
+ def render(self, **_kwargs) -> None:
auth_data = user_session_service.get_auth_data()
authenticator = stauth.Authenticate(
diff --git a/testgen/ui/views/overview.py b/testgen/ui/views/overview.py
index 43e2b6e..901fa7f 100644
--- a/testgen/ui/views/overview.py
+++ b/testgen/ui/views/overview.py
@@ -3,9 +3,9 @@
import streamlit as st
+from testgen.ui.components import widgets as testgen
from testgen.ui.navigation.menu import MenuItem
from testgen.ui.navigation.page import Page
-from testgen.ui.services import form_service
from testgen.ui.session import session
LOG = logging.getLogger("testgen")
@@ -18,8 +18,8 @@ class OverviewPage(Page):
]
menu_item = MenuItem(icon="home", label="Overview", order=0)
- def render(self):
- form_service.render_page_header(
+ def render(self, **_kwargs):
+ testgen.page_header(
"Welcome to DataOps TestGen",
"https://docs.datakitchen.io/article/dataops-testgen-help/introduction-to-dataops-testgen",
)
diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py
index 52250b6..fe8e5c4 100644
--- a/testgen/ui/views/profiling_anomalies.py
+++ b/testgen/ui/views/profiling_anomalies.py
@@ -3,221 +3,207 @@
import plotly.express as px
import streamlit as st
+import testgen.ui.queries.profiling_queries as profiling_queries
import testgen.ui.services.database_service as db
import testgen.ui.services.form_service as fm
import testgen.ui.services.query_service as dq
-import testgen.ui.services.toolbar_service as tb
+from testgen.common import date_service
from testgen.ui.components import widgets as testgen
from testgen.ui.navigation.page import Page
+from testgen.ui.services import project_service
from testgen.ui.session import session
from testgen.ui.views.profiling_modal import view_profiling_button
class ProfilingAnomaliesPage(Page):
- path = "profiling:hygiene"
+ path = "profiling-runs:hygiene"
can_activate: typing.ClassVar = [
lambda: session.authentication_status,
+ lambda: "run_id" in session.current_page_args or "profiling-runs",
]
- def render(self) -> None:
- export_container = fm.render_page_header(
+ def render(self, run_id: str, issue_class: str | None = None, **_kwargs) -> None:
+ run_date, _table_group_id, table_group_name, project_code = profiling_queries.lookup_db_parentage_from_run(
+ run_id
+ )
+ run_date = date_service.get_timezoned_timestamp(st.session_state, run_date)
+ project_service.set_current_project(project_code)
+
+ testgen.page_header(
"Hygiene Issues",
"https://docs.datakitchen.io/article/dataops-testgen-help/profile-anomalies",
- lst_breadcrumbs=[
- {"label": "Overview", "path": "overview"},
- {"label": "Data Profiling", "path": "profiling"},
- {"label": "Hygiene Issues", "path": None},
+ breadcrumbs=[
+ { "label": "Profiling Runs", "path": "profiling-runs", "params": { "project_code": project_code } },
+ { "label": f"{table_group_name} | {run_date}" },
],
)
- if "project" not in st.session_state:
- st.write("Select a Project from the Overview page.")
- else:
- str_project = st.session_state["project"]
+ others_summary_column, pii_summary_column, _ = st.columns([.3, .3, .4])
+        likelihood_filter_column, actions_column, export_button_column = st.columns([.3, .5, .2], vertical_alignment="bottom")
+        testgen.flex_row_end(actions_column)
+        testgen.flex_row_end(export_button_column)
+
+        with likelihood_filter_column:
+ # Likelihood selection - optional filter
+ status_options = ["All Likelihoods", "Definite", "Likely", "Possible", "Potential PII"]
+ issue_class = testgen.toolbar_select(
+ options=status_options,
+ default_value=issue_class,
+ required=True,
+ bind_to_query="issue_class",
+ label="Issue Class",
+ )
- # Setup Toolbar
- tool_bar = tb.ToolBar(3, 1, 4, None)
+ with actions_column:
+ str_help = "Toggle on to perform actions on multiple Hygiene Issues"
+ do_multi_select = st.toggle("Multi-Select", help=str_help)
+
+ # Get hygiene issue list
+ df_pa = get_profiling_anomalies(run_id, issue_class)
+
+ # Retrieve disposition action (cache refreshed)
+ df_action = get_anomaly_disposition(run_id)
+ # Update action from disposition df
+ action_map = df_action.set_index("id")["action"].to_dict()
+ df_pa["action"] = df_pa["id"].map(action_map).fillna(df_pa["action"])
+
+ if not df_pa.empty:
+ summaries = get_profiling_anomaly_summary(run_id)
+ others_summary = [summary for summary in summaries if summary.get("type") != "PII"]
+ with others_summary_column:
+ testgen.summary_bar(
+ items=others_summary,
+ label="Hygiene Issues",
+ key="test_results_summary:others",
+ height=40,
+ width=400,
+ )
- # Look for drill-down from another page
- # No need to clear -- will be sent every time page is accessed
- str_drill_tg = st.session_state.get("drill_profile_tg")
- str_drill_prun = st.session_state.get("drill_profile_run")
+ anomalies_pii_summary = [summary for summary in summaries if summary.get("type") == "PII"]
+ if anomalies_pii_summary:
+ with pii_summary_column:
+ testgen.summary_bar(
+ items=anomalies_pii_summary,
+ label="Potential PII",
+ key="test_results_summary:pii",
+ height=40,
+ width=400,
+ )
+ # write_frequency_graph(df_pa)
+
+ lst_show_columns = [
+ "table_name",
+ "column_name",
+ "issue_likelihood",
+ "action",
+ "anomaly_name",
+ "detail",
+ ]
- with tool_bar.long_slots[0]:
- # Table Groups selection
- df_tg = get_db_table_group_choices(str_project)
- str_drill_tg_name = (
- df_tg[df_tg["id"] == str_drill_tg]["table_groups_name"].values[0] if str_drill_tg else None
- )
- str_table_groups_id = fm.render_select(
- "Table Group", df_tg, "table_groups_name", "id", str_default=str_drill_tg_name, boo_disabled=True
+ # Show main grid and retrieve selections
+ selected = fm.render_grid_select(
+ df_pa, lst_show_columns, int_height=400, do_multi_select=do_multi_select
+ )
+
+ with export_button_column:
+ lst_export_columns = [
+ "schema_name",
+ "table_name",
+ "column_name",
+ "anomaly_name",
+ "issue_likelihood",
+ "anomaly_description",
+ "action",
+ "detail",
+ "suggested_action",
+ ]
+ lst_wrap_columns = ["anomaly_description", "suggested_action"]
+ fm.render_excel_export(
+ df_pa, lst_export_columns, "Hygiene Screen", "{TIMESTAMP}", lst_wrap_columns
)
- str_profile_run_id = str_drill_prun
-
- with tool_bar.long_slots[1]:
- # Likelihood selection - optional filter
- lst_status_options = ["All Likelihoods", "Definite", "Likely", "Possible", "Potential PII"]
- str_likelihood = st.selectbox("Issue Class", lst_status_options)
-
- with tool_bar.short_slots[0]:
- str_help = "Toggle on to perform actions on multiple Hygiene Issues"
- do_multi_select = st.toggle("Multi-Select", help=str_help)
-
- if str_table_groups_id:
- # Get hygiene issue list
- df_pa = get_profiling_anomalies(str_profile_run_id, str_likelihood)
-
- # Retrieve disposition action (cache refreshed)
- df_action = get_anomaly_disposition(str_profile_run_id)
- # Update action from disposition df
- action_map = df_action.set_index("id")["action"].to_dict()
- df_pa["action"] = df_pa["id"].map(action_map).fillna(df_pa["action"])
-
- if not df_pa.empty:
- others_summary_column, pii_summary_column, _ = st.columns([.3, .3, .4])
- summaries = get_profiling_anomaly_summary(str_profile_run_id)
- others_summary = [summary for summary in summaries if summary.get("type") != "PII"]
- with others_summary_column:
- st.html("Hygiene Issues")
- testgen.summary_bar(
- items=others_summary,
- key="test_results_summary:others",
- height=40,
- width=400,
- )
-
- anomalies_pii_summary = [summary for summary in summaries if summary.get("type") == "PII"]
- if anomalies_pii_summary:
- with pii_summary_column:
- st.html("Potential PII")
- testgen.summary_bar(
- items=anomalies_pii_summary,
- key="test_results_summary:pii",
- height=40,
- width=400,
- )
- # write_frequency_graph(df_pa)
-
- lst_show_columns = [
- "table_name",
- "column_name",
- "issue_likelihood",
- "action",
- "anomaly_name",
- "detail",
- ]
- # TODO: Can we reintegrate percents below:
- # tool_bar.set_prompt(
- # f"Hygiene Issues Found: {df_sum.at[0, 'issue_ct']} issues in {df_sum.at[0, 'column_ct']} columns, {df_sum.at[0, 'table_ct']} tables in schema {df_pa.loc[0, 'schema_name']}"
- # )
- # Show main grid and retrieve selections
- selected = fm.render_grid_select(
- df_pa, lst_show_columns, int_height=400, do_multi_select=do_multi_select
- )
+ if selected:
+ # Always show details for last selected row
+ selected_row = selected[len(selected) - 1]
+ else:
+ selected_row = None
- with export_container:
- lst_export_columns = [
- "schema_name",
+ # Display hygiene issue detail for selected row
+ if not selected_row:
+ st.markdown(":orange[Select a record to see more information.]")
+ else:
+ col1, col2 = st.columns([0.7, 0.3])
+ with col1:
+ fm.render_html_list(
+ selected_row,
+ [
+ "anomaly_name",
"table_name",
"column_name",
- "anomaly_name",
- "issue_likelihood",
+ "column_type",
"anomaly_description",
- "action",
"detail",
+ "likelihood_explanation",
"suggested_action",
- ]
- lst_wrap_columns = ["anomaly_description", "suggested_action"]
- fm.render_excel_export(
- df_pa, lst_export_columns, "Hygiene Screen", "{TIMESTAMP}", lst_wrap_columns
- )
-
- if selected:
- # Always show details for last selected row
- selected_row = selected[len(selected) - 1]
- else:
- selected_row = None
-
- # Display hygiene issue detail for selected row
- if not selected_row:
- st.markdown(":orange[Select a record to see more information.]")
- else:
- col1, col2 = st.columns([0.7, 0.3])
- with col1:
- fm.render_html_list(
- selected_row,
- [
- "anomaly_name",
- "table_name",
- "column_name",
- "column_type",
- "anomaly_description",
- "detail",
- "likelihood_explanation",
- "suggested_action",
- ],
- "Hygiene Issue Detail",
- int_data_width=700,
- )
- with col2:
- # _, v_col2 = st.columns([0.3, 0.7])
- v_col1, v_col2 = st.columns([0.5, 0.5])
- view_profiling_button(
- v_col1, selected_row["table_name"], selected_row["column_name"],
- str_profile_run_id=str_profile_run_id
- )
- with v_col2:
- if st.button(
- ":green[Source Data →]", help="Review current source data for highlighted issue", use_container_width=True
- ):
- source_data_dialog(selected_row)
-
- # Need to render toolbar buttons after grid, so selection status is maintained
- if tool_bar.button_slots[0].button(
- "✓", help="Confirm this issue as relevant for this run", disabled=not selected
- ):
- fm.reset_post_updates(
- do_disposition_update(selected, "Confirmed"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary],
- )
- if tool_bar.button_slots[1].button(
- "✘", help="Dismiss this issue as not relevant for this run", disabled=not selected
- ):
- fm.reset_post_updates(
- do_disposition_update(selected, "Dismissed"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary],
- )
- if tool_bar.button_slots[2].button(
- "🔇", help="Mute this test to deactivate it for future runs", disabled=not selected
+ ],
+ "Hygiene Issue Detail",
+ int_data_width=700,
+ )
+ with col2:
+ # _, v_col2 = st.columns([0.3, 0.7])
+ v_col1, v_col2 = st.columns([0.5, 0.5])
+ view_profiling_button(
+ v_col1, selected_row["table_name"], selected_row["column_name"],
+ str_profile_run_id=run_id
+ )
+ with v_col2:
+ if st.button(
+ ":green[Source Data →]", help="Review current source data for highlighted issue", use_container_width=True
):
- fm.reset_post_updates(
- do_disposition_update(selected, "Inactive"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary],
- )
- if tool_bar.button_slots[3].button("↩︎", help="Clear action", disabled=not selected):
- fm.reset_post_updates(
- do_disposition_update(selected, "No Decision"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary],
- )
- else:
- tool_bar.set_prompt("No Hygiene Issues Found")
-
- # Help Links
- st.markdown(
- "[Help on Hygiene Issues](https://docs.datakitchen.io/article/dataops-testgen-help/profile-anomalies)"
- )
+ source_data_dialog(selected_row)
+
+ # Need to render toolbar buttons after grid, so selection status is maintained
+ if actions_column.button(
+ "✓", help="Confirm this issue as relevant for this run", disabled=not selected
+ ):
+ fm.reset_post_updates(
+ do_disposition_update(selected, "Confirmed"),
+ as_toast=True,
+ clear_cache=True,
+ lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary],
+ )
+ if actions_column.button(
+ "✘", help="Dismiss this issue as not relevant for this run", disabled=not selected
+ ):
+ fm.reset_post_updates(
+ do_disposition_update(selected, "Dismissed"),
+ as_toast=True,
+ clear_cache=True,
+ lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary],
+ )
+ if actions_column.button(
+ "🔇", help="Mute this test to deactivate it for future runs", disabled=not selected
+ ):
+ fm.reset_post_updates(
+ do_disposition_update(selected, "Inactive"),
+ as_toast=True,
+ clear_cache=True,
+ lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary],
+ )
+ if actions_column.button("↩︎", help="Clear action", disabled=not selected):
+ fm.reset_post_updates(
+ do_disposition_update(selected, "No Decision"),
+ as_toast=True,
+ clear_cache=True,
+ lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary],
+ )
+ else:
+ st.markdown(":green[**No Hygiene Issues Found**]")
- # with st.sidebar:
- # st.divider()
+ # Help Links
+ st.markdown(
+ "[Help on Hygiene Issues](https://docs.datakitchen.io/article/dataops-testgen-help/profile-anomalies)"
+ )
@st.cache_data(show_spinner=False)
diff --git a/testgen/ui/views/profiling_results.py b/testgen/ui/views/profiling_results.py
index 10ed4ad..d6445e0 100644
--- a/testgen/ui/views/profiling_results.py
+++ b/testgen/ui/views/profiling_results.py
@@ -4,9 +4,10 @@
import testgen.ui.queries.profiling_queries as profiling_queries
import testgen.ui.services.form_service as fm
-import testgen.ui.services.toolbar_service as tb
from testgen.common import date_service
+from testgen.ui.components import widgets as testgen
from testgen.ui.navigation.page import Page
+from testgen.ui.services import project_service
from testgen.ui.session import session
from testgen.ui.views.profiling_details import show_profiling_detail
@@ -14,174 +15,149 @@
class ProfilingResultsPage(Page):
- path = "profiling:results"
+ path = "profiling-runs:results"
can_activate: typing.ClassVar = [
lambda: session.authentication_status,
+ lambda: "run_id" in session.current_page_args or "profiling-runs",
]
- def render(self) -> None:
- export_container = fm.render_page_header(
+ def render(self, run_id: str, table_name: str | None = None, column_name: str | None = None, **_kwargs) -> None:
+ run_date, table_group_id, table_group_name, project_code = profiling_queries.lookup_db_parentage_from_run(
+ run_id
+ )
+ run_date = date_service.get_timezoned_timestamp(st.session_state, run_date)
+ project_service.set_current_project(project_code)
+
+ testgen.page_header(
"Data Profiling Results",
"https://docs.datakitchen.io/article/dataops-testgen-help/investigate-profiling",
- lst_breadcrumbs=[
- {"label": "Overview", "path": "overview"},
- {"label": "Data Profiling", "path": "profiling"},
- {"label": "Profiling Results", "path": None},
+ breadcrumbs=[
+ { "label": "Profiling Runs", "path": "profiling-runs", "params": { "project_code": project_code } },
+ { "label": f"{table_group_name} | {run_date}" },
],
)
-
- if "project" not in st.session_state:
- st.write("Select a Project from the Overview page.")
+
+ table_filter_column, column_filter_column, export_button_column = st.columns([.3, .3, .4], vertical_alignment="bottom")
+
+ with table_filter_column:
+ # Table Name filter
+ df = profiling_queries.run_table_lookup_query(table_group_id)
+ table_name = testgen.toolbar_select(
+ options=df,
+ value_column="table_name",
+ default_value=table_name,
+ bind_to_query="table_name",
+ label="Table Name",
+ )
+
+ with column_filter_column:
+ # Column Name filter
+ df = profiling_queries.run_column_lookup_query(table_group_id, table_name)
+ column_name = testgen.toolbar_select(
+ options=df,
+ value_column="column_name",
+ default_value=column_name,
+ bind_to_query="column_name",
+ label="Column Name",
+ disabled=not table_name,
+ )
+
+ # Use SQL wildcard to match all values
+ if not table_name:
+ table_name = "%%"
+ if not column_name:
+ column_name = "%%"
+
+ # Display main results grid
+ df = profiling_queries.get_profiling_detail(run_id, table_name, column_name)
+ show_columns = [
+ "schema_name",
+ "table_name",
+ "column_name",
+ "column_type",
+ "semantic_data_type",
+ "anomalies",
+ ]
+
+ # Show CREATE script button
+ if len(df) > 0 and table_name != "%%":
+ with st.expander("📜 **Table CREATE script with suggested datatypes**"):
+ st.code(generate_create_script(df), "sql")
+
+ selected_row = fm.render_grid_select(df, show_columns)
+
+ with export_button_column:
+ testgen.flex_row_end()
+ render_export_button(df)
+
+ # Display profiling for selected row
+ if not selected_row:
+ st.markdown(":orange[Select a row to see profiling details.]")
else:
- # Retrieve State Variables
-
- str_project = st.session_state["project"]
- # Look for drill-down from another page
- if "drill_profile_run" in st.session_state:
- str_profile_run_id = st.session_state["drill_profile_run"]
- else:
- str_profile_run_id = ""
-
- # Setup Toolbar
- tool_bar = tb.ToolBar(4, 0, 1, None)
-
- # Retrieve Choices data
- if str_profile_run_id:
- # Lookup profiling run date and table group name from passed profile run
- str_lookfor_run_date, str_lookfor_table_group = profiling_queries.lookup_db_parentage_from_run(
- str_profile_run_id
- )
- str_lookfor_run_date = date_service.get_timezoned_timestamp(st.session_state, str_lookfor_run_date)
- else:
- str_lookfor_run_date = ""
- str_lookfor_table_group = ""
-
- with tool_bar.long_slots[0]:
- # Prompt for Table Group (with passed default)
- df = profiling_queries.run_table_groups_lookup_query(str_project)
- str_table_groups_id = fm.render_select(
- "Table Group", df, "table_groups_name", "id", True, str_lookfor_table_group, True
- )
-
- with tool_bar.long_slots[1]:
- # Prompt for Profile Run (with passed default)
- df = profiling_queries.get_db_profile_run_choices(str_table_groups_id)
- date_service.create_timezoned_column_in_dataframe(
- st.session_state, df, "profile_run_date_with_timezone", "profile_run_date"
- )
- str_profile_run_id = fm.render_select(
- "Profile Run", df, "profile_run_date_with_timezone", "id", True, str_lookfor_run_date, True
- )
-
- # Reset passed parameter
- # st.session_state["drill_profile_run"] = None
-
- with tool_bar.long_slots[2]:
- # Prompt for Table Name
- df = profiling_queries.run_table_lookup_query(str_table_groups_id)
- str_table_name = fm.render_select("Table Name", df, "table_name", "table_name", False)
-
- with tool_bar.long_slots[3]:
- # Prompt for Column Name
- if str_table_name:
- df = profiling_queries.run_column_lookup_query(str_table_groups_id, str_table_name)
- str_column_name = fm.render_select("Column Name", df, "column_name", "column_name", False)
- if not str_column_name:
- # Use SQL wildcard to match all values
- str_column_name = "%%"
- else:
- # Use SQL wildcard to match all values
- str_table_name = "%%"
- str_column_name = "%%"
-
- # Display main results grid
- if str_profile_run_id:
- df = profiling_queries.get_profiling_detail(str_profile_run_id, str_table_name, str_column_name)
- show_columns = [
- "schema_name",
- "table_name",
- "column_name",
- "column_type",
- "semantic_data_type",
- "anomalies",
- ]
-
- # Show CREATE script button
- if len(df) > 0 and str_table_name != "%%":
- with st.expander("📜 **Table CREATE script with suggested datatypes**"):
- st.code(generate_create_script(df), "sql")
-
- selected_row = fm.render_grid_select(df, show_columns)
-
- with export_container:
- lst_export_columns = [
- "schema_name",
- "table_name",
- "column_name",
- "position",
- "column_type",
- "general_type",
- "semantic_table_type",
- "semantic_data_type",
- "datatype_suggestion",
- "anomalies",
- "record_ct",
- "value_ct",
- "distinct_value_ct",
- "top_freq_values",
- "null_value_ct",
- "min_length",
- "max_length",
- "avg_length",
- "distinct_std_value_ct",
- "numeric_ct",
- "date_ct",
- "dummy_value_ct",
- "zero_length_ct",
- "lead_space_ct",
- "quoted_value_ct",
- "includes_digit_ct",
- "embedded_space_ct",
- "avg_embedded_spaces",
- "min_text",
- "max_text",
- "std_pattern_match",
- "distinct_pattern_ct",
- "top_patterns",
- "distinct_value_hash",
- "min_value",
- "min_value_over_0",
- "max_value",
- "avg_value",
- "stdev_value",
- "percentile_25",
- "percentile_50",
- "percentile_75",
- "zero_value_ct",
- "fractional_sum",
- "min_date",
- "max_date",
- "before_1yr_date_ct",
- "before_5yr_date_ct",
- "within_1yr_date_ct",
- "within_1mo_date_ct",
- "future_date_ct",
- "date_days_present",
- "date_weeks_present",
- "date_months_present",
- "boolean_true_ct",
- ]
- lst_wrap_columns = ["top_freq_values", "top_patterns"]
- str_caption = "{TIMESTAMP}"
- fm.render_excel_export(df, lst_export_columns, "Profiling Results", str_caption, lst_wrap_columns)
-
- # Display profiling for selected row
- if not selected_row:
- st.markdown(":orange[Select a row to see profiling details.]")
- else:
- show_profiling_detail(selected_row[0], FORM_DATA_WIDTH)
- else:
- st.markdown(":orange[Select a profiling run.]")
+ show_profiling_detail(selected_row[0], FORM_DATA_WIDTH)
+
+
+def render_export_button(df):
+ export_columns = [
+ "schema_name",
+ "table_name",
+ "column_name",
+ "position",
+ "column_type",
+ "general_type",
+ "semantic_table_type",
+ "semantic_data_type",
+ "datatype_suggestion",
+ "anomalies",
+ "record_ct",
+ "value_ct",
+ "distinct_value_ct",
+ "top_freq_values",
+ "null_value_ct",
+ "min_length",
+ "max_length",
+ "avg_length",
+ "distinct_std_value_ct",
+ "numeric_ct",
+ "date_ct",
+ "dummy_value_ct",
+ "zero_length_ct",
+ "lead_space_ct",
+ "quoted_value_ct",
+ "includes_digit_ct",
+ "embedded_space_ct",
+ "avg_embedded_spaces",
+ "min_text",
+ "max_text",
+ "std_pattern_match",
+ "distinct_pattern_ct",
+ "top_patterns",
+ "distinct_value_hash",
+ "min_value",
+ "min_value_over_0",
+ "max_value",
+ "avg_value",
+ "stdev_value",
+ "percentile_25",
+ "percentile_50",
+ "percentile_75",
+ "zero_value_ct",
+ "fractional_sum",
+ "min_date",
+ "max_date",
+ "before_1yr_date_ct",
+ "before_5yr_date_ct",
+ "within_1yr_date_ct",
+ "within_1mo_date_ct",
+ "future_date_ct",
+ "date_days_present",
+ "date_weeks_present",
+ "date_months_present",
+ "boolean_true_ct",
+ ]
+ wrap_columns = ["top_freq_values", "top_patterns"]
+ caption = "{TIMESTAMP}"
+ fm.render_excel_export(df, export_columns, "Profiling Results", caption, wrap_columns)
def generate_create_script(df):
diff --git a/testgen/ui/views/profiling_summary.py b/testgen/ui/views/profiling_summary.py
index db1eb0d..6fa7dea 100644
--- a/testgen/ui/views/profiling_summary.py
+++ b/testgen/ui/views/profiling_summary.py
@@ -6,9 +6,9 @@
import testgen.ui.services.database_service as db
import testgen.ui.services.form_service as fm
import testgen.ui.services.query_service as dq
-import testgen.ui.services.toolbar_service as tb
from testgen.commands.run_profiling_bridge import update_profile_run_status
from testgen.common import date_service
+from testgen.ui.components import widgets as testgen
from testgen.ui.navigation.menu import MenuItem
from testgen.ui.navigation.page import Page
from testgen.ui.session import session
@@ -17,52 +17,51 @@
class DataProfilingPage(Page):
- path = "profiling"
+ path = "profiling-runs"
can_activate: typing.ClassVar = [
lambda: session.authentication_status,
]
menu_item = MenuItem(icon="problem", label="Data Profiling", order=1)
- def render(self) -> None:
- fm.render_page_header(
+ def render(self, project_code: str | None = None, table_group_id: str | None = None, **_kwargs) -> None:
+ project_code = project_code or session.project
+
+ testgen.page_header(
"Profiling Runs",
"https://docs.datakitchen.io/article/dataops-testgen-help/investigate-profiling",
- lst_breadcrumbs=[
- {"label": "Overview", "path": "overview"},
- {"label": "Data Profiling", "path": None},
- ],
- boo_show_refresh=True,
)
- if "project" not in st.session_state:
- st.write("Select a Project from the Overview page.")
- else:
- str_project = st.session_state["project"]
-
- # Setup Toolbar
- tool_bar = tb.ToolBar(3, 2, 0, None)
+ # Setup Toolbar
+ group_filter_column, actions_column = st.columns([.3, .7], vertical_alignment="bottom")
+ testgen.flex_row_end(actions_column)
- with tool_bar.long_slots[0]:
- # Table Groups selection -- optional criterion
- df_tg = get_db_table_group_choices(str_project)
- str_table_groups_id = fm.render_select(
- "Table Group", df_tg, "table_groups_name", "id", boo_required=False, str_default=None
- )
+ with group_filter_column:
+ # Table Groups selection -- optional criterion
+ df_tg = get_db_table_group_choices(project_code)
+ table_groups_id = testgen.toolbar_select(
+ options=df_tg,
+ value_column="id",
+ display_column="table_groups_name",
+ default_value=table_group_id,
+ bind_to_query="table_group_id",
+ label="Table Group",
+ )
- df, show_columns = get_db_profiling_runs(str_project, str_table_groups_id)
+ df, show_columns = get_db_profiling_runs(project_code, table_groups_id)
- time_columns = ["start_time"]
- date_service.accommodate_dataframe_to_timezone(df, st.session_state, time_columns)
+ time_columns = ["start_time"]
+ date_service.accommodate_dataframe_to_timezone(df, st.session_state, time_columns)
- dct_selected_rows = fm.render_grid_select(df, show_columns)
+ dct_selected_rows = fm.render_grid_select(df, show_columns)
- open_drill_downs(dct_selected_rows, tool_bar.short_slots, self.router)
+ open_drill_downs(dct_selected_rows, actions_column, self.router)
+ fm.render_refresh_button(actions_column)
- if dct_selected_rows:
- show_record_detail(dct_selected_rows[0])
- st.markdown(":orange[Click a button to view profiling outcomes for the selected run.]")
- else:
- st.markdown(":orange[Select a run to see more information.]")
+ if dct_selected_rows:
+ show_record_detail(dct_selected_rows[0])
+ st.markdown(":orange[Click a button to view profiling outcomes for the selected run.]")
+ else:
+ st.markdown(":orange[Select a run to see more information.]")
@st.cache_data(show_spinner=False)
@@ -107,29 +106,24 @@ def get_db_profiling_runs(str_project_code, str_tg=None):
return db.retrieve_data(str_sql), show_columns
-def open_drill_downs(dct_selected_rows, button_slots, router):
+def open_drill_downs(dct_selected_rows, container, router):
dct_selected_row = None
if dct_selected_rows:
dct_selected_row = dct_selected_rows[0]
- if button_slots[0].button(
+ if container.button(
f":{'gray' if not dct_selected_rows else 'green'}[Profiling →]",
help="Review profiling characteristics for each data column",
- use_container_width=True,
disabled=not dct_selected_rows,
):
- st.session_state["drill_profile_run"] = dct_selected_row["profiling_run_id"]
- router.navigate("profiling:results")
+ router.navigate("profiling-runs:results", { "run_id": dct_selected_row["profiling_run_id"] })
- if button_slots[1].button(
+ if container.button(
f":{'gray' if not dct_selected_rows else 'green'}[Hygiene →]",
help="Review potential data problems identified in profiling",
- use_container_width=True,
disabled=not dct_selected_rows,
):
- st.session_state["drill_profile_run"] = dct_selected_row["profiling_run_id"]
- st.session_state["drill_profile_tg"] = dct_selected_row["table_groups_id"]
- router.navigate("profiling:hygiene")
+ router.navigate("profiling-runs:hygiene", { "run_id": dct_selected_row["profiling_run_id"] })
def show_record_detail(dct_selected_row):
diff --git a/testgen/ui/views/project_settings.py b/testgen/ui/views/project_settings.py
index 1aece63..603d104 100644
--- a/testgen/ui/views/project_settings.py
+++ b/testgen/ui/views/project_settings.py
@@ -3,9 +3,10 @@
import streamlit as st
from testgen.commands.run_observability_exporter import test_observability_exporter
+from testgen.ui.components import widgets as testgen
from testgen.ui.navigation.menu import MenuItem
from testgen.ui.navigation.page import Page
-from testgen.ui.services import form_service, query_service
+from testgen.ui.services import form_service, project_service
from testgen.ui.session import session
from testgen.ui.views.app_log_modal import view_log_file
@@ -18,18 +19,15 @@ class ProjectSettingsPage(Page):
]
menu_item = MenuItem(icon="settings", label="Settings", order=100)
- def render(self) -> None:
- form_service.render_page_header(
+ def render(self, project_code: str | None = None, **_kwargs) -> None:
+ project = project_service.get_project_by_code(project_code or session.project)
+
+ testgen.page_header(
"Settings",
"https://docs.datakitchen.io/article/dataops-testgen-help/configuration",
- lst_breadcrumbs=[
- {"label": "Overview", "path": "overview"},
- {"label": "Settings", "path": None},
- ],
)
- project = get_current_project(session.project)
-
+ testgen.whitespace(1)
form_service.render_edit_form(
"",
project,
@@ -59,13 +57,6 @@ def render(self) -> None:
view_log_file(col3)
-@st.cache_data(show_spinner=False)
-def get_current_project(code: str):
- if not code:
- return None
- return query_service.get_project_by_code(session.dbschema, code)
-
-
def set_add_new_project():
session.add_project = True
diff --git a/testgen/ui/views/table_groups.py b/testgen/ui/views/table_groups.py
index 43bbc0c..8880c57 100644
--- a/testgen/ui/views/table_groups.py
+++ b/testgen/ui/views/table_groups.py
@@ -8,9 +8,10 @@
import testgen.ui.services.connection_service as connection_service
import testgen.ui.services.form_service as fm
import testgen.ui.services.table_group_service as table_group_service
-import testgen.ui.services.toolbar_service as tb
from testgen.commands.run_profiling_bridge import run_profiling_in_background
+from testgen.ui.components import widgets as testgen
from testgen.ui.navigation.page import Page
+from testgen.ui.services import project_service
from testgen.ui.services.string_service import empty_if_null
from testgen.ui.session import session
@@ -20,32 +21,25 @@ class TableGroupsPage(Page):
can_activate: typing.ClassVar = [
lambda: authentication_service.current_user_has_admin_role() or "overview",
lambda: session.authentication_status,
+ lambda: "connection_id" in session.current_page_args or "connections",
]
- def render(self, connection_id: int | None = None) -> None:
- fm.render_page_header(
+ def render(self, connection_id: str, **_kwargs) -> None:
+ connection = connection_service.get_by_id(connection_id, hide_passwords=False)
+ project_code = connection["project_code"]
+ project_service.set_current_project(project_code)
+
+ testgen.page_header(
"Table Groups",
"https://docs.datakitchen.io/article/dataops-testgen-help/create-a-table-group",
- lst_breadcrumbs=[
- {"label": "Overview", "path": "overview"},
- {"label": "Connections", "path": "connections"},
- {"label": "Table Groups", "path": None},
+ breadcrumbs=[
+ { "label": "Connections", "path": "connections", "params": { "project_code": project_code } },
+ { "label": connection["connection_name"] },
],
)
- # Get page parameters from session
- project_code = st.session_state["project"]
- connection = (
- connection_service.get_by_id(connection_id, hide_passwords=False)
- if connection_id
- else st.session_state["connection"]
- )
- connection_id = connection["connection_id"]
-
- tool_bar = tb.ToolBar(1, 5, 0, None)
-
- with tool_bar.long_slots[0]:
- st.selectbox("Connection", [connection["connection_name"]], disabled=True)
+ _, actions_column = st.columns([.1, .9], vertical_alignment="bottom")
+ testgen.flex_row_end(actions_column)
df = table_group_service.get_by_connection(project_code, connection_id)
@@ -71,33 +65,30 @@ def render(self, connection_id: int | None = None) -> None:
selected = fm.render_grid_select(df, show_columns, show_column_headers=show_column_headers)
- if tool_bar.short_slots[1].button(
- "➕ Add", help="Add a new Table Group", use_container_width=True # NOQA RUF001
+ if actions_column.button(
+ ":material/add: Add", help="Add a new Table Group"
):
add_table_group_dialog(project_code, connection)
disable_buttons = selected is None
- if tool_bar.short_slots[2].button(
- "🖊️ Edit", help="Edit the selected Table Group", disabled=disable_buttons, use_container_width=True
+ if actions_column.button(
+ ":material/edit: Edit", help="Edit the selected Table Group", disabled=disable_buttons
):
edit_table_group_dialog(project_code, connection, selected)
- if tool_bar.short_slots[3].button(
- "❌ Delete", help="Delete the selected Table Group", disabled=disable_buttons, use_container_width=True
+ if actions_column.button(
+ ":material/delete: Delete", help="Delete the selected Table Group", disabled=disable_buttons
):
delete_table_group_dialog(selected)
- if tool_bar.short_slots[4].button(
+ if actions_column.button(
f":{'gray' if disable_buttons else 'green'}[Test Suites →]",
help="Create or edit Test Suites for the selected Table Group",
disabled=disable_buttons,
- use_container_width=True,
):
- st.session_state["table_group"] = selected[0]
-
self.router.navigate(
- "connections:test-suites",
- {"connection_id": connection_id, "table_group_id": selected[0]["id"]},
+ "test-suites",
+ {"table_group_id": selected[0]["id"]},
)
if not selected:
diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py
index e0f58cd..f88148a 100644
--- a/testgen/ui/views/test_definitions.py
+++ b/testgen/ui/views/test_definitions.py
@@ -7,12 +7,13 @@
import testgen.ui.services.form_service as fm
import testgen.ui.services.query_service as dq
+import testgen.ui.services.table_group_service as table_group_service
import testgen.ui.services.test_definition_service as test_definition_service
-import testgen.ui.services.toolbar_service as tb
+import testgen.ui.services.test_suite_service as test_suite_service
from testgen.common import date_service
-from testgen.ui.navigation.menu import MenuItem
+from testgen.ui.components import widgets as testgen
from testgen.ui.navigation.page import Page
-from testgen.ui.services import authentication_service
+from testgen.ui.services import authentication_service, project_service
from testgen.ui.services.string_service import empty_if_null, snake_case_to_title_case
from testgen.ui.session import session
from testgen.ui.views.profiling_modal import view_profiling_button
@@ -21,138 +22,119 @@
class TestDefinitionsPage(Page):
- path = "test-definitions"
+ path = "test-suites:definitions"
can_activate: typing.ClassVar = [
lambda: session.authentication_status,
+ lambda: "test_suite_id" in session.current_page_args or "test-suites",
]
- breadcrumbs: typing.ClassVar = [
- {"label": "Overview", "path": "overview"},
- {"label": "Tests Definitions", "path": None},
- ]
- menu_item = MenuItem(icon="list_alt", label="Tests Definitions", order=4)
-
- def render(self, **_) -> None:
- # Get page parameters from session
- project_code = st.session_state["project"]
-
- connection = st.session_state["connection"] if "connection" in st.session_state.keys() else None
- table_group = st.session_state["table_group"] if "table_group" in st.session_state.keys() else None
- test_suite = st.session_state["test_suite"] if "test_suite" in st.session_state.keys() else None
-
- str_table_name = st.session_state["table_name"] if "table_name" in st.session_state.keys() else None
- str_column_name = None
+ def render(self, test_suite_id: str, table_name: str | None = None, column_name: str | None = None, **_kwargs) -> None:
+ test_suite = test_suite_service.get_by_id(test_suite_id)
+ table_group = table_group_service.get_by_id(test_suite["table_groups_id"])
+ project_code = table_group["project_code"]
+ project_service.set_current_project(project_code)
- export_container = fm.render_page_header(
+ testgen.page_header(
"Test Definitions",
"https://docs.datakitchen.io/article/dataops-testgen-help/testgen-test-types",
- lst_breadcrumbs=self.breadcrumbs,
- boo_show_refresh=True,
+ breadcrumbs=[
+ { "label": "Test Suites", "path": "test-suites", "params": { "project_code": project_code } },
+ { "label": test_suite["test_suite"] },
+ ],
)
- tool_bar = tb.ToolBar(5, 6, 4, None, multiline=True)
+ table_filter_column, column_filter_column, table_actions_column = st.columns([.3, .3, .4], vertical_alignment="bottom")
+ testgen.flex_row_end(table_actions_column)
+
+ actions_column, disposition_column = st.columns([.5, .5])
+ testgen.flex_row_start(actions_column)
+ testgen.flex_row_end(disposition_column)
+
+ with table_filter_column:
+ table_options = run_table_lookup_query(table_group["id"])
+ table_name = testgen.toolbar_select(
+ options=table_options,
+ value_column="table_name",
+ default_value=table_name,
+ bind_to_query="table_name",
+ required=True,
+ label="Table Name",
+ )
+ with column_filter_column:
+ column_options = get_column_names(table_group["id"], table_name)
+ column_name = testgen.toolbar_select(
+ options=column_options,
+ default_value=column_name,
+ bind_to_query="column_name",
+ label="Column Name",
+ disabled=not table_name,
+ )
- with tool_bar.long_slots[0]:
- str_connection_id, connection = prompt_for_connection(session.project, connection)
+ with disposition_column:
+ str_help = "Toggle on to perform actions on multiple test definitions"
+ do_multi_select = st.toggle("Multi-Select", help=str_help)
- # Prompt for Table Group
- with tool_bar.long_slots[1]:
- str_table_groups_id, str_connection_id, str_schema, table_group = prompt_for_table_group(
- session.project, table_group, str_connection_id
- )
+ if actions_column.button(
+ ":material/add: Add", help="Add a new Test Definition"
+ ):
+ add_test_dialog(project_code, table_group, test_suite, table_name, column_name)
- # Prompt for Test Suite
- if str_table_groups_id:
- with tool_bar.long_slots[2]:
- str_test_suite, test_suite = prompt_for_test_suite(str_table_groups_id, test_suite)
- with tool_bar.long_slots[3]:
- str_table_name = prompt_for_table_name(str_table_groups_id, str_table_name)
- if str_table_name:
- with tool_bar.long_slots[4]:
- str_column_name = prompt_for_column_name(str_table_groups_id, str_table_name)
-
- if str_test_suite and str_table_name:
- with tool_bar.short_slots[5]:
- str_help = "Toggle on to perform actions on multiple test definitions"
- do_multi_select = st.toggle("Multi-Select", help=str_help)
-
- if tool_bar.short_slots[0].button(
- "➕ Add", help="Add a new Test Definition", use_container_width=True # NOQA RUF001
- ):
- add_test_dialog(project_code, table_group, test_suite, str_table_name, str_column_name)
-
- selected = show_test_defs_grid(
- session.project, str_test_suite, str_table_name, str_column_name, do_multi_select, export_container,
- str_table_groups_id
- )
-
- # Display buttons
- if tool_bar.button_slots[0].button("✓", help="Activate for future runs", disabled=not selected):
- fm.reset_post_updates(
- update_test_definition(selected, "test_active", True, "Activated"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=[],
- )
- if tool_bar.button_slots[1].button("✘", help="Inactivate Test for future runs", disabled=not selected):
- fm.reset_post_updates(
- update_test_definition(selected, "test_active", False, "Inactivated"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=[],
- )
- if tool_bar.button_slots[2].button(
- "🔒", help="Protect from future test generation", disabled=not selected
- ):
- fm.reset_post_updates(
- update_test_definition(selected, "lock_refresh", True, "Locked"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=[],
- )
- if tool_bar.button_slots[3].button(
- "🔐", help="Unlock for future test generation", disabled=not selected
- ):
- fm.reset_post_updates(
- update_test_definition(selected, "lock_refresh", False, "Unlocked"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=[],
- )
-
- if selected:
- selected_test_def = selected[0]
-
- if tool_bar.short_slots[1].button(
- "🖊️ Edit", # RUF001
- help="Edit the Test Definition",
- use_container_width=True,
- disabled=not selected,
- ):
- edit_test_dialog(project_code, table_group, test_suite, str_table_name, str_column_name, selected_test_def)
-
- if tool_bar.short_slots[2].button(
- "❌ Delete",
- help="Delete the selected Test Definition",
- use_container_width=True,
- disabled=not selected,
- ):
- delete_test_dialog(selected_test_def)
+ selected = show_test_defs_grid(
+ session.project, test_suite["test_suite"], table_name, column_name, do_multi_select, table_actions_column,
+ table_group["id"]
+ )
+ fm.render_refresh_button(table_actions_column)
+
+ # Display buttons
+ if disposition_column.button("✓", help="Activate for future runs", disabled=not selected):
+ fm.reset_post_updates(
+ update_test_definition(selected, "test_active", True, "Activated"),
+ as_toast=True,
+ clear_cache=True,
+ lst_cached_functions=[],
+ )
+ if disposition_column.button("✘", help="Inactivate Test for future runs", disabled=not selected):
+ fm.reset_post_updates(
+ update_test_definition(selected, "test_active", False, "Inactivated"),
+ as_toast=True,
+ clear_cache=True,
+ lst_cached_functions=[],
+ )
+ if disposition_column.button(
+ "🔒", help="Protect from future test generation", disabled=not selected
+ ):
+ fm.reset_post_updates(
+ update_test_definition(selected, "lock_refresh", True, "Locked"),
+ as_toast=True,
+ clear_cache=True,
+ lst_cached_functions=[],
+ )
+ if disposition_column.button(
+ "🔐", help="Unlock for future test generation", disabled=not selected
+ ):
+ fm.reset_post_updates(
+ update_test_definition(selected, "lock_refresh", False, "Unlocked"),
+ as_toast=True,
+ clear_cache=True,
+ lst_cached_functions=[],
+ )
- else:
- st.markdown(":orange[Select a Test Suite and Table Name to view Test Definition details.]")
+ if selected:
+ selected_test_def = selected[0]
+ if actions_column.button(
+ ":material/edit: Edit",
+ help="Edit the Test Definition",
+ disabled=not selected,
+ ):
+ edit_test_dialog(project_code, table_group, test_suite, table_name, column_name, selected_test_def)
-class TestDefinitionsPageFromSuite(TestDefinitionsPage):
- path = "connections:test-definitions"
- breadcrumbs: typing.ClassVar = [
- {"label": "Overview", "path": "overview"},
- {"label": "Connections", "path": "connections"},
- {"label": "Table Groups", "path": "connections:table-groups"},
- {"label": "Test Suites", "path": "connections:test-suites"},
- {"label": "Test Definitions", "path": None},
- ]
- menu_item = None
+ if actions_column.button(
+ ":material/delete: Delete",
+ help="Delete the selected Test Definition",
+ disabled=not selected,
+ ):
+ delete_test_dialog(selected_test_def)
@st.dialog("Delete Test")
@@ -200,7 +182,6 @@ def show_test_form_by_id(test_definition_id):
selected_test_raw = test_definition_service.get_test_definitions(test_definition_ids=[test_definition_id])
test_definition = selected_test_raw.iloc[0].to_dict()
- mode = "edit"
project_code = test_definition["project_code"]
table_group_id = test_definition["table_groups_id"]
test_suite_name = test_definition["test_suite"]
@@ -855,12 +836,6 @@ def generate_test_defs_help(str_test_type):
return str_help
-@st.cache_data(show_spinner=False)
-def run_project_lookup_query():
- str_schema = st.session_state["dbschema"]
- return dq.run_project_lookup_query(str_schema)
-
-
@st.cache_data(show_spinner=False)
def run_test_type_lookup_query(str_test_type=None, boo_show_referential=True, boo_show_table=True,
boo_show_column=True, boo_show_custom=True):
@@ -869,12 +844,6 @@ def run_test_type_lookup_query(str_test_type=None, boo_show_referential=True, bo
boo_show_column, boo_show_custom)
-@st.cache_data(show_spinner=False)
-def run_connections_lookup_query(str_project_code):
- str_schema = st.session_state["dbschema"]
- return dq.run_connections_lookup_query(str_schema, str_project_code)
-
-
@st.cache_data(show_spinner=False)
def run_table_groups_lookup_query(str_project_code, str_connection_id=None, table_group_id=None):
str_schema = st.session_state["dbschema"]
@@ -899,99 +868,6 @@ def run_test_suite_lookup_query(str_table_groups_id, test_suite_name=None):
return dq.run_test_suite_lookup_by_tgroup_query(str_schema, str_table_groups_id, test_suite_name)
-def prompt_for_connection(str_project_code, selected_connection):
- str_id = None
-
- df = run_connections_lookup_query(str_project_code)
- lst_choices = df["connection_name"].tolist()
-
- if selected_connection:
- connection_name = selected_connection["connection_name"]
- selected_connection_index = lst_choices.index(connection_name)
- else:
- selected_connection_index = 0
-
- str_name = st.selectbox("Connection", lst_choices, index=selected_connection_index)
- if str_name:
- str_id = df.loc[df["connection_name"] == str_name, "id"].iloc[0]
- connection = df.loc[df["connection_name"] == str_name].iloc[0]
- return str_id, connection
-
-
-def prompt_for_table_group(str_project_code, selected_table_group, str_connection_id):
- str_id = None
- str_schema = None
- table_group = None
-
- df = run_table_groups_lookup_query(str_project_code, str_connection_id)
- lst_choices = df["table_groups_name"].tolist()
-
- table_group_name = None
- if selected_table_group:
- table_group_name = selected_table_group["table_groups_name"]
-
- if table_group_name and table_group_name in lst_choices:
- selected_table_group_index = lst_choices.index(table_group_name)
- else:
- selected_table_group_index = 0
-
- str_name = st.selectbox("Table Group", lst_choices, index=selected_table_group_index)
- if str_name:
- str_id = df.loc[df["table_groups_name"] == str_name, "id"].iloc[0]
- str_connection_id = df.loc[df["table_groups_name"] == str_name, "connection_id"].iloc[0]
- str_schema = df.loc[df["table_groups_name"] == str_name, "table_group_schema"].iloc[0]
- table_group = df.loc[df["table_groups_name"] == str_name].iloc[0]
- return str_id, str_connection_id, str_schema, table_group
-
-
-def prompt_for_test_suite(str_table_groups_id, selected_test_suite):
- df = run_test_suite_lookup_query(str_table_groups_id)
- lst_choices = df["test_suite"].tolist()
-
- test_suite = None
- test_suite_name = None
- if selected_test_suite:
- test_suite_name = selected_test_suite["test_suite"]
-
- if test_suite_name and test_suite_name in lst_choices:
- test_suite_index = lst_choices.index(test_suite_name)
- else:
- test_suite_index = 0
-
- str_name = st.selectbox("Test Suite", lst_choices, index=test_suite_index)
- if str_name:
- test_suite = df.loc[df["test_suite"] == str_name].iloc[0]
-
- return str_name, test_suite
-
-
-def prompt_for_table_name(str_table_groups_id, selected_table_name):
- df = run_table_lookup_query(str_table_groups_id)
- lst_choices = df["table_name"].tolist()
-
- if selected_table_name and selected_table_name in lst_choices:
- table_name_index = lst_choices.index(selected_table_name) + 1
- else:
- table_name_index = 0
-
- def table_name_callback():
- st.session_state["table_name"] = st.session_state.new_table_name
-
- str_name = selectbox(
- "Table Name", lst_choices, index=table_name_index, key="new_table_name", on_change=table_name_callback
- )
-
- return str_name
-
-
-def prompt_for_column_name(str_table_groups_id, str_table_name):
- lst_choices = get_column_names(str_table_groups_id, str_table_name)
- # Using extras selectbox to allow no entry
- str_name = selectbox("Column Name", lst_choices, key="column-name-main-drop-down")
-
- return str_name
-
-
def get_column_names(str_table_groups_id, str_table_name):
df = run_column_lookup_query(str_table_groups_id, str_table_name)
lst_choices = df["column_name"].tolist()
diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py
index 8f2e815..833f82f 100644
--- a/testgen/ui/views/test_results.py
+++ b/testgen/ui/views/test_results.py
@@ -9,10 +9,10 @@
import testgen.ui.services.database_service as db
import testgen.ui.services.form_service as fm
import testgen.ui.services.query_service as dq
-import testgen.ui.services.toolbar_service as tb
from testgen.common import ConcatColumnList, date_service
from testgen.ui.components import widgets as testgen
from testgen.ui.navigation.page import Page
+from testgen.ui.services import project_service
from testgen.ui.services.string_service import empty_if_null
from testgen.ui.session import session
from testgen.ui.views.profiling_modal import view_profiling_button
@@ -25,143 +25,121 @@ class TestResultsPage(Page):
path = "test-runs:results"
can_activate: typing.ClassVar = [
lambda: session.authentication_status,
- lambda: session.project != None or "overview",
+ lambda: "run_id" in session.current_page_args or "test-runs",
]
- def render(self) -> None:
- export_container = fm.render_page_header(
+ def render(self, run_id: str, status: str | None = None, **_kwargs) -> None:
+ run_date, test_suite_name, project_code = get_drill_test_run(run_id)
+ run_date = date_service.get_timezoned_timestamp(st.session_state, run_date)
+ project_service.set_current_project(project_code)
+
+ testgen.page_header(
"Test Results",
"https://docs.datakitchen.io/article/dataops-testgen-help/test-results",
- lst_breadcrumbs=[
- {"label": "Overview", "path": "overview"},
- {"label": "Test Runs", "path": "test-runs"},
- {"label": "Test Results", "path": None},
+ breadcrumbs=[
+ { "label": "Test Runs", "path": "test-runs", "params": { "project_code": project_code } },
+ { "label": f"{test_suite_name} | {run_date}" },
],
)
- str_project = st.session_state["project"] if "project" in st.session_state else None
-
- # Look for drill-down from another page
- if "drill_test_run" in st.session_state:
- str_sel_test_run = st.session_state["drill_test_run"]
- else:
- str_sel_test_run = None
-
- if not str_project:
- st.write("Choose a Project from the menu.")
- else:
- # Setup Toolbar
- tool_bar = tb.ToolBar(3, 1, 4, None)
-
- # Lookup Test Run
- if str_sel_test_run:
- df = get_drill_test_run(str_sel_test_run)
- if not df.empty:
- with tool_bar.long_slots[0]:
- time_columns = ["test_date"]
- date_service.accommodate_dataframe_to_timezone(df, st.session_state, time_columns)
- df["description"] = df["test_date"] + " | " + df["test_suite_description"]
- str_sel_test_run = fm.render_select(
- "Test Run", df, "description", "test_run_id", boo_required=True, boo_disabled=True
- )
-
- if str_sel_test_run:
- with tool_bar.long_slots[1]:
- lst_status_options = [
- "Failures and Warnings",
- "Failed Tests",
- "Tests with Warnings",
- "Passed Tests",
- ]
- str_sel_status = st.selectbox("Result Priority", lst_status_options)
-
- with tool_bar.short_slots[0]:
- str_help = "Toggle on to perform actions on multiple results"
- do_multi_select = st.toggle("Multi-Select", help=str_help)
-
- match str_sel_status:
- case "Failures and Warnings":
- str_sel_status = "'Failed','Warning'"
- case "Failed Tests":
- str_sel_status = "'Failed'"
- case "Tests with Warnings":
- str_sel_status = "'Warning'"
- case "Passed Tests":
- str_sel_status = "'Passed'"
-
- # Display main grid and retrieve selection
- selected = show_result_detail(str_sel_test_run, str_sel_status, do_multi_select, export_container)
-
- # Need to render toolbar buttons after grid, so selection status is maintained
- disable_dispo = True if not selected or str_sel_status == "'Passed'" else False
- if tool_bar.button_slots[0].button(
- "✓", help="Confirm this issue as relevant for this run", disabled=disable_dispo
- ):
- fm.reset_post_updates(
- do_disposition_update(selected, "Confirmed"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=[get_test_disposition],
- )
- if tool_bar.button_slots[1].button(
- "✘", help="Dismiss this issue as not relevant for this run", disabled=disable_dispo
- ):
- fm.reset_post_updates(
- do_disposition_update(selected, "Dismissed"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=[get_test_disposition],
- )
- if tool_bar.button_slots[2].button(
- "🔇", help="Mute this test to deactivate it for future runs", disabled=not selected
- ):
- fm.reset_post_updates(
- do_disposition_update(selected, "Inactive"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=[get_test_disposition],
- )
- if tool_bar.button_slots[3].button("⟲", help="Clear action", disabled=not selected):
- fm.reset_post_updates(
- do_disposition_update(selected, "No Decision"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=[get_test_disposition],
- )
-
- # Help Links
- st.markdown(
- "[Help on Test Types](https://docs.datakitchen.io/article/dataops-testgen-help/testgen-test-types)"
+ # Display summary bar
+ tests_summary = get_test_result_summary(run_id)
+ testgen.summary_bar(items=tests_summary, key="test_results", height=40, width=800)
+
+ # Setup Toolbar
+ status_filter_column, actions_column, export_button_column = st.columns([.3, .5, .2], vertical_alignment="bottom")
+ testgen.flex_row_end(actions_column)
+ testgen.flex_row_end(export_button_column)
+
+ with status_filter_column:
+ status_options = [
+ "Failures and Warnings",
+ "Failed Tests",
+ "Tests with Warnings",
+ "Passed Tests",
+ ]
+ status = testgen.toolbar_select(
+ options=status_options,
+ default_value=status,
+ required=True,
+ bind_to_query="status",
+ label="Result Status",
)
- # with st.sidebar:
- # st.divider()
-
-
-@st.cache_data(show_spinner=ALWAYS_SPIN)
-def run_test_suite_lookup_by_project_query(str_project_code):
- str_schema = st.session_state["dbschema"]
- return dq.run_test_suite_lookup_by_project_query(str_schema, str_project_code)
-
+ with actions_column:
+ str_help = "Toggle on to perform actions on multiple results"
+ do_multi_select = st.toggle("Multi-Select", help=str_help)
+
+ match status:
+ case "Failures and Warnings":
+ status = "'Failed','Warning'"
+ case "Failed Tests":
+ status = "'Failed'"
+ case "Tests with Warnings":
+ status = "'Warning'"
+ case "Passed Tests":
+ status = "'Passed'"
+
+ # Display main grid and retrieve selection
+ selected = show_result_detail(run_id, status, do_multi_select, export_button_column)
+
+ # Need to render toolbar buttons after grid, so selection status is maintained
+ disable_dispo = True if not selected or status == "'Passed'" else False
+ if actions_column.button(
+ "✓", help="Confirm this issue as relevant for this run", disabled=disable_dispo
+ ):
+ fm.reset_post_updates(
+ do_disposition_update(selected, "Confirmed"),
+ as_toast=True,
+ clear_cache=True,
+ lst_cached_functions=[get_test_disposition],
+ )
+ if actions_column.button(
+ "✘", help="Dismiss this issue as not relevant for this run", disabled=disable_dispo
+ ):
+ fm.reset_post_updates(
+ do_disposition_update(selected, "Dismissed"),
+ as_toast=True,
+ clear_cache=True,
+ lst_cached_functions=[get_test_disposition],
+ )
+ if actions_column.button(
+ "🔇", help="Mute this test to deactivate it for future runs", disabled=not selected
+ ):
+ fm.reset_post_updates(
+ do_disposition_update(selected, "Inactive"),
+ as_toast=True,
+ clear_cache=True,
+ lst_cached_functions=[get_test_disposition],
+ )
+ if actions_column.button("⟲", help="Clear action", disabled=not selected):
+ fm.reset_post_updates(
+ do_disposition_update(selected, "No Decision"),
+ as_toast=True,
+ clear_cache=True,
+ lst_cached_functions=[get_test_disposition],
+ )
-@st.cache_data(show_spinner=ALWAYS_SPIN)
-def run_test_run_lookup_by_date(str_project_code, str_run_date):
- str_schema = st.session_state["dbschema"]
- return dq.run_test_run_lookup_by_date(str_schema, str_project_code, str_run_date)
+ # Help Links
+ st.markdown(
+ "[Help on Test Types](https://docs.datakitchen.io/article/dataops-testgen-help/testgen-test-types)"
+ )
@st.cache_data(show_spinner=ALWAYS_SPIN)
def get_drill_test_run(str_test_run_id):
str_schema = st.session_state["dbschema"]
str_sql = f"""
- SELECT tr.id::VARCHAR as test_run_id,
- tr.test_starttime as test_date,
- ts.test_suite as test_suite_description
+ SELECT tr.test_starttime as test_date,
+ ts.test_suite,
+ ts.project_code
FROM {str_schema}.test_runs tr
INNER JOIN {str_schema}.test_suites ts ON tr.test_suite_id = ts.id
WHERE tr.id = '{str_test_run_id}'::UUID;
"""
- return db.retrieve_data(str_sql)
+ df = db.retrieve_data(str_sql)
+ if not df.empty:
+ return df.at[0, "test_date"], df.at[0, "test_suite"], df.at[0, "project_code"]
@st.cache_data(show_spinner="Retrieving Results")
@@ -574,10 +552,6 @@ def show_test_def_detail(str_test_def_id):
def show_result_detail(str_run_id, str_sel_test_status, do_multi_select, export_container):
- # Display summary bar
- tests_summary = get_test_result_summary(str_run_id)
- testgen.summary_bar(items=tests_summary, key="test_results", height=40, width=800)
-
# Retrieve test results (always cached, action as null)
df = get_test_results(str_run_id, str_sel_test_status)
# Retrieve disposition action (cache refreshed)
diff --git a/testgen/ui/views/test_runs.py b/testgen/ui/views/test_runs.py
index bdaa407..3f957d2 100644
--- a/testgen/ui/views/test_runs.py
+++ b/testgen/ui/views/test_runs.py
@@ -7,8 +7,8 @@
import testgen.ui.services.form_service as fm
import testgen.ui.services.query_service as dq
import testgen.ui.services.test_run_service as test_run_service
-import testgen.ui.services.toolbar_service as tb
from testgen.common import date_service
+from testgen.ui.components import widgets as testgen
from testgen.ui.navigation.menu import MenuItem
from testgen.ui.navigation.page import Page
from testgen.ui.session import session
@@ -22,63 +22,66 @@ class TestRunsPage(Page):
]
menu_item = MenuItem(icon="labs", label="Data Quality Testing", order=2)
- def render(self) -> None:
- fm.render_page_header(
+ def render(self, project_code: str | None = None, table_group_id: str | None = None, test_suite_id: str | None = None, **_kwargs) -> None:
+ project_code = project_code or st.session_state["project"]
+
+ testgen.page_header(
"Test Runs",
"https://docs.datakitchen.io/article/dataops-testgen-help/test-results",
- lst_breadcrumbs=[
- {"label": "Overview", "path": "overview"},
- {"label": "Test Runs", "path": None},
- ],
- boo_show_refresh=True,
)
- if "project" not in st.session_state:
- st.write("You must select a Project in the Home Page.")
+ # Setup Toolbar
+ group_filter_column, suite_filter_column, actions_column = st.columns([.3, .3, .4], vertical_alignment="bottom")
+ testgen.flex_row_end(actions_column)
+
+ with group_filter_column:
+ # Table Groups selection -- optional criterion
+ df_tg = get_db_table_group_choices(project_code)
+ table_groups_id = testgen.toolbar_select(
+ options=df_tg,
+ value_column="id",
+ display_column="table_groups_name",
+ default_value=table_group_id,
+ bind_to_query="table_group_id",
+ label="Table Group",
+ )
+
+ with suite_filter_column:
+ # Test Suite selection -- optional criterion
+ df_ts = get_db_test_suite_choices(project_code, table_groups_id)
+ test_suite_id = testgen.toolbar_select(
+ options=df_ts,
+ value_column="id",
+ display_column="test_suite",
+ default_value=test_suite_id,
+ bind_to_query="test_suite_id",
+ label="Test Suite",
+ )
+
+ df, show_columns = get_db_test_runs(project_code, table_groups_id, test_suite_id)
+
+ time_columns = ["run_date"]
+ date_service.accommodate_dataframe_to_timezone(df, st.session_state, time_columns)
+
+ dct_selected_rows = fm.render_grid_select(df, show_columns)
+ dct_selected_row = dct_selected_rows[0] if dct_selected_rows else None
+
+ if actions_column.button(
+ f":{'gray' if not dct_selected_row else 'green'}[Test Results →]",
+ help="Review test results for the selected run",
+ disabled=not dct_selected_row,
+ ):
+ self.router.navigate("test-runs:results", { "run_id": dct_selected_row["test_run_id"] })
+
+ fm.render_refresh_button(actions_column)
+
+ if dct_selected_rows:
+ open_record_detail(
+ dct_selected_rows[0],
+ )
+ st.markdown(":orange[Click button to access test results for selected run.]")
else:
- str_project = st.session_state["project"]
-
- # Setup Toolbar
- tool_bar = tb.ToolBar(4, 1, 0, None)
-
- with tool_bar.long_slots[0]:
- # Table Groups selection -- optional criterion
- df_tg = get_db_table_group_choices(str_project)
- str_table_groups_id = fm.render_select(
- "Table Group", df_tg, "table_groups_name", "id", boo_required=False, str_default=None
- )
-
- with tool_bar.long_slots[1]:
- # Table Groups selection -- optional criterion
- df_ts = get_db_test_suite_choices(str_project, str_table_groups_id)
- str_test_suite_id = fm.render_select(
- "Test Suite", df_ts, "test_suite_description", "id", boo_required=False, str_default=None
- )
-
- df, show_columns = get_db_test_runs(str_project, str_table_groups_id, str_test_suite_id)
-
- time_columns = ["run_date"]
- date_service.accommodate_dataframe_to_timezone(df, st.session_state, time_columns)
-
- dct_selected_rows = fm.render_grid_select(df, show_columns)
- dct_selected_row = dct_selected_rows[0] if dct_selected_rows else None
-
- if tool_bar.short_slots[0].button(
- f":{'gray' if not dct_selected_row else 'green'}[Test Results →]",
- help="Review test results for the selected run",
- use_container_width=True,
- disabled=not dct_selected_row,
- ):
- st.session_state["drill_test_run"] = dct_selected_row["test_run_id"]
- self.router.navigate("test-runs:results")
-
- if dct_selected_rows:
- open_record_detail(
- dct_selected_rows[0],
- )
- st.markdown(":orange[Click button to access test results for selected run.]")
- else:
- st.markdown(":orange[Select a run to access test results.]")
+ st.markdown(":orange[Select a run to access test results.]")
@st.cache_data(show_spinner=False)
diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py
index 8663b7c..7915909 100644
--- a/testgen/ui/views/test_suites.py
+++ b/testgen/ui/views/test_suites.py
@@ -7,69 +7,60 @@
import testgen.ui.services.authentication_service as authentication_service
import testgen.ui.services.form_service as fm
+import testgen.ui.services.query_service as dq
import testgen.ui.services.test_suite_service as test_suite_service
from testgen.commands.run_execute_tests import run_execution_steps_in_background
from testgen.commands.run_generate_tests import run_test_gen_queries
from testgen.commands.run_observability_exporter import export_test_results
from testgen.ui.components import widgets as testgen
+from testgen.ui.navigation.menu import MenuItem
from testgen.ui.navigation.page import Page
-from testgen.ui.services import connection_service, table_group_service
from testgen.ui.services.string_service import empty_if_null
from testgen.ui.session import session
class TestSuitesPage(Page):
- path = "connections:test-suites"
+ path = "test-suites"
can_activate: typing.ClassVar = [
lambda: authentication_service.current_user_has_admin_role() or "overview",
lambda: session.authentication_status,
]
+ menu_item = MenuItem(icon="list_alt", label="Test Suites", order=4)
- def render(self, connection_id: str | None = None, table_group_id: str | None = None) -> None:
- fm.render_page_header(
+ def render(self, project_code: str | None = None, table_group_id: str | None = None, **_kwargs) -> None:
+ project_code = st.session_state["project"]
+
+ testgen.page_header(
"Test Suites",
"https://docs.datakitchen.io/article/dataops-testgen-help/create-a-test-suite",
- lst_breadcrumbs=[
- {"label": "Overview", "path": "overview"},
- {"label": "Connections", "path": "connections"},
- {"label": "Table Groups", "path": "connections:table-groups"},
- {"label": "Test Suites", "path": None},
- ],
)
- # Get page parameters from session
- project_code = st.session_state["project"]
- connection = connection_service.get_by_id(connection_id) if connection_id else st.session_state["connection"]
-
- table_group = st.session_state.get("table_group")
- if table_group_id:
- table_group = table_group_service.get_by_id(table_group_id)
- table_group = table_group.iloc[0]
-
- connection_id = connection["connection_id"]
- table_group_id = table_group["id"]
-
- tool_bar = st.columns([.2, .2, .4, .2], vertical_alignment="bottom")
-
- with tool_bar[0]:
- st.selectbox("Connection", [connection["connection_name"]], disabled=True)
+ group_filter_column, actions_column = st.columns([.2, .8], vertical_alignment="bottom")
+ testgen.flex_row_end(actions_column)
+
+ with group_filter_column:
+ df_tg = get_db_table_group_choices(project_code)
+ table_group_id = testgen.toolbar_select(
+ options=df_tg,
+ value_column="id",
+ display_column="table_groups_name",
+ default_value=table_group_id,
+ label="Table Group",
+ bind_to_query="table_group_id",
+ )
- with tool_bar[1]:
- st.selectbox("Table Group", [table_group["table_groups_name"]], disabled=True)
+ df = test_suite_service.get_by_project(project_code, table_group_id)
- with tool_bar[3]:
+ with actions_column:
st.button(
":material/add: Add Test Suite",
key="test_suite:keys:add",
help="Add a new test suite",
- use_container_width=True,
- on_click=lambda: add_test_suite_dialog(project_code, connection, table_group),
+ on_click=lambda: add_test_suite_dialog(project_code, df_tg),
)
- df = test_suite_service.get_by_table_group(project_code, table_group_id)
-
for _, test_suite in df.iterrows():
- subtitle = f"{connection['connection_name']} > {table_group['table_groups_name']}"
+ subtitle = f"{test_suite['connection_name']} > {test_suite['table_groups_name']}"
with testgen.card(title=test_suite["test_suite"], subtitle=subtitle) as test_suite_card:
with test_suite_card.actions:
testgen.button(
@@ -85,7 +76,7 @@ def render(self, connection_id: str | None = None, table_group_id: str | None =
icon="edit",
tooltip="Edit test suite",
tooltip_position="right",
- on_click=partial(edit_test_suite_dialog, project_code, connection, table_group, test_suite),
+ on_click=partial(edit_test_suite_dialog, project_code, df_tg, test_suite),
key=f"test_suite:keys:edit:{test_suite['id']}",
)
testgen.button(
@@ -102,7 +93,8 @@ def render(self, connection_id: str | None = None, table_group_id: str | None =
with main_section:
testgen.link(
label=f"{test_suite['test_ct']} tests definitions",
- href="test-definitions",
+ href="test-suites:definitions",
+ params={ "test_suite_id": test_suite["id"] },
right_icon="chevron_right",
key=f"test_suite:keys:go-to-definitions:{test_suite['id']}",
)
@@ -116,11 +108,12 @@ def render(self, connection_id: str | None = None, table_group_id: str | None =
if (latest_run_start := test_suite["latest_run_start"]) and not pd.isnull(latest_run_start):
with latest_run_section:
- st.html('')
+ testgen.no_flex_gap()
st.html('
Latest Run
')
testgen.link(
label=latest_run_start.strftime("%B %d, %H:%M %p"),
- href="test-runs",
+ href="test-runs:results",
+ params={ "run_id": str(test_suite["latest_run_id"]) },
right_icon="chevron_right",
style="margin-bottom: 8px;",
height=29,
@@ -153,19 +146,23 @@ def render(self, connection_id: str | None = None, table_group_id: str | None =
)
+@st.cache_data(show_spinner=False)
+def get_db_table_group_choices(project_code):
+ schema = st.session_state["dbschema"]
+ return dq.run_table_groups_lookup_query(schema, project_code)
+
+
@st.dialog(title="Add Test Suite")
-def add_test_suite_dialog(project_code, connection, table_group):
- show_test_suite("add", project_code, connection, table_group)
+def add_test_suite_dialog(project_code, table_groups_df):
+ show_test_suite("add", project_code, table_groups_df)
@st.dialog(title="Edit Test Suite")
-def edit_test_suite_dialog(project_code, connection, table_group, selected):
- show_test_suite("edit", project_code, connection, table_group, selected)
+def edit_test_suite_dialog(project_code, table_groups_df, selected):
+ show_test_suite("edit", project_code, table_groups_df, selected)
-def show_test_suite(mode, project_code, connection, table_group, selected=None):
- connection_id = connection["connection_id"]
- table_group_id = table_group["id"]
+def show_test_suite(mode, project_code, table_groups_df, selected=None):
severity_options = ["Inherit", "Failed", "Warning"]
selected_test_suite = selected if mode == "edit" else None
@@ -175,8 +172,8 @@ def show_test_suite(mode, project_code, connection, table_group, selected=None):
# establish default values
test_suite_id = selected_test_suite["id"] if mode == "edit" else None
test_suite = empty_if_null(selected_test_suite["test_suite"]) if mode == "edit" else ""
- connection_id = selected_test_suite["connection_id"] if mode == "edit" else connection_id
- table_groups_id = selected_test_suite["table_groups_id"] if mode == "edit" else table_group_id
+ connection_id = selected_test_suite["connection_id"] if mode == "edit" else None
+ table_groups_id = selected_test_suite["table_groups_id"] if mode == "edit" else None
test_suite_description = empty_if_null(selected_test_suite["test_suite_description"]) if mode == "edit" else ""
test_action = empty_if_null(selected_test_suite["test_action"]) if mode == "edit" else ""
severity_index = severity_options.index(selected_test_suite["severity"]) if mode == "edit" else 0
@@ -200,6 +197,12 @@ def show_test_suite(mode, project_code, connection, table_group, selected=None):
),
"connection_id": connection_id,
"table_groups_id": table_groups_id,
+ "table_groups_name": right_column.selectbox(
+ label="Table Group",
+ options=table_groups_df["table_groups_name"],
+ index=int(table_groups_df[table_groups_df["id"] == table_groups_id].index[0]) if table_groups_id else 0,
+ disabled=(mode != "add"),
+ ),
"test_suite_description": left_column.text_input(
label="Test Suite Description", max_chars=40, value=test_suite_description
),
@@ -253,6 +256,10 @@ def show_test_suite(mode, project_code, connection, table_group, selected=None):
if mode == "edit":
test_suite_service.edit(entity)
else:
+ selected_table_group_name = entity["table_groups_name"]
+ selected_table_group = table_groups_df[table_groups_df["table_groups_name"] == selected_table_group_name].iloc[0]
+ entity["connection_id"] = selected_table_group["connection_id"]
+ entity["table_groups_id"] = selected_table_group["id"]
test_suite_service.add(entity)
success_message = (
"Changes have been saved successfully. "
From 4951576092cb0c500e6ae9ab8387e36d5ab10139 Mon Sep 17 00:00:00 2001
From: Aarthy Adityan
Date: Thu, 5 Sep 2024 17:48:04 -0400
Subject: [PATCH 36/78] fix: fix bugs in query param handling and test suite
query
---
testgen/ui/components/widgets/page.py | 10 +++++--
testgen/ui/queries/test_suite_queries.py | 37 +++++++++++++-----------
testgen/ui/views/test_suites.py | 2 +-
3 files changed, 28 insertions(+), 21 deletions(-)
diff --git a/testgen/ui/components/widgets/page.py b/testgen/ui/components/widgets/page.py
index 2715aff..6f429b8 100644
--- a/testgen/ui/components/widgets/page.py
+++ b/testgen/ui/components/widgets/page.py
@@ -54,15 +54,19 @@ def toolbar_select(
else:
kwargs["options"] = options
if default_value in options:
- kwargs["index"] = options.index(default_value)
+ kwargs["index"] = options.index(default_value) + (0 if required else 1)
if bind_to_query:
kwargs["key"] = kwargs.get("key", f"toolbar_select_{bind_to_query}")
+ if default_value is not None and kwargs.get("index") is None:
+ Router().set_query_params({ bind_to_query: None }) # Unset the query params if the current value is not valid
def update_query_params():
query_value = st.session_state[kwargs["key"]]
- if isinstance(options, pd.DataFrame):
- query_value = options.loc[options[display_column] == query_value, value_column].iloc[0] if query_value != "---" else None
+ if not required and query_value == "---":
+ query_value = None
+ elif isinstance(options, pd.DataFrame):
+ query_value = options.loc[options[display_column] == query_value, value_column].iloc[0]
Router().set_query_params({ bind_to_query: query_value })
kwargs["on_change"] = update_query_params
diff --git a/testgen/ui/queries/test_suite_queries.py b/testgen/ui/queries/test_suite_queries.py
index 57ffa16..48cd8b2 100644
--- a/testgen/ui/queries/test_suite_queries.py
+++ b/testgen/ui/queries/test_suite_queries.py
@@ -7,6 +7,10 @@
@st.cache_data(show_spinner=False)
def get_by_project(schema, project_code, table_group_id=None):
sql = f"""
+ WITH last_run_date
+ AS (SELECT test_suite_id, MAX(test_starttime) as test_starttime
+ FROM testgen.test_runs
+ GROUP BY test_suite_id)
SELECT
suites.id::VARCHAR(50),
suites.project_code,
@@ -23,23 +27,23 @@ def get_by_project(schema, project_code, table_group_id=None):
suites.component_key,
suites.component_type,
suites.component_name,
- COUNT(definitions.id) as test_ct,
- last_run.id as latest_run_id,
- MAX(last_run.test_starttime) as latest_run_start,
- MAX(last_run.passed_ct) as last_run_passed_ct,
- MAX(last_run.warning_ct) as last_run_warning_ct,
- MAX(last_run.failed_ct) as last_run_failed_ct,
- MAX(last_run.error_ct) as last_run_error_ct
+ last_run.id as latest_run_id,
+ last_run.test_starttime as latest_run_start,
+ last_run.passed_ct + last_run.warning_ct + last_run.failed_ct + last_run.error_ct as last_run_test_ct,
+ last_run.passed_ct as last_run_passed_ct,
+ last_run.warning_ct as last_run_warning_ct,
+ last_run.failed_ct as last_run_failed_ct,
+ last_run.error_ct as last_run_error_ct
FROM {schema}.test_suites as suites
- LEFT OUTER JOIN (
- SELECT * FROM {schema}.test_runs ORDER BY test_starttime DESC LIMIT 1
- ) AS last_run ON (last_run.test_suite_id = suites.id)
- LEFT OUTER JOIN {schema}.test_definitions AS definitions
- ON (definitions.test_suite_id = suites.id)
- LEFT OUTER JOIN {schema}.connections AS connections
- ON (connections.connection_id = suites.connection_id)
- LEFT OUTER JOIN {schema}.table_groups as groups
- ON (groups.id = suites.table_groups_id)
+ LEFT JOIN last_run_date lrd
+ ON (suites.id = lrd.test_suite_id)
+ LEFT JOIN {schema}.test_runs last_run
+ ON (lrd.test_suite_id = last_run.test_suite_id
+ AND lrd.test_starttime = last_run.test_starttime)
+ LEFT JOIN {schema}.connections AS connections
+ ON (connections.connection_id = suites.connection_id)
+ LEFT JOIN {schema}.table_groups as groups
+ ON (groups.id = suites.table_groups_id)
WHERE suites.project_code = '{project_code}'
"""
@@ -49,7 +53,6 @@ def get_by_project(schema, project_code, table_group_id=None):
"""
sql += """
- GROUP BY suites.id, groups.table_groups_name, connections.connection_id, last_run.id
ORDER BY suites.test_suite;
"""
diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py
index 7915909..caa497e 100644
--- a/testgen/ui/views/test_suites.py
+++ b/testgen/ui/views/test_suites.py
@@ -92,7 +92,7 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N
with main_section:
testgen.link(
- label=f"{test_suite['test_ct']} tests definitions",
+ label=f"{test_suite['last_run_test_ct']} tests definitions",
href="test-suites:definitions",
params={ "test_suite_id": test_suite["id"] },
right_icon="chevron_right",
From 85436f79ce6f48fc36f1cb2d34f0d5bcdbc0129a Mon Sep 17 00:00:00 2001
From: Luis Trinidad
Date: Thu, 5 Sep 2024 15:53:59 -0400
Subject: [PATCH 37/78] fix: fetch latest version from docker or pypi
with proper configuration, both the UI and CLI can now fetch the latest
version from dockerhub or pypi, and, optionally, authenticate the
request sent to dockerhub.
---
Dockerfile | 1 +
testgen/__main__.py | 3 +-
testgen/common/docker_service.py | 48 ---------
testgen/common/version_service.py | 79 +++++++++++++++
testgen/settings.py | 42 +++++++-
testgen/ui/bootstrap.py | 28 ++----
testgen/ui/session.py | 6 +-
tests/unit/test_version_service.py | 150 +++++++++++++++++++++++++++++
8 files changed, 281 insertions(+), 76 deletions(-)
create mode 100644 testgen/common/version_service.py
create mode 100644 tests/unit/test_version_service.py
diff --git a/Dockerfile b/Dockerfile
index 9c6f0ef..e436ca4 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -30,6 +30,7 @@ ENV PATH="$PATH:/dk/bin:/opt/mssql-tools/bin/"
ARG TESTGEN_VERSION
ENV TESTGEN_VERSION=v$TESTGEN_VERSION
+ENV TG_RELEASE_CHECK=docker
ENV STREAMLIT_SERVER_MAX_UPLOAD_SIZE=200
diff --git a/testgen/__main__.py b/testgen/__main__.py
index 234428a..285e949 100644
--- a/testgen/__main__.py
+++ b/testgen/__main__.py
@@ -43,6 +43,7 @@
get_tg_host,
get_tg_schema,
logs,
+ version_service,
)
from testgen.utils import plugins
@@ -60,7 +61,7 @@ class Configuration:
@tui()
@click.group(
- help=f"This version: {settings.VERSION} \n\nLatest version: {docker_service.check_for_new_docker_release()} \n\nSchema revision: {get_schema_revision()}"
+ help=f"This version: {settings.VERSION} \n\nLatest version: {version_service.get_latest_version()} \n\nSchema revision: {get_schema_revision()}"
)
@click.option(
"-v",
diff --git a/testgen/common/docker_service.py b/testgen/common/docker_service.py
index 7b4fcfa..4d864d1 100644
--- a/testgen/common/docker_service.py
+++ b/testgen/common/docker_service.py
@@ -1,58 +1,10 @@
import logging
-import requests
-
-from testgen import settings
from testgen.common import get_tg_db, get_tg_host, get_tg_password, get_tg_schema, get_tg_username
LOG = logging.getLogger("testgen")
-
-def check_for_new_docker_release() -> str:
- if not settings.CHECK_FOR_LATEST_VERSION:
- return "unknown"
-
- try:
- tags = get_docker_tags()
-
- if len(tags) == 0:
- LOG.debug("docker_service: No tags to parse, skipping check.")
- return "unknown"
-
- ordered_tags = sorted(tags, key=lambda item: item[1], reverse=True)
- latest_tag = ordered_tags[0][0]
-
- if latest_tag != settings.VERSION:
- LOG.warning(
- f"A new TestGen upgrade is available. Please update to version {latest_tag} for new features and improvements."
- )
-
- return latest_tag # noqa: TRY300
- except Exception:
- LOG.warning("Unable to check for latest release", exc_info=True, stack_info=True)
-
-
-def get_docker_tags(url: str = "https://hub.docker.com/v2/repositories/datakitchen/dataops-testgen/tags/"):
- params = {"page_size": 25, "page": 1, "ordering": "last_updated"}
- response = requests.get(url, params=params, timeout=3)
-
- tags_to_return = []
- if not response.status_code == 200:
- LOG.warning(f"docker_service: Failed to fetch docker tags. Status code: {response.status_code}")
- return tags_to_return
-
- tags_data = response.json()
- results = tags_data.get("results", [])
- for result in results:
- tag_name = result["name"]
- last_pushed = result["tag_last_pushed"]
- if tag_name.count(".") >= 2 and "experimental" not in tag_name:
- tags_to_return.append((tag_name, last_pushed))
-
- return tags_to_return
-
-
def check_basic_configuration():
ret = True
message = ""
diff --git a/testgen/common/version_service.py b/testgen/common/version_service.py
new file mode 100644
index 0000000..c2317b1
--- /dev/null
+++ b/testgen/common/version_service.py
@@ -0,0 +1,79 @@
+import logging
+
+import requests
+
+from testgen import settings
+
+LOG = logging.getLogger("testgen")
+
+
+def get_latest_version() -> str:
+ try:
+ return {
+ "pypi": _get_last_pypi_release,
+ "docker": _get_last_docker_release,
+ "yes": _get_last_docker_release, # NOTE: kept for backward compatibility
+ }.get(settings.CHECK_FOR_LATEST_VERSION, lambda: "unknown")()
+ except:
+ return "unknown"
+
+
+def _get_last_pypi_release() -> str:
+ response = requests.get("https://pypi.org/pypi/dataops-testgen/json", timeout=3)
+ if response.status_code != 200:
+ LOG.warning(f"version_service: Failed to fetch PyPi releases. Status code: {response.status_code}")
+ return "unknown"
+
+ package_data = response.json()
+ package_releases = list((package_data.get("releases") or {}).keys())
+
+ return _sorted_tags(package_releases)[0]
+
+
+def _get_last_docker_release() -> str:
+ headers = {}
+ if settings.DOCKER_HUB_USERNAME and settings.DOCKER_HUB_PASSWORD:
+ auth_response = requests.post(
+ "https://hub.docker.com/v2/users/login",
+ json={"username": settings.DOCKER_HUB_USERNAME, "password": settings.DOCKER_HUB_PASSWORD},
+ timeout=5,
+ )
+ if auth_response.status_code != 200:
+ LOG.warning(
+ "version_service: unable to login against https://hub.docker.com."
+ f" Status code: {auth_response.status_code}"
+ )
+ return "unknown"
+ headers["Authorization"] = f"Bearer {auth_response.json()['token']}"
+
+ response = requests.get(
+ f"https://hub.docker.com/v2/repositories/{settings.DOCKER_HUB_REPOSITORY}/tags",
+ headers=headers,
+ params={"page_size": 25, "page": 1, "ordering": "last_updated"},
+ timeout=3,
+ )
+
+ if response.status_code != 200:
+ LOG.warning(f"version_service: Failed to fetch docker tags. Status code: {response.status_code}")
+ return "unknown"
+
+ tags_to_return = []
+ tags_data = response.json()
+ results = tags_data.get("results", [])
+ for result in results:
+ tag_name = result["name"]
+ if tag_name.count(".") >= 2 and "experimental" not in tag_name:
+ tags_to_return.append(tag_name)
+
+ if len(tags_to_return) <= 0:
+ return "unknown"
+
+ return _sorted_tags(tags_to_return)[0]
+
+
+def _sorted_tags(tags: list[str]) -> list[str]:
+ sorted_tags_as_tuples = sorted(
+ [tuple([ int(i) for i in tag.replace("v", "").split(".") ]) for tag in tags],
+ reverse=True,
+ )
+ return [".".join([str(i) for i in tag_tuple]) for tag_tuple in sorted_tags_as_tuples]
diff --git a/testgen/settings.py b/testgen/settings.py
index 627ae61..595e402 100644
--- a/testgen/settings.py
+++ b/testgen/settings.py
@@ -1,4 +1,5 @@
import os
+import typing
IS_DEBUG_LOG_LEVEL: bool = os.getenv("TESTGEN_DEBUG_LOG_LEVEL", "no").lower() == "yes"
"""
@@ -415,13 +416,48 @@
defaults to: `default`
"""
-CHECK_FOR_LATEST_VERSION: bool = os.getenv("TG_DOCKER_RELEASE_CHECK_ENABLED", "yes").lower() == "yes"
+CHECK_FOR_LATEST_VERSION: typing.Literal["pypi", "docker", "no"] = typing.cast(
+ typing.Literal["pypi", "docker", "no"],
+ os.getenv("TG_RELEASE_CHECK", os.getenv("TG_DOCKER_RELEASE_CHECK_ENABLED", "pypi")).lower(),
+)
"""
-When True, enables calling Docker Hub API to fetch the latest released
+When set to a registry name, enables calling that registry's API to fetch the latest released
image tag. The fetched tag is displayed in the UI menu.
from env variable: `TG_DOCKER_RELEASE_CHECK_ENABLED`
-defaults to: `True`
+choices: `pypi`, `docker`, `no`
+defaults to: `pypi`
+"""
+
+DOCKER_HUB_REPOSITORY: str = os.getenv(
+ "TESTGEN_DOCKER_HUB_REPO",
+ "datakitchen/dataops-testgen",
+)
+"""
+URL to the docker hub repository containing the dataops testgen image.
+Used to check for new releases when `CHECK_FOR_LATEST_VERSION` is set to
+`docker`.
+
+from env variable: `TESTGEN_DOCKER_HUB_REPO`
+defaults to: datakitchen/dataops-testgen
+"""
+
+DOCKER_HUB_USERNAME: str | None = os.getenv("TESTGEN_DOCKER_HUB_USERNAME", None)
+"""
+Username to authenticate against Docker Hub API before fetching the list
+of tags. Required if `DOCKER_HUB_REPOSITORY` is a private repository.
+
+from env variable: `TESTGEN_DOCKER_HUB_USERNAME`
+defaults to: None
+"""
+
+DOCKER_HUB_PASSWORD: str | None = os.getenv("TESTGEN_DOCKER_HUB_PASSWORD", None)
+"""
+Password to authenticate against Docker Hub API before fetching the list
+of tags. Required if `DOCKER_HUB_REPOSITORY` is a private repository.
+
+from env variable: `TESTGEN_DOCKER_HUB_PASSWORD`
+defaults to: None
"""
VERSION: str = os.getenv("TESTGEN_VERSION", "unknown")
diff --git a/testgen/ui/bootstrap.py b/testgen/ui/bootstrap.py
index 03a95f5..05b943f 100644
--- a/testgen/ui/bootstrap.py
+++ b/testgen/ui/bootstrap.py
@@ -3,11 +3,9 @@
import inspect
import logging
-import streamlit
-
from testgen import settings
from testgen.commands.run_upgrade_db_config import get_schema_revision
-from testgen.common import configure_logging, docker_service
+from testgen.common import configure_logging, version_service
from testgen.ui.navigation.menu import Menu, Version
from testgen.ui.navigation.page import Page
from testgen.ui.navigation.router import Router
@@ -51,10 +49,14 @@ def __init__(self, router: Router, menu: Menu, logger: logging.Logger) -> None:
self.logger = logger
def get_version(self) -> Version:
+ latest_version = self.menu.version.latest
+ if not session.latest_version:
+ latest_version = version_service.get_latest_version()
+
return Version(
current=settings.VERSION,
- latest=check_for_upgrade(),
- schema=_get_schema_rev(),
+ latest=latest_version,
+ schema=get_schema_revision(),
)
@@ -86,22 +88,8 @@ def run(log_level: int = logging.INFO) -> Application:
version=Version(
current=settings.VERSION,
latest="...",
- schema=_get_schema_rev(),
+ schema=get_schema_revision(),
),
),
logger=LOG,
)
-
-
-@streamlit.cache_resource(show_spinner=False)
-def _get_schema_rev() -> str:
- revision = session.sb_schema_rev
- if not revision:
- revision = session.sb_schema_rev = get_schema_revision()
- return revision
-
-
-@streamlit.cache_resource(show_spinner=False)
-def check_for_upgrade():
- return docker_service.check_for_new_docker_release()
-
diff --git a/testgen/ui/session.py b/testgen/ui/session.py
index 2aaeba9..b10e251 100644
--- a/testgen/ui/session.py
+++ b/testgen/ui/session.py
@@ -27,9 +27,7 @@ class TestgenSession(Singleton):
project: str
add_project: bool
-
- sb_latest_rel: str
- sb_schema_rev: str
+ latest_version: str | None
def __init__(self, state: SessionStateProxy) -> None:
super().__setattr__("_state", state)
@@ -49,4 +47,4 @@ def __delattr__(self, key: str) -> None:
del state[key]
-session = TestgenSession(st.session_state)
+session: TestgenSession = TestgenSession(st.session_state)
diff --git a/tests/unit/test_version_service.py b/tests/unit/test_version_service.py
new file mode 100644
index 0000000..b97890c
--- /dev/null
+++ b/tests/unit/test_version_service.py
@@ -0,0 +1,150 @@
+from unittest import mock
+
+import pytest
+
+from testgen.common.version_service import get_latest_version
+
+
+@pytest.mark.unit
+@mock.patch("testgen.common.version_service.settings")
+@mock.patch("testgen.common.version_service.requests")
+def test_calls_pypi_api(requests: mock.Mock, settings: mock.Mock):
+ settings.CHECK_FOR_LATEST_VERSION = "pypi"
+ get_latest_version()
+ requests.get.assert_called_with("https://pypi.org/pypi/dataops-testgen/json", timeout=3)
+
+
+@pytest.mark.unit
+@mock.patch("testgen.common.version_service.settings")
+@mock.patch("testgen.common.version_service.requests")
+def test_return_unknown_when_pypi_request_fails(requests: mock.Mock, settings: mock.Mock):
+ response = mock.Mock()
+ response.status_code = 400
+ requests.get.return_value = response
+ settings.CHECK_FOR_LATEST_VERSION = "pypi"
+
+ assert get_latest_version() == "unknown"
+
+
+@pytest.mark.unit
+@mock.patch("testgen.common.version_service.settings")
+@mock.patch("testgen.common.version_service.requests")
+def test_get_the_latest_version_from_pypi(requests: mock.Mock, settings: mock.Mock):
+ response = mock.Mock()
+ response.status_code = 200
+ requests.get.return_value = response
+ response.json.return_value = {
+ "releases": {
+ "0.0.1": "",
+ "0.1.0": "",
+ "1.0.0": "",
+ "1.1.0": "",
+ "v1.2.3": "",
+ "v1.2.0": "",
+ }
+ }
+ settings.CHECK_FOR_LATEST_VERSION = "pypi"
+
+ assert get_latest_version() == "1.2.3"
+
+
+@pytest.mark.unit
+@mock.patch("testgen.common.version_service.settings")
+@mock.patch("testgen.common.version_service.requests")
+def test_calls_docker_tags_api(requests: mock.Mock, settings: mock.Mock):
+ settings.DOCKER_HUB_USERNAME = None
+ settings.DOCKER_HUB_PASSWORD = None
+ settings.DOCKER_HUB_REPOSITORY = "datakitchen/testgen-a"
+ settings.CHECK_FOR_LATEST_VERSION = "docker"
+ get_latest_version()
+
+ requests.get.assert_called_with(
+ "https://hub.docker.com/v2/repositories/datakitchen/testgen-a/tags",
+ headers={},
+ params={"page_size": 25, "page": 1, "ordering": "last_updated"},
+ timeout=3,
+ )
+
+
+@pytest.mark.unit
+@mock.patch("testgen.common.version_service.settings")
+@mock.patch("testgen.common.version_service.requests")
+def test_return_unknown_when_docker_request_fails(requests: mock.Mock, settings: mock.Mock):
+ response = mock.Mock()
+ response.status_code = 400
+ requests.get.return_value = response
+ settings.DOCKER_HUB_USERNAME = None
+ settings.DOCKER_HUB_PASSWORD = None
+ settings.CHECK_FOR_LATEST_VERSION = "docker"
+
+ assert get_latest_version() == "unknown"
+
+
+@pytest.mark.unit
+@mock.patch("testgen.common.version_service.settings")
+@mock.patch("testgen.common.version_service.requests")
+def test_get_the_latest_version_from_dockerhub(requests: mock.Mock, settings: mock.Mock):
+ settings.DOCKER_HUB_USERNAME = None
+ settings.DOCKER_HUB_PASSWORD = None
+ settings.CHECK_FOR_LATEST_VERSION = "docker"
+
+ response = mock.Mock()
+ response.status_code = 200
+ requests.get.return_value = response
+ response.json.return_value = {
+ "results": [
+ {"name": "v0.0.1"},
+ {"name": "v0.1.0"},
+ {"name": "v1.0.0"},
+ {"name": "v1.1.0"},
+ {"name": "v1.2.0"},
+ {"name": "v1.2.3-experimental"},
+ ],
+ }
+
+ assert get_latest_version() == "1.2.0"
+
+@pytest.mark.unit
+@mock.patch("testgen.common.version_service.settings")
+@mock.patch("testgen.common.version_service.requests")
+def test_authenticates_docker_request(requests: mock.Mock, settings: mock.Mock):
+ username = settings.DOCKER_HUB_USERNAME = "docker-username"
+ password = settings.DOCKER_HUB_PASSWORD = "docker-password" # noqa: S105
+ docker_auth_token = "docker-auth-token" # noqa: S105
+ settings.CHECK_FOR_LATEST_VERSION = "docker"
+ settings.DOCKER_HUB_REPOSITORY = "datakitchen/testgen-b"
+
+ response = mock.Mock()
+ response.status_code = 200
+ response.json.return_value = {"token": docker_auth_token}
+ requests.post.return_value = response
+
+ get_latest_version()
+
+ requests.post.assert_called_with(
+ "https://hub.docker.com/v2/users/login",
+ json={"username": username, "password": password},
+ timeout=5,
+ )
+ requests.get.assert_called_with(
+ "https://hub.docker.com/v2/repositories/datakitchen/testgen-b/tags",
+ headers={"Authorization": f"Bearer {docker_auth_token}"},
+ params={"page_size": 25, "page": 1, "ordering": "last_updated"},
+ timeout=3,
+ )
+
+
+@pytest.mark.unit
+@mock.patch("testgen.common.version_service.settings")
+@mock.patch("testgen.common.version_service.requests")
+def test_return_unknown_when_docker_auth_request_fails(requests: mock.Mock, settings: mock.Mock):
+ settings.DOCKER_HUB_USERNAME = "docker-username"
+ settings.DOCKER_HUB_PASSWORD = "docker-password" # noqa: S105
+ settings.CHECK_FOR_LATEST_VERSION = "docker"
+ settings.DOCKER_HUB_REPOSITORY = "datakitchen/testgen-b"
+
+ response = mock.Mock()
+ response.status_code = 400
+ requests.post.return_value = response
+
+ assert get_latest_version() == "unknown"
From 89d1db158c84ff3967d16f23ab8df60ed5e228de Mon Sep 17 00:00:00 2001
From: Astor
Date: Wed, 11 Sep 2024 10:37:07 -0300
Subject: [PATCH 38/78] astor/TG-770
---
testgen/ui/views/test_definitions.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py
index f88148a..ee25183 100644
--- a/testgen/ui/views/test_definitions.py
+++ b/testgen/ui/views/test_definitions.py
@@ -510,7 +510,8 @@ def show_test_form(
if i >= dynamic_attributes_half_length:
current_column = mid_right_column
- value = empty_if_null(selected_test_def[dynamic_attribute]) if mode == "edit" else ""
+ default_value = "" if dynamic_attribute != "threshold_value" else 0
+ value = empty_if_null(selected_test_def[dynamic_attribute]) if mode == "edit" else default_value
actual_dynamic_attributes_labels = (
dynamic_attributes_labels[i]
From 322a0f3155ed1e507c31d9dd885dd95d7c9ff534 Mon Sep 17 00:00:00 2001
From: Ricardo Boni
Date: Thu, 5 Sep 2024 11:47:11 -0400
Subject: [PATCH 39/78] feat(ui): Adding sort selector component
---
.../js/components/sorting_selector.js | 216 ++++++++++++++++++
testgen/ui/components/frontend/js/main.js | 4 +-
testgen/ui/components/widgets/__init__.py | 1 +
.../ui/components/widgets/sorting_selector.py | 39 ++++
testgen/ui/views/profiling_summary.py | 2 +-
5 files changed, 260 insertions(+), 2 deletions(-)
create mode 100644 testgen/ui/components/frontend/js/components/sorting_selector.js
create mode 100644 testgen/ui/components/widgets/sorting_selector.py
diff --git a/testgen/ui/components/frontend/js/components/sorting_selector.js b/testgen/ui/components/frontend/js/components/sorting_selector.js
new file mode 100644
index 0000000..6564335
--- /dev/null
+++ b/testgen/ui/components/frontend/js/components/sorting_selector.js
@@ -0,0 +1,216 @@
+import {Streamlit} from "../streamlit.js";
+import van from '../van.min.js';
+
+/**
+ *
+ * @typedef Properties
+ * @type {object}
+ * @property {Array} columns
+ * @property {Array} default
+ */
+const { a, hr, button, div, i, img, label, option, select, span } = van.tags;
+
+const SortingSelector = (/** @type {Properties} */ props) => {
+
+ let defaultDirection = "ASC";
+
+ if (!window.testgen.loadedStylesheets.sortingSelector) {
+ document.adoptedStyleSheets.push(stylesheet);
+ window.testgen.loadedStylesheets.sortSelector = true;
+ }
+
+ const columns = props.columns.val;
+ const prevComponentState = props.state.val || [];
+
+ const columnLabel = columns.reduce((acc, [colLabel, colId]) => ({ ...acc, [colId]: colLabel}), {});
+
+ Streamlit.setFrameHeight(130 + 30 * columns.length);
+
+ const componentState = columns.reduce(
+ (state, [colLabel, colId]) => (
+ { ...state, [colId]: van.state(prevComponentState[colId] || { direction: "ASC", order: null })}
+ ),
+ {}
+ );
+
+ const selectedDiv = div(
+ {
+ class: 'tg-sort-selector--column-list',
+ style: `flex-grow: 1`,
+ },
+ );
+
+ const directionIcons = {
+ ASC: `arrow_downward`,
+ DESC: `arrow_upward`,
+ }
+
+ const activeColumnItem = (colId) => {
+ const state = componentState[colId];
+ const directionIcon = van.derive(() => directionIcons[state.val.direction]);
+ return button(
+ {
+ onclick: () => {
+ state.val = { ...state.val, direction: state.val.direction === "DESC" ? "ASC" : "DESC" };
+ },
+ },
+ i(
+ { class: `material-symbols-rounded` },
+ directionIcon,
+ ),
+ span(columnLabel[colId]),
+ )
+ }
+
+ const selectColumn = (colId, direction) => {
+ componentState[colId].val = { direction: direction, order: selectedDiv.childElementCount }
+ van.add(selectedDiv, activeColumnItem(colId));
+ }
+
+ prevComponentState.forEach(([colId, direction]) => selectColumn(colId, direction));
+
+ const reset = () => {
+ columns.map(([colLabel, colId]) => (componentState[colId].val = { direction: defaultDirection, order: null }));
+ selectedDiv.innerHTML = ``;
+ }
+
+ const apply = () => {
+ Streamlit.sendData(
+ Object.entries(componentState).filter(
+ ([colId, colState]) => colState.val.order !== null
+ ).sort(
+ ([colIdA, colStateA], [colIdB, colStateB]) => colStateA.val.order - colStateB.val.order
+ ).map(
+ ([colId, colState]) => [colId, colState.val.direction]
+ )
+ );
+ }
+
+ const columnItem = (colId) => {
+ const state = componentState[colId];
+ return button(
+ {
+ onclick: () => selectColumn(colId, defaultDirection),
+ hidden: state.val.order !== null,
+ },
+ i(
+ {
+ class: `material-symbols-rounded`,
+ style: `color: var(--disabled-text-color);`,
+ },
+ `expand_all`
+ ),
+ span(columnLabel[colId]),
+ )
+ }
+
+ const optionsDiv = div(
+ {
+ class: 'tg-sort-selector--column-list',
+ },
+ columns.map(([colLabel, colId]) => van.derive(() => columnItem(colId))),
+ )
+
+ return div(
+ { class: 'tg-sort-selector' },
+ div(
+ {
+ class: `tg-sort-selector--header`,
+ },
+ span("Selected columns")
+ ),
+ selectedDiv,
+ div(
+ { class: `tg-sort-selector--header` },
+ span("Available columns")
+ ),
+ optionsDiv,
+ div(
+ { class: `tg-sort-selector--footer` },
+ button(
+ { onclick: reset },
+ span(`Reset`),
+ ),
+ button(
+ { onclick: apply },
+ span(`Apply`),
+ )
+ )
+ );
+};
+
+
+const stylesheet = new CSSStyleSheet();
+stylesheet.replace(`
+
+.tg-sort-selector {
+ height: 100vh;
+ display: flex;
+ flex-direction: column;
+ align-content: flex-end;
+ justify-content: space-between;
+}
+
+.tg-sort-selector--column-list {
+ display: flex;
+ flex-direction: column;
+}
+
+.tg-sort-selector--column-list button {
+ margin: 0;
+ border: 0;
+ padding: 5px 0;
+ text-align: left;
+ background: transparent;
+ color: var(--button-text-color);
+}
+
+.tg-sort-selector--column-list button:hover {
+ background: #00000010;
+}
+
+.tg-sort-selector--column-list button * {
+ vertical-align: middle;
+}
+
+.tg-sort-selector--column-list button i {
+ font-size: 20px;
+}
+
+
+.tg-sort-selector--column-list {
+ border-bottom: 3px dotted var(--disabled-text-color);
+ padding-bottom: 16px;
+ margin-bottom: 8px;
+}
+
+.tg-sort-selector--header {
+ text-align: right;
+ text-transform: uppercase;
+ font-size: 70%;
+}
+
+.tg-sort-selector--footer {
+ display: flex;
+ flex-direction: row;
+ justify-content: space-between;
+ margin-top: 8px;
+}
+
+.tg-sort-selector--footer button {
+ background-color: var(--button-stroked-background);
+ color: var(--button-stroked-text-color);
+ border: var(--button-stroked-border);
+ padding: 5px 20px;
+ border-radius: 5px;
+}
+
+@media (prefers-color-scheme: dark) {
+ .tg-sort-selector--column-list button:hover {
+ background: #FFFFFF20;
+ }
+}
+
+`);
+
+export { SortingSelector };
diff --git a/testgen/ui/components/frontend/js/main.js b/testgen/ui/components/frontend/js/main.js
index 31f2db7..3c56cdd 100644
--- a/testgen/ui/components/frontend/js/main.js
+++ b/testgen/ui/components/frontend/js/main.js
@@ -3,7 +3,7 @@
* @type {object}
* @property {string} id - id of the specific component to be rendered
* @property {string} key - user key of the specific component to be rendered
- * @property {object} props - object with the props to pass to the rendered component
+ * @property {object} props - object with the props to pass to the rendered component
*/
import van from './van.min.js';
import { Streamlit } from './streamlit.js';
@@ -13,6 +13,7 @@ import { ExpanderToggle } from './components/expander_toggle.js';
import { Link } from './components/link.js';
import { Select } from './components/select.js'
import { SummaryBar } from './components/summary_bar.js';
+import { SortingSelector } from './components/sorting_selector.js';
let currentWindowVan = van;
let topWindowVan = window.top.van;
@@ -24,6 +25,7 @@ const TestGenComponent = (/** @type {string} */ id, /** @type {object} */ props)
expander_toggle: ExpanderToggle,
link: Link,
select: Select,
+ sorting_selector: SortingSelector,
sidebar: window.top.testgen.components.Sidebar,
summary_bar: SummaryBar,
};
diff --git a/testgen/ui/components/widgets/__init__.py b/testgen/ui/components/widgets/__init__.py
index 7c25862..653d5e0 100644
--- a/testgen/ui/components/widgets/__init__.py
+++ b/testgen/ui/components/widgets/__init__.py
@@ -14,4 +14,5 @@
whitespace,
)
from testgen.ui.components.widgets.sidebar import sidebar
+from testgen.ui.components.widgets.sorting_selector import sorting_selector
from testgen.ui.components.widgets.summary_bar import summary_bar
diff --git a/testgen/ui/components/widgets/sorting_selector.py b/testgen/ui/components/widgets/sorting_selector.py
new file mode 100644
index 0000000..1c024f8
--- /dev/null
+++ b/testgen/ui/components/widgets/sorting_selector.py
@@ -0,0 +1,39 @@
+from collections.abc import Iterable
+
+import streamlit as st
+from streamlit.runtime.scriptrunner import get_script_run_ctx
+
+from testgen.ui.components.utils.component import component
+
+
+def sorting_selector(
+ columns: Iterable[tuple[str, str]],
+ default: Iterable[tuple[str, str]] = (),
+ popover_label: str = "Sort",
+ key: str = "testgen:sorting_selector",
+) -> list[tuple[str, str]]:
+ """
+ Renders a pop over that, when clicked, shows a list of database columns to be selected for sorting.
+
+ # Parameters
+    :param columns: Iterable of 2-tuples, being: (<column label>, <column id>)
+    :param default: Iterable of 2-tuples, being: (<column id>, <direction>)
+ :param key: unique key to give the component a persisting state
+
+ # Return value
+    Returns a list of 2-tuples, being: (<column id>, <direction>)
+ """
+
+ ctx = get_script_run_ctx()
+ try:
+ state = ctx.session_state[key]
+ except KeyError:
+ state = default
+
+ with st.popover(popover_label):
+ return component(
+ id_="sorting_selector",
+ key=key,
+ default=default,
+ props={"columns": columns, "state": state},
+ )
diff --git a/testgen/ui/views/profiling_summary.py b/testgen/ui/views/profiling_summary.py
index 6fa7dea..e81fa15 100644
--- a/testgen/ui/views/profiling_summary.py
+++ b/testgen/ui/views/profiling_summary.py
@@ -25,7 +25,7 @@ class DataProfilingPage(Page):
def render(self, project_code: str | None = None, table_group_id: str | None = None, **_kwargs) -> None:
project_code = project_code or session.project
-
+
testgen.page_header(
"Profiling Runs",
"https://docs.datakitchen.io/article/dataops-testgen-help/investigate-profiling",
From 01d35477201dbb94ac39c0c22743d970812019bc Mon Sep 17 00:00:00 2001
From: Ricardo Boni
Date: Tue, 10 Sep 2024 17:37:46 -0400
Subject: [PATCH 40/78] feat(ui): Tracking the sort selector state with the
query parameters
---
.../ui/components/widgets/sorting_selector.py | 62 ++++++++++++++++++-
1 file changed, 59 insertions(+), 3 deletions(-)
diff --git a/testgen/ui/components/widgets/sorting_selector.py b/testgen/ui/components/widgets/sorting_selector.py
index 1c024f8..539e1ce 100644
--- a/testgen/ui/components/widgets/sorting_selector.py
+++ b/testgen/ui/components/widgets/sorting_selector.py
@@ -1,15 +1,50 @@
-from collections.abc import Iterable
+import itertools
+import re
+from collections.abc import Callable, Iterable
+from typing import Any
import streamlit as st
from streamlit.runtime.scriptrunner import get_script_run_ctx
from testgen.ui.components.utils.component import component
+from testgen.ui.navigation.router import Router
+
+
+def _slugfy(text) -> str:
+ return re.sub(r"[^a-z]+", "-", text.lower())
+
+
+def _state_to_str(columns, state):
+ state_parts = []
+ state_dict = dict(state)
+ try:
+ for col_label, col_id in columns:
+ if col_id in state_dict:
+ state_parts.append(".".join((_slugfy(col_label), state_dict[col_id].lower())))
+ return "-".join(state_parts) or "-"
+ except Exception:
+ return None
+
+
+def _state_from_str(columns, state_str):
+ col_slug_to_id = {_slugfy(col_label): col_id for col_label, col_id in columns}
+ state_part_re = re.compile("".join(("(", "|".join(col_slug_to_id.keys()), r")\.(asc|desc)")))
+ state = []
+ try:
+ for state_part in state_str.split("-"):
+ if match := state_part_re.match(state_part):
+ state.append([col_slug_to_id[match.group(1)], match.group(2).upper()])
+ except Exception as e:
+ return None
+ return state
def sorting_selector(
columns: Iterable[tuple[str, str]],
default: Iterable[tuple[str, str]] = (),
+ on_change: Callable[[], Any] | None = None,
popover_label: str = "Sort",
+ query_param: str | None = "sort",
key: str = "testgen:sorting_selector",
) -> list[tuple[str, str]]:
"""
@@ -24,16 +59,37 @@ def sorting_selector(
     Returns a list of 2-tuples, being: (<column id>, <direction>)
"""
+ state = None
+
ctx = get_script_run_ctx()
try:
state = ctx.session_state[key]
except KeyError:
+ pass
+
+ if state is None and query_param and (state_str := st.query_params.get(query_param)):
+ state = _state_from_str(columns, state_str)
+
+ if state is None:
state = default
with st.popover(popover_label):
- return component(
+ new_state = component(
id_="sorting_selector",
key=key,
- default=default,
+ default=state,
+ on_change=on_change,
props={"columns": columns, "state": state},
)
+
+ # For some unknown reason, sometimes, streamlit returns None as the component status
+ new_state = [] if new_state is None else new_state
+
+ if query_param:
+ if tuple(itertools.chain(*default)) == tuple(itertools.chain(*new_state)):
+ value = None
+ else:
+ value = _state_to_str(columns, new_state)
+ Router().set_query_params({query_param: value})
+
+ return new_state
From 50a44b33c4ed56102891d015a9c82985f7a4ea56 Mon Sep 17 00:00:00 2001
From: Ricardo Boni
Date: Wed, 11 Sep 2024 14:21:13 -0400
Subject: [PATCH 41/78] feat(ui): Improving sorting and filtering of hygiene
issues
---
testgen/ui/navigation/router.py | 15 +++--
testgen/ui/views/profiling_anomalies.py | 75 ++++++++++++++++++++-----
2 files changed, 69 insertions(+), 21 deletions(-)
diff --git a/testgen/ui/navigation/router.py b/testgen/ui/navigation/router.py
index 8480ec5..0c58484 100644
--- a/testgen/ui/navigation/router.py
+++ b/testgen/ui/navigation/router.py
@@ -17,9 +17,9 @@ class Router(Singleton):
def __init__(
self,
/,
- routes: list[type[testgen.ui.navigation.page.Page]],
+ routes: list[type[testgen.ui.navigation.page.Page]] | None = None,
) -> None:
- self._routes = {route.path: route(self) for route in routes}
+ self._routes = {route.path: route(self) for route in routes} if routes else {}
def run(self, hide_sidebar=False) -> None:
streamlit_pages = [route.streamlit_page for route in self._routes.values()]
@@ -29,7 +29,7 @@ def run(self, hide_sidebar=False) -> None:
# Otherwise anything custom in the sidebar randomly flickers on page navigation
current_page = st.navigation(streamlit_pages, position="hidden" if hide_sidebar else "sidebar")
session.current_page_args = st.query_params
-
+
# This hack is needed because the auth cookie is not retrieved on the first run
# We have to store the page and wait for the second run
@@ -39,7 +39,7 @@ def run(self, hide_sidebar=False) -> None:
else:
current_page = session.page_pending_cookies or current_page
session.page_pending_cookies = None
-
+
if session.page_args_pending_router is not None:
session.current_page_args = session.page_args_pending_router
st.query_params.from_dict(session.page_args_pending_router)
@@ -47,8 +47,8 @@ def run(self, hide_sidebar=False) -> None:
session.current_page = current_page.url_path
current_page.run()
-
-
+
+
def navigate(self, /, to: str, with_args: dict = {}) -> None: # noqa: B006
try:
if to != session.current_page:
@@ -66,8 +66,7 @@ def navigate(self, /, to: str, with_args: dict = {}) -> None: # noqa: B006
st.error(error_message)
LOG.exception(error_message)
-
- def set_query_params(self, with_args: dict = {}) -> None: # noqa: B006
+ def set_query_params(self, with_args: dict) -> None:
params = st.query_params
params.update(with_args)
params = {k: v for k, v in params.items() if v not in [None, "None", ""]}
diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py
index fe8e5c4..9a7176a 100644
--- a/testgen/ui/views/profiling_anomalies.py
+++ b/testgen/ui/views/profiling_anomalies.py
@@ -22,7 +22,7 @@ class ProfilingAnomaliesPage(Page):
lambda: "run_id" in session.current_page_args or "profiling-runs",
]
- def render(self, run_id: str, issue_class: str | None = None, **_kwargs) -> None:
+ def render(self, run_id: str, issue_class: str | None = None, issue_type: str | None = None, **_kwargs) -> None:
run_date, _table_group_id, table_group_name, project_code = profiling_queries.lookup_db_parentage_from_run(
run_id
)
@@ -39,7 +39,9 @@ def render(self, run_id: str, issue_class: str | None = None, **_kwargs) -> None
)
others_summary_column, pii_summary_column, _ = st.columns([.3, .3, .4])
- liklihood_filter_column, actions_column, export_button_column = st.columns([.3, .5, .2], vertical_alignment="bottom")
+ (liklihood_filter_column, issue_type_filter_column, actions_column, export_button_column) = (
+ st.columns([.16, .34, .32, .18], vertical_alignment="bottom")
+ )
testgen.flex_row_end(actions_column)
testgen.flex_row_end(export_button_column)
@@ -54,12 +56,35 @@ def render(self, run_id: str, issue_class: str | None = None, **_kwargs) -> None
label="Issue Class",
)
+ with issue_type_filter_column:
+ # Issue filter (optional)
+ issue_type_options = get_issue_types()
+ issue_type = testgen.toolbar_select(
+ options=["All Issue Types", *issue_type_options["anomaly_name"]],
+ default_value=issue_type,
+ required=True,
+ bind_to_query="issue_type",
+ label="Issue Type",
+ )
+ issue_type_id = dict(zip(issue_type_options["anomaly_name"], issue_type_options["id"], strict=False)).get(issue_type)
+
with actions_column:
str_help = "Toggle on to perform actions on multiple Hygiene Issues"
do_multi_select = st.toggle("Multi-Select", help=str_help)
+ with export_button_column:
+ sortable_columns = (
+ ("Table", "r.table_name"),
+ ("Column", "r.column_name"),
+ ("Anomaly", "t.anomaly_name"),
+ ("Likelihood", "likelihood_order"),
+ ("Action", "r.disposition"),
+ )
+ default = (("r.table_name", "ASC"), ("r.column_name", "ASC"))
+ sorting_columns = testgen.sorting_selector(sortable_columns, default)
+
# Get hygiene issue list
- df_pa = get_profiling_anomalies(run_id, issue_class)
+ df_pa = get_profiling_anomalies(run_id, issue_class, issue_type_id, sorting_columns)
# Retrieve disposition action (cache refreshed)
df_action = get_anomaly_disposition(run_id)
@@ -90,7 +115,7 @@ def render(self, run_id: str, issue_class: str | None = None, **_kwargs) -> None
width=400,
)
# write_frequency_graph(df_pa)
-
+
lst_show_columns = [
"table_name",
"column_name",
@@ -162,6 +187,11 @@ def render(self, run_id: str, issue_class: str | None = None, **_kwargs) -> None
):
source_data_dialog(selected_row)
+ cached_functions = [get_anomaly_disposition, get_profiling_anomaly_summary]
+ # Clear the list cache if the list is sorted by disposition/action
+ if "r.disposition" in dict(sorting_columns):
+ cached_functions.append(get_profiling_anomalies)
+
# Need to render toolbar buttons after grid, so selection status is maintained
if actions_column.button(
"✓", help="Confirm this issue as relevant for this run", disabled=not selected
@@ -170,7 +200,7 @@ def render(self, run_id: str, issue_class: str | None = None, **_kwargs) -> None
do_disposition_update(selected, "Confirmed"),
as_toast=True,
clear_cache=True,
- lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary],
+ lst_cached_functions=cached_functions,
)
if actions_column.button(
"✘", help="Dismiss this issue as not relevant for this run", disabled=not selected
@@ -179,7 +209,7 @@ def render(self, run_id: str, issue_class: str | None = None, **_kwargs) -> None
do_disposition_update(selected, "Dismissed"),
as_toast=True,
clear_cache=True,
- lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary],
+ lst_cached_functions=cached_functions,
)
if actions_column.button(
"🔇", help="Mute this test to deactivate it for future runs", disabled=not selected
@@ -188,14 +218,14 @@ def render(self, run_id: str, issue_class: str | None = None, **_kwargs) -> None
do_disposition_update(selected, "Inactive"),
as_toast=True,
clear_cache=True,
- lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary],
+ lst_cached_functions=cached_functions,
)
if actions_column.button("↩︎", help="Clear action", disabled=not selected):
fm.reset_post_updates(
do_disposition_update(selected, "No Decision"),
as_toast=True,
clear_cache=True,
- lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary],
+ lst_cached_functions=cached_functions,
)
else:
st.markdown(":green[**No Hygiene Issues Found**]")
@@ -213,12 +243,18 @@ def get_db_table_group_choices(str_project_code):
@st.cache_data(show_spinner="Retrieving Data")
-def get_profiling_anomalies(str_profile_run_id, str_likelihood):
+def get_profiling_anomalies(str_profile_run_id, str_likelihood, issue_type_id, sorting_columns):
str_schema = st.session_state["dbschema"]
if str_likelihood == "All Likelihoods":
str_criteria = " AND t.issue_likelihood <> 'Potential PII'"
else:
str_criteria = f" AND t.issue_likelihood = '{str_likelihood}'"
+ if sorting_columns:
+ str_order_by = "ORDER BY " + (", ".join(" ".join(col) for col in sorting_columns))
+ else:
+ str_order_by = ""
+ if issue_type_id:
+ str_criteria += f" AND t.id = '{issue_type_id}'"
# Define the query -- first visible column must be first, because will hold the multi-select box
str_sql = f"""
SELECT r.table_name, r.column_name, r.schema_name,
@@ -228,9 +264,15 @@ def get_profiling_anomalies(str_profile_run_id, str_likelihood):
WHEN t.issue_likelihood = 'Possible' THEN 'Possible: speculative test that often identifies problems'
WHEN t.issue_likelihood = 'Likely' THEN 'Likely: typically indicates a data problem'
WHEN t.issue_likelihood = 'Definite' THEN 'Definite: indicates a highly-likely data problem'
- WHEN t.issue_likelihood = 'Potential PII'
+ WHEN t.issue_likelihood = 'Potential PII'
THEN 'Potential PII: may require privacy policies, standards and procedures for access, storage and transmission.'
- END as likelihood_explanation,
+ END AS likelihood_explanation,
+ CASE
+ WHEN t.issue_likelihood = 'Potential PII' THEN 1
+ WHEN t.issue_likelihood = 'Possible' THEN 2
+ WHEN t.issue_likelihood = 'Likely' THEN 3
+ WHEN t.issue_likelihood = 'Definite' THEN 4
+ END AS likelihood_order,
t.anomaly_description, r.detail, t.suggested_action,
r.anomaly_id, r.table_groups_id::VARCHAR, r.id::VARCHAR, p.profiling_starttime
FROM {str_schema}.profile_anomaly_results r
@@ -240,7 +282,7 @@ def get_profiling_anomalies(str_profile_run_id, str_likelihood):
ON r.profile_run_id = p.id
WHERE r.profile_run_id = '{str_profile_run_id}'
{str_criteria}
- ORDER BY r.schema_name, r.table_name, r.column_name;
+ {str_order_by}
"""
# Retrieve data as df
df = db.retrieve_data(str_sql)
@@ -267,6 +309,13 @@ def get_anomaly_disposition(str_profile_run_id):
return df[["id", "action"]]
+@st.cache_data(show_spinner="Retrieving Status")
+def get_issue_types():
+ schema = st.session_state["dbschema"]
+ df = db.retrieve_data(f"SELECT id, anomaly_name FROM {schema}.profile_anomaly_types")
+ return df
+
+
@st.cache_data(show_spinner=False)
def get_profiling_anomaly_summary(str_profile_run_id):
str_schema = st.session_state["dbschema"]
@@ -314,7 +363,7 @@ def get_bad_data(selected_row):
str_sql = f"""
SELECT t.lookup_query, tg.table_group_schema, c.project_qc_schema,
c.sql_flavor, c.project_host, c.project_port, c.project_db, c.project_user, c.project_pw_encrypted,
- c.url, c.connect_by_url, c.connect_by_key, c.private_key, c.private_key_passphrase
+ c.url, c.connect_by_url, c.connect_by_key, c.private_key, c.private_key_passphrase
FROM {str_schema}.target_data_lookups t
INNER JOIN {str_schema}.table_groups tg
ON ('{selected_row["table_groups_id"]}'::UUID = tg.id)
From ba02ba380ecbb153f94bfe9eae4c665798005b37 Mon Sep 17 00:00:00 2001
From: Aarthy Adityan
Date: Thu, 12 Sep 2024 16:05:03 -0400
Subject: [PATCH 42/78] docs: update readme for pip installation
---
README.md | 38 +++++++++++++++++++++-----------------
1 file changed, 21 insertions(+), 17 deletions(-)
diff --git a/README.md b/README.md
index 467b954..0266eb4 100644
--- a/README.md
+++ b/README.md
@@ -86,7 +86,7 @@ Create and activate a virtual environment with a TestGen-compatible version of P
_On Linux/Mac_
```shell
-python3.10 -m venv venv
+python3 -m venv venv
source venv/bin/activate
```
@@ -108,28 +108,32 @@ testgen --help
### Set up the application database in PostgresSQL
-Set appropriate values for the following environment variables (use `export variable=value` for Linux/Mac and `set variable=value` for Windows). Refer to the [TestGen Configuration](configuration.md) document for more details, defaults, and other supported configuration.
-
+Create a `local.env` file with the following environment variables, replacing the `<value>` placeholders with appropriate values. Refer to the [TestGen Configuration](docs/configuration.md) document for more details, defaults, and other supported configuration.
```shell
# Connection parameters for the PostgreSQL server
-TG_METADATA_DB_HOST
-TG_METADATA_DB_PORT
+export TG_METADATA_DB_HOST=<value>
+export TG_METADATA_DB_PORT=<value>
+
+# Connection credentials for the PostgreSQL server
+# This role must have privileges to create roles, users, database and schema so that the application database can be initialized
+export TG_METADATA_DB_USER=<value>
+export TG_METADATA_DB_PASSWORD=<value>
-# PostgreSQL admin role with privileges to create roles, users, database and schema
-# This role will be used by the next step to initialize the application database
-DATABASE_ADMIN_USER
-DATABASE_ADMIN_PASSWORD
+# Set a password and arbitrary string (the "salt") to be used for encrypting secrets in the application database
+export TG_DECRYPT_PASSWORD=<value>
+export TG_DECRYPT_SALT=<value>
-# Credentials to be used for encrypting secrets in application database
-TG_DECRYPT_SALT
-TG_DECRYPT_PASSWORD
+# Set credentials for the default admin user to be created for TestGen
+export TESTGEN_USERNAME=<value>
+export TESTGEN_PASSWORD=<value>
-# Default admin user to be created for TestGen
-TESTGEN_USERNAME
-TESTGEN_PASSWORD
+# Set an accessible path for storing application logs
+export TESTGEN_LOG_FILE_PATH=<value>
+```
-# Accessible path for storing application logs
-TESTGEN_LOG_FILE_PATH
+Source the file to apply the environment variables. For the Windows equivalent, refer to [this guide](https://bennett4.medium.com/windows-alternative-to-source-env-for-setting-environment-variables-606be2a6d3e1).
+```shell
+source local.env
```
Make sure the PostgreSQL database server is up and running. Initialize the application database for TestGen.
From c81e996f71507841d75968a714526c707c8695e4 Mon Sep 17 00:00:00 2001
From: Ricardo Boni
Date: Wed, 11 Sep 2024 15:28:05 -0400
Subject: [PATCH 43/78] misc: Self-review fixes
---
.../js/components/sorting_selector.js | 19 ++++++---
.../ui/components/widgets/sorting_selector.py | 6 ++-
testgen/ui/views/profiling_anomalies.py | 40 +++++++++----------
3 files changed, 39 insertions(+), 26 deletions(-)
diff --git a/testgen/ui/components/frontend/js/components/sorting_selector.js b/testgen/ui/components/frontend/js/components/sorting_selector.js
index 6564335..64c1d04 100644
--- a/testgen/ui/components/frontend/js/components/sorting_selector.js
+++ b/testgen/ui/components/frontend/js/components/sorting_selector.js
@@ -2,13 +2,18 @@ import {Streamlit} from "../streamlit.js";
import van from '../van.min.js';
/**
+ * @typedef ColDef
+ * @type {Array.}
*
- * @typedef Properties
+ * @typedef StateItem
+ * @type {Array.}
+ *
+ * @typedef Properties
* @type {object}
- * @property {Array} columns
- * @property {Array} default
+ * @property {Array.} columns
+ * @property {Array.} state
*/
-const { a, hr, button, div, i, img, label, option, select, span } = van.tags;
+const { button, div, i, span } = van.tags;
const SortingSelector = (/** @type {Properties} */ props) => {
@@ -70,7 +75,11 @@ const SortingSelector = (/** @type {Properties} */ props) => {
prevComponentState.forEach(([colId, direction]) => selectColumn(colId, direction));
const reset = () => {
- columns.map(([colLabel, colId]) => (componentState[colId].val = { direction: defaultDirection, order: null }));
+ columns.map(
+ ([colLabel, colId]) => (
+ componentState[colId].val = { direction: defaultDirection, order: null }
+ )
+ );
selectedDiv.innerHTML = ``;
}
diff --git a/testgen/ui/components/widgets/sorting_selector.py b/testgen/ui/components/widgets/sorting_selector.py
index 539e1ce..17ffa11 100644
--- a/testgen/ui/components/widgets/sorting_selector.py
+++ b/testgen/ui/components/widgets/sorting_selector.py
@@ -53,6 +53,10 @@ def sorting_selector(
# Parameters
:param columns: Iterable of 2-tuples, being: (, )
:param default: Iterable of 2-tuples, being: (, )
+ :param on_change: Callable that will be called when the component state is updated
+ :param popover_label: Label to be applied to the pop-over button. Default: 'Sort'
+ :param query_param: Name of the query parameter that will store the component state. Can be disabled by setting
+ to None. Default: 'sort'.
:param key: unique key to give the component a persisting state
# Return value
@@ -82,7 +86,7 @@ def sorting_selector(
props={"columns": columns, "state": state},
)
- # For some unknown reason, sometimes, streamlit returns None as the component status
+ # For some unknown reason, sometimes, streamlit returns None as the component state
new_state = [] if new_state is None else new_state
if query_param:
diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py
index 9a7176a..4372902 100644
--- a/testgen/ui/views/profiling_anomalies.py
+++ b/testgen/ui/views/profiling_anomalies.py
@@ -39,40 +39,35 @@ def render(self, run_id: str, issue_class: str | None = None, issue_type: str |
)
others_summary_column, pii_summary_column, _ = st.columns([.3, .3, .4])
- (liklihood_filter_column, issue_type_filter_column, actions_column, export_button_column) = (
- st.columns([.16, .34, .32, .18], vertical_alignment="bottom")
+ (liklihood_filter_column, issue_type_filter_column, sort_column, actions_column, export_button_column) = (
+ st.columns([.16, .34, .08, .32, .1], vertical_alignment="bottom")
)
testgen.flex_row_end(actions_column)
testgen.flex_row_end(export_button_column)
with liklihood_filter_column:
- # Likelihood selection - optional filter
- status_options = ["All Likelihoods", "Definite", "Likely", "Possible", "Potential PII"]
issue_class = testgen.toolbar_select(
- options=status_options,
+ options=["Definite", "Likely", "Possible", "Potential PII"],
default_value=issue_class,
- required=True,
+ required=False,
bind_to_query="issue_class",
label="Issue Class",
)
with issue_type_filter_column:
- # Issue filter (optional)
issue_type_options = get_issue_types()
- issue_type = testgen.toolbar_select(
- options=["All Issue Types", *issue_type_options["anomaly_name"]],
- default_value=issue_type,
- required=True,
+ issue_type_id = testgen.toolbar_select(
+ options=issue_type_options,
+ default_value=None if issue_class == "Potential PII" else issue_type,
+ value_column="id",
+ display_column="anomaly_name",
+ required=False,
bind_to_query="issue_type",
label="Issue Type",
+ disabled=issue_class == "Potential PII",
)
- issue_type_id = dict(zip(issue_type_options["anomaly_name"], issue_type_options["id"], strict=False)).get(issue_type)
- with actions_column:
- str_help = "Toggle on to perform actions on multiple Hygiene Issues"
- do_multi_select = st.toggle("Multi-Select", help=str_help)
-
- with export_button_column:
+ with sort_column:
sortable_columns = (
("Table", "r.table_name"),
("Column", "r.column_name"),
@@ -80,9 +75,14 @@ def render(self, run_id: str, issue_class: str | None = None, issue_type: str |
("Likelihood", "likelihood_order"),
("Action", "r.disposition"),
)
- default = (("r.table_name", "ASC"), ("r.column_name", "ASC"))
+ default = [(sortable_columns[i][1], "ASC") for i in (0, 1)]
sorting_columns = testgen.sorting_selector(sortable_columns, default)
+ with actions_column:
+ str_help = "Toggle on to perform actions on multiple Hygiene Issues"
+ do_multi_select = st.toggle("Multi-Select", help=str_help)
+
+
# Get hygiene issue list
df_pa = get_profiling_anomalies(run_id, issue_class, issue_type_id, sorting_columns)
@@ -245,7 +245,7 @@ def get_db_table_group_choices(str_project_code):
@st.cache_data(show_spinner="Retrieving Data")
def get_profiling_anomalies(str_profile_run_id, str_likelihood, issue_type_id, sorting_columns):
str_schema = st.session_state["dbschema"]
- if str_likelihood == "All Likelihoods":
+ if str_likelihood is None:
str_criteria = " AND t.issue_likelihood <> 'Potential PII'"
else:
str_criteria = f" AND t.issue_likelihood = '{str_likelihood}'"
@@ -309,7 +309,7 @@ def get_anomaly_disposition(str_profile_run_id):
return df[["id", "action"]]
-@st.cache_data(show_spinner="Retrieving Status")
+@st.cache_data(show_spinner=False)
def get_issue_types():
schema = st.session_state["dbschema"]
df = db.retrieve_data(f"SELECT id, anomaly_name FROM {schema}.profile_anomaly_types")
From 19c694ce4f85758f224e47e4cf76eb0c6e1ed97b Mon Sep 17 00:00:00 2001
From: Ricardo Boni
Date: Thu, 12 Sep 2024 12:23:25 -0400
Subject: [PATCH 44/78] feat(ui): improved sorting and filtering of test
 results and profiling results
---
testgen/ui/queries/profiling_queries.py | 11 +++-
testgen/ui/views/profiling_results.py | 28 ++++++---
testgen/ui/views/test_results.py | 79 +++++++++++++++++++------
3 files changed, 91 insertions(+), 27 deletions(-)
diff --git a/testgen/ui/queries/profiling_queries.py b/testgen/ui/queries/profiling_queries.py
index 8f6c089..f831bbc 100644
--- a/testgen/ui/queries/profiling_queries.py
+++ b/testgen/ui/queries/profiling_queries.py
@@ -79,8 +79,13 @@ def lookup_db_parentage_from_run(str_profile_run_id):
@st.cache_data(show_spinner="Retrieving Data")
-def get_profiling_detail(str_profile_run_id, str_table_name, str_column_name):
+def get_profiling_detail(str_profile_run_id, str_table_name, str_column_name, sorting_columns = None):
str_schema = st.session_state["dbschema"]
+ sorting_columns_str = (
+ "p.schema_name, p.table_name, position"
+ if sorting_columns is None
+ else ", ".join(" ".join(col) for col in sorting_columns)
+ )
str_sql = f"""
SELECT -- Identifiers
id::VARCHAR, dk_id,
@@ -98,7 +103,7 @@ def get_profiling_detail(str_profile_run_id, str_table_name, str_column_name):
WHEN 'B' THEN 'Boolean'
ELSE 'N/A'
END as general_type,
- functional_table_type as semantic_table_type,
+ functional_table_type as semantic_table_type,
functional_data_type as semantic_data_type,
datatype_suggestion,
CASE WHEN s.column_name IS NOT NULL THEN 'Yes' END as anomalies,
@@ -142,7 +147,7 @@ def get_profiling_detail(str_profile_run_id, str_table_name, str_column_name):
WHERE p.profile_run_id = '{str_profile_run_id}'::UUID
AND p.table_name ILIKE '{str_table_name}'
AND p.column_name ILIKE '{str_column_name}'
- ORDER BY p.schema_name, p.table_name, position;
+ ORDER BY {sorting_columns_str};
"""
return db.retrieve_data(str_sql)
diff --git a/testgen/ui/views/profiling_results.py b/testgen/ui/views/profiling_results.py
index d6445e0..896631b 100644
--- a/testgen/ui/views/profiling_results.py
+++ b/testgen/ui/views/profiling_results.py
@@ -36,25 +36,27 @@ def render(self, run_id: str, table_name: str | None = None, column_name: str |
{ "label": f"{table_group_name} | {run_date}" },
],
)
-
- table_filter_column, column_filter_column, export_button_column = st.columns([.3, .3, .4], vertical_alignment="bottom")
+
+ table_filter_column, column_filter_column, sort_column, export_button_column = st.columns(
+ [.3, .3, .08, .32], vertical_alignment="bottom"
+ )
with table_filter_column:
# Table Name filter
df = profiling_queries.run_table_lookup_query(table_group_id)
table_name = testgen.toolbar_select(
- options=df,
+ options=df,
value_column="table_name",
default_value=table_name,
bind_to_query="table_name",
label="Table Name",
)
-
+
with column_filter_column:
# Column Name filter
df = profiling_queries.run_column_lookup_query(table_group_id, table_name)
column_name = testgen.toolbar_select(
- options=df,
+ options=df,
value_column="column_name",
default_value=column_name,
bind_to_query="column_name",
@@ -62,14 +64,26 @@ def render(self, run_id: str, table_name: str | None = None, column_name: str |
disabled=not table_name,
)
+ with sort_column:
+ sortable_columns = (
+ ("Schema Name", "p.schema_name"),
+ ("Table Name", "p.table_name"),
+ ("Column Name", "p.column_name"),
+ ("Column Type", "p.column_type"),
+ ("Semantic Data Type", "semantic_data_type"),
+ ("Anomalies", "anomalies"),
+ )
+ default_sorting = [(sortable_columns[i][1], "ASC") for i in (0, 1, 2)]
+ sorting_columns = testgen.sorting_selector(sortable_columns, default_sorting)
+
# Use SQL wildcard to match all values
- if not table_name:
+ if not table_name:
table_name = "%%"
if not column_name:
column_name = "%%"
# Display main results grid
- df = profiling_queries.get_profiling_detail(run_id, table_name, column_name)
+ df = profiling_queries.get_profiling_detail(run_id, table_name, column_name, sorting_columns)
show_columns = [
"schema_name",
"table_name",
diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py
index 833f82f..5fa415b 100644
--- a/testgen/ui/views/test_results.py
+++ b/testgen/ui/views/test_results.py
@@ -28,7 +28,7 @@ class TestResultsPage(Page):
lambda: "run_id" in session.current_page_args or "test-runs",
]
- def render(self, run_id: str, status: str | None = None, **_kwargs) -> None:
+ def render(self, run_id: str, status: str | None = None, test_type: str | None = None, **_kwargs) -> None:
run_date, test_suite_name, project_code = get_drill_test_run(run_id)
run_date = date_service.get_timezoned_timestamp(st.session_state, run_date)
project_service.set_current_project(project_code)
@@ -47,7 +47,9 @@ def render(self, run_id: str, status: str | None = None, **_kwargs) -> None:
testgen.summary_bar(items=tests_summary, key="test_results", height=40, width=800)
# Setup Toolbar
- status_filter_column, actions_column, export_button_column = st.columns([.3, .5, .2], vertical_alignment="bottom")
+ status_filter_column, test_type_filter_column, sort_column, actions_column, export_button_column = st.columns(
+ [.2, .2, .08, .4, .12], vertical_alignment="bottom"
+ )
testgen.flex_row_end(actions_column)
testgen.flex_row_end(export_button_column)
@@ -60,12 +62,36 @@ def render(self, run_id: str, status: str | None = None, **_kwargs) -> None:
]
status = testgen.toolbar_select(
options=status_options,
- default_value=status,
- required=True,
+ default_value=status or "Failures and Warnings",
+ required=False,
bind_to_query="status",
label="Result Status",
)
+ with test_type_filter_column:
+ test_type = testgen.toolbar_select(
+ options=get_test_types(),
+ value_column="test_type",
+ display_column="test_name_short",
+ default_value=test_type,
+ required=False,
+ bind_to_query="test_type",
+ label="Test Type",
+ )
+
+ with sort_column:
+ sortable_columns = (
+ ("Table Name", "r.table_name"),
+ ("Columns/Focus", "r.column_names"),
+ ("Test Type", "r.test_type"),
+ ("UOM", "tt.measure_uom"),
+ ("Result Measure", "result_measure"),
+ ("Status", "result_status"),
+ ("Action", "r.disposition"),
+ )
+ default = [(sortable_columns[i][1], "ASC") for i in (0, 1, 2)]
+ sorting_columns = testgen.sorting_selector(sortable_columns, default)
+
with actions_column:
str_help = "Toggle on to perform actions on multiple results"
do_multi_select = st.toggle("Multi-Select", help=str_help)
@@ -81,10 +107,17 @@ def render(self, run_id: str, status: str | None = None, **_kwargs) -> None:
status = "'Passed'"
# Display main grid and retrieve selection
- selected = show_result_detail(run_id, status, do_multi_select, export_button_column)
+ selected = show_result_detail(
+ run_id, status, test_type, sorting_columns, do_multi_select, export_button_column
+ )
# Need to render toolbar buttons after grid, so selection status is maintained
disable_dispo = True if not selected or status == "'Passed'" else False
+
+ affected_cached_functions = [get_test_disposition]
+ if "r.disposition" in dict(sorting_columns):
+ affected_cached_functions.append(get_test_results)
+
if actions_column.button(
"✓", help="Confirm this issue as relevant for this run", disabled=disable_dispo
):
@@ -92,7 +125,7 @@ def render(self, run_id: str, status: str | None = None, **_kwargs) -> None:
do_disposition_update(selected, "Confirmed"),
as_toast=True,
clear_cache=True,
- lst_cached_functions=[get_test_disposition],
+ lst_cached_functions=affected_cached_functions,
)
if actions_column.button(
"✘", help="Dismiss this issue as not relevant for this run", disabled=disable_dispo
@@ -101,7 +134,7 @@ def render(self, run_id: str, status: str | None = None, **_kwargs) -> None:
do_disposition_update(selected, "Dismissed"),
as_toast=True,
clear_cache=True,
- lst_cached_functions=[get_test_disposition],
+ lst_cached_functions=affected_cached_functions,
)
if actions_column.button(
"🔇", help="Mute this test to deactivate it for future runs", disabled=not selected
@@ -110,14 +143,14 @@ def render(self, run_id: str, status: str | None = None, **_kwargs) -> None:
do_disposition_update(selected, "Inactive"),
as_toast=True,
clear_cache=True,
- lst_cached_functions=[get_test_disposition],
+ lst_cached_functions=affected_cached_functions,
)
if actions_column.button("⟲", help="Clear action", disabled=not selected):
fm.reset_post_updates(
do_disposition_update(selected, "No Decision"),
as_toast=True,
clear_cache=True,
- lst_cached_functions=[get_test_disposition],
+ lst_cached_functions=affected_cached_functions,
)
# Help Links
@@ -142,20 +175,32 @@ def get_drill_test_run(str_test_run_id):
return df.at[0, "test_date"], df.at[0, "test_suite"], df.at[0, "project_code"]
+@st.cache_data(show_spinner=False)
+def get_test_types():
+ schema = st.session_state["dbschema"]
+ df = db.retrieve_data(f"SELECT test_type, test_name_short FROM {schema}.test_types")
+ return df
+
+
@st.cache_data(show_spinner="Retrieving Results")
-def get_test_results(str_run_id, str_sel_test_status):
+def get_test_results(str_run_id, str_sel_test_status, test_type_id, sorting_columns):
schema = st.session_state["dbschema"]
- return get_test_results_uncached(schema, str_run_id, str_sel_test_status)
+ return get_test_results_uncached(schema, str_run_id, str_sel_test_status, test_type_id, sorting_columns)
-def get_test_results_uncached(str_schema, str_run_id, str_sel_test_status):
+def get_test_results_uncached(str_schema, str_run_id, str_sel_test_status, test_type_id, sorting_columns):
# First visible row first, so multi-select checkbox will render
+ str_order_by = "ORDER BY " + (", ".join(" ".join(col) for col in sorting_columns)) if sorting_columns else ""
+ test_type_clause = f"AND r.test_type = '{test_type_id}'" if test_type_id else ""
+ status_clause = f" AND r.result_status IN ({str_sel_test_status})" if str_sel_test_status else ""
str_sql = f"""
WITH run_results
AS (SELECT *
FROM {str_schema}.test_results r
- WHERE r.test_run_id = '{str_run_id}'
- AND r.result_status IN ({str_sel_test_status})
+ WHERE
+ r.test_run_id = '{str_run_id}'
+ {status_clause}
+ {test_type_clause}
)
SELECT r.table_name,
p.project_name, ts.test_suite, tg.table_groups_name, cn.connection_name, cn.project_host, cn.sql_flavor,
@@ -214,7 +259,7 @@ def get_test_results_uncached(str_schema, str_run_id, str_sel_test_status):
LEFT JOIN {str_schema}.cat_test_conditions c
ON (cn.sql_flavor = c.sql_flavor
AND r.test_type = c.test_type)
- ORDER BY schema_name, table_name, column_names, test_type;
+ {str_order_by} ;
"""
df = db.retrieve_data(str_sql)
@@ -551,9 +596,9 @@ def show_test_def_detail(str_test_def_id):
)
-def show_result_detail(str_run_id, str_sel_test_status, do_multi_select, export_container):
+def show_result_detail(str_run_id, str_sel_test_status, test_type_id, sorting_columns, do_multi_select, export_container):
# Retrieve test results (always cached, action as null)
- df = get_test_results(str_run_id, str_sel_test_status)
+ df = get_test_results(str_run_id, str_sel_test_status, test_type_id, sorting_columns)
# Retrieve disposition action (cache refreshed)
df_action = get_test_disposition(str_run_id)
# Update action from disposition df
From 3b231e9c05aa28b5eb91b40e2326815917ad4b34 Mon Sep 17 00:00:00 2001
From: Ricardo Boni
Date: Mon, 16 Sep 2024 19:03:57 -0400
Subject: [PATCH 45/78] misc(ui): Addressing code review feedback
---
.../js/components/sorting_selector.js | 42 +++++++++++++------
.../ui/components/widgets/sorting_selector.py | 4 +-
2 files changed, 30 insertions(+), 16 deletions(-)
diff --git a/testgen/ui/components/frontend/js/components/sorting_selector.js b/testgen/ui/components/frontend/js/components/sorting_selector.js
index 64c1d04..926a173 100644
--- a/testgen/ui/components/frontend/js/components/sorting_selector.js
+++ b/testgen/ui/components/frontend/js/components/sorting_selector.js
@@ -29,7 +29,7 @@ const SortingSelector = (/** @type {Properties} */ props) => {
const columnLabel = columns.reduce((acc, [colLabel, colId]) => ({ ...acc, [colId]: colLabel}), {});
- Streamlit.setFrameHeight(130 + 30 * columns.length);
+ Streamlit.setFrameHeight(100 + 30 * columns.length);
const componentState = columns.reduce(
(state, [colLabel, colId]) => (
@@ -83,16 +83,16 @@ const SortingSelector = (/** @type {Properties} */ props) => {
selectedDiv.innerHTML = ``;
}
+ const externalComponentState = () => Object.entries(componentState).filter(
+ ([colId, colState]) => colState.val.order !== null
+ ).sort(
+ ([colIdA, colStateA], [colIdB, colStateB]) => colStateA.val.order - colStateB.val.order
+ ).map(
+ ([colId, colState]) => [colId, colState.val.direction]
+ )
+
const apply = () => {
- Streamlit.sendData(
- Object.entries(componentState).filter(
- ([colId, colState]) => colState.val.order !== null
- ).sort(
- ([colIdA, colStateA], [colIdB, colStateB]) => colStateA.val.order - colStateB.val.order
- ).map(
- ([colId, colState]) => [colId, colState.val.direction]
- )
- );
+ Streamlit.sendData(externalComponentState());
}
const columnItem = (colId) => {
@@ -120,6 +120,12 @@ const SortingSelector = (/** @type {Properties} */ props) => {
columns.map(([colLabel, colId]) => van.derive(() => columnItem(colId))),
)
+ const resetDisabled = () => Object.entries(componentState).filter(
+ ([colId, colState]) => colState.val.order != null
+ ).length === 0;
+
+ const applyDisabled = () => externalComponentState().toString() === (props.state.val || []).toString();
+
return div(
{ class: 'tg-sort-selector' },
div(
@@ -137,11 +143,15 @@ const SortingSelector = (/** @type {Properties} */ props) => {
div(
{ class: `tg-sort-selector--footer` },
button(
- { onclick: reset },
+ {
+ onclick: reset,
+ style: `color: var(--button-text-color);`,
+ disabled: van.derive(resetDisabled),
+ },
span(`Reset`),
),
button(
- { onclick: apply },
+ { onclick: apply, disabled: van.derive(applyDisabled) },
span(`Apply`),
)
)
@@ -189,7 +199,7 @@ stylesheet.replace(`
.tg-sort-selector--column-list {
border-bottom: 3px dotted var(--disabled-text-color);
- padding-bottom: 16px;
+ padding-bottom: 8px;
margin-bottom: 8px;
}
@@ -197,6 +207,7 @@ stylesheet.replace(`
text-align: right;
text-transform: uppercase;
font-size: 70%;
+ color: var(--secondary-text-color);
}
.tg-sort-selector--footer {
@@ -214,6 +225,11 @@ stylesheet.replace(`
border-radius: 5px;
}
+.tg-sort-selector--footer button[disabled] {
+ color: var(--disabled-text-color) !important;
+}
+
+
@media (prefers-color-scheme: dark) {
.tg-sort-selector--column-list button:hover {
background: #FFFFFF20;
diff --git a/testgen/ui/components/widgets/sorting_selector.py b/testgen/ui/components/widgets/sorting_selector.py
index 17ffa11..b81f932 100644
--- a/testgen/ui/components/widgets/sorting_selector.py
+++ b/testgen/ui/components/widgets/sorting_selector.py
@@ -4,7 +4,6 @@
from typing import Any
import streamlit as st
-from streamlit.runtime.scriptrunner import get_script_run_ctx
from testgen.ui.components.utils.component import component
from testgen.ui.navigation.router import Router
@@ -65,9 +64,8 @@ def sorting_selector(
state = None
- ctx = get_script_run_ctx()
try:
- state = ctx.session_state[key]
+ state = st.session_state[key]
except KeyError:
pass
From 1d8e310f69aa545cd65672faccc4f02fef9cb025 Mon Sep 17 00:00:00 2001
From: Ricardo Boni
Date: Mon, 16 Sep 2024 20:43:51 -0400
Subject: [PATCH 46/78] misc: Addressing code review feedback
---
testgen/ui/components/widgets/sorting_selector.py | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/testgen/ui/components/widgets/sorting_selector.py b/testgen/ui/components/widgets/sorting_selector.py
index b81f932..8b168f1 100644
--- a/testgen/ui/components/widgets/sorting_selector.py
+++ b/testgen/ui/components/widgets/sorting_selector.py
@@ -28,13 +28,11 @@ def _state_to_str(columns, state):
def _state_from_str(columns, state_str):
col_slug_to_id = {_slugfy(col_label): col_id for col_label, col_id in columns}
state_part_re = re.compile("".join(("(", "|".join(col_slug_to_id.keys()), r")\.(asc|desc)")))
- state = []
- try:
- for state_part in state_str.split("-"):
- if match := state_part_re.match(state_part):
- state.append([col_slug_to_id[match.group(1)], match.group(2).upper()])
- except Exception as e:
- return None
+ state = [
+ [col_slug_to_id[col_slug], direction.upper()]
+ for col_slug, direction
+ in state_part_re.findall(state_str)
+ ]
return state
From f27b085473feef3092a9f0127b9e09f5b9cfeea1 Mon Sep 17 00:00:00 2001
From: Aarthy Adityan
Date: Fri, 13 Sep 2024 12:07:21 -0400
Subject: [PATCH 47/78] feat(ui): add project and table groups summary to
overview page
---
testgen/ui/assets/style.css | 10 +-
testgen/ui/views/overview.py | 298 +++++++++++++++++++++++++++++++++--
2 files changed, 296 insertions(+), 12 deletions(-)
diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css
index c3a39c5..3a7b50a 100644
--- a/testgen/ui/assets/style.css
+++ b/testgen/ui/assets/style.css
@@ -1,6 +1,7 @@
body {
--primary-color: #06a04a;
--link-color: #1976d2;
+ --error-color: #EF5350;
--primary-text-color: #000000de;
--secondary-text-color: #0000008a;
@@ -126,7 +127,6 @@ button[title="Show password text"] {
.element-container:has(iframe[height="0"][title="testgen.ui.components.utils.component.testgen"]) {
display: none !important;
}
-/* ... */
/* Cards Component */
[data-testid="stVerticalBlockBorderWrapper"]:has(> div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.testgen_card) {
@@ -172,6 +172,14 @@ button[title="Show password text"] {
gap: unset;
}
+/* Stylistic equivalent of st.caption("text") for customization
+Use as st.html('
text
') */
+.caption {
+ color: var(--caption-text-color);
+ font-size: 14px;
+ margin-bottom: 0;
+}
+
/* Tooltips */
[data-tooltip] {
position: relative;
diff --git a/testgen/ui/views/overview.py b/testgen/ui/views/overview.py
index 901fa7f..b500826 100644
--- a/testgen/ui/views/overview.py
+++ b/testgen/ui/views/overview.py
@@ -1,14 +1,18 @@
-import logging
import typing
+from datetime import datetime
+import pandas as pd
import streamlit as st
+import testgen.ui.services.database_service as db
+from testgen.common import date_service
from testgen.ui.components import widgets as testgen
from testgen.ui.navigation.menu import MenuItem
from testgen.ui.navigation.page import Page
+from testgen.ui.services import test_suite_service
from testgen.ui.session import session
-LOG = logging.getLogger("testgen")
+STALE_PROFILE_DAYS = 60
class OverviewPage(Page):
@@ -18,17 +22,289 @@ class OverviewPage(Page):
]
menu_item = MenuItem(icon="home", label="Overview", order=0)
- def render(self, **_kwargs):
+ def render(self, project_code: str | None = None, **_kwargs):
+ project_code = project_code or session.project
+ table_groups_df: pd.DataFrame = get_table_groups_summary(project_code)
+
testgen.page_header(
- "Welcome to DataOps TestGen",
+ "Project Overview",
"https://docs.datakitchen.io/article/dataops-testgen-help/introduction-to-dataops-testgen",
- )
+ )
+
+ render_project_summary(table_groups_df)
+
+ st.html(f'
Table Groups ({len(table_groups_df.index)})
')
+ for index, table_group in table_groups_df.iterrows():
+ render_table_group_card(table_group, project_code, index)
+
+
+def render_project_summary(table_groups: pd.DataFrame) -> None:
+ project_column, _ = st.columns([.5, .5])
+ with project_column:
+ with testgen.card():
+ summary_column, _ = st.columns([.8, .2])
+ # score_column, summary_column = st.columns([.5, .5])
+
+ # with score_column:
+ # st.caption("Project HIT score")
+ # st.metric(
+ # "Project HIT score",
+ # value=project_score,
+ # delta=project_score_delta or 0,
+ # label_visibility="collapsed",
+ # )
+
+ with summary_column:
+ st.caption("Project Summary")
+ st.html(f"""{len(table_groups.index)} table groups
+ {table_groups['latest_tests_suite_ct'].sum()} test suites
+ {table_groups['latest_tests_ct'].sum()} test definitions
+ """)
+
+
+@st.fragment
+def render_table_group_card(table_group: pd.Series, project_code: str, key: int) -> None:
+ with testgen.card(title=table_group["table_groups_name"]) as test_suite_card:
+
+ # Don't remove this
+ # For some reason, st.columns do not get completely removed from DOM when used conditionally within a fragment
+ # Without this CSS, the "hidden" elements in the expanded state take up space
+ testgen.no_flex_gap()
+
+ with test_suite_card.actions:
+ expand_toggle = testgen.expander_toggle(key=f"toggle_{key}")
+
+ profile_column, tests_column = st.columns([.5, .5])
+ # score_column, profile_column, tests_column = st.columns([.2, .35, .45])
+
+ # with score_column:
+ # st.caption("HIT score")
+ # st.metric(
+ # "HIT score",
+ # value=table_group["score"],
+ # delta=table_group["score_delta"] or 0,
+ # label_visibility="collapsed",
+ # )
+
+ with profile_column:
+ testgen.no_flex_gap()
+ is_stale = (datetime.utcnow() - table_group["latest_profile_start"]).days > STALE_PROFILE_DAYS
+ st.html(f"""
')
- # if (latest_generation := test_suite["latest_generation"]) and not pd.isnull(latest_generation):
- # generation_column.markdown(date_service.get_timezoned_timestamp(st.session_state, latest_generation))
- # else:
- # generation_column.markdown("--")
+ if (latest_generation := test_suite["latest_auto_gen_date"]) and not pd.isnull(latest_generation):
+ generation_column.html(f'
')
+ else:
+ generation_column.markdown("--")
latest_run_id = test_suite["latest_run_id"]
if latest_run_id and not pd.isnull(latest_run_id):
@@ -198,11 +200,12 @@ def render_test_suite_item(test_suite: pd.Series, column_spec: list[int], key: i
{ "label": "Passed", "value": int(test_suite["last_run_passed_ct"]), "color": "green" },
{ "label": "Warnings", "value": int(test_suite["last_run_warning_ct"]), "color": "yellow" },
{ "label": "Failed", "value": int(test_suite["last_run_failed_ct"]), "color": "red" },
- { "label": "Errors", "value": int(test_suite["last_run_error_ct"]), "color": "grey" },
+ { "label": "Errors", "value": int(test_suite["last_run_error_ct"]), "color": "brown" },
+ { "label": "Dismissed", "value": int(test_suite["last_run_dismissed_ct"]), "color": "grey" },
],
key=f"tests_{key}",
height=8,
- width=120,
+ width=200,
)
else:
run_column.markdown("--")
@@ -273,16 +276,50 @@ def get_table_groups_summary(project_code: str) -> pd.DataFrame:
),
latest_tests AS (
SELECT suites.table_groups_id,
- COUNT(*) as test_suite_ct,
- SUM(passed_ct) as passed_ct,
- SUM(warning_ct) as warning_ct,
- SUM(failed_ct) as failed_ct,
- SUM(error_ct) as error_ct
+ COUNT(DISTINCT latest_run.test_suite_id) as test_suite_ct,
+ COUNT(*) as test_ct,
+ SUM(
+ CASE
+ WHEN COALESCE(latest_results.disposition, 'Confirmed') = 'Confirmed'
+ AND latest_results.result_status = 'Passed' THEN 1
+ ELSE 0
+ END
+ ) as passed_ct,
+ SUM(
+ CASE
+ WHEN COALESCE(latest_results.disposition, 'Confirmed') = 'Confirmed'
+ AND latest_results.result_status = 'Warning' THEN 1
+ ELSE 0
+ END
+ ) as warning_ct,
+ SUM(
+ CASE
+ WHEN COALESCE(latest_results.disposition, 'Confirmed') = 'Confirmed'
+ AND latest_results.result_status = 'Failed' THEN 1
+ ELSE 0
+ END
+ ) as failed_ct,
+ SUM(
+ CASE
+ WHEN COALESCE(latest_results.disposition, 'Confirmed') = 'Confirmed'
+ AND latest_results.result_status = 'Error' THEN 1
+ ELSE 0
+ END
+ ) as error_ct,
+ SUM(
+ CASE
+ WHEN COALESCE(latest_results.disposition, 'Confirmed') IN ('Dismissed', 'Inactive') THEN 1
+ ELSE 0
+ END
+ ) as dismissed_ct
FROM latest_run_dates lrd
LEFT JOIN {schema}.test_runs latest_run ON (
lrd.test_suite_id = latest_run.test_suite_id
AND lrd.test_starttime = latest_run.test_starttime
)
+ LEFT JOIN {schema}.test_results latest_results ON (
+ latest_run.id = latest_results.test_run_id
+ )
LEFT JOIN {schema}.test_suites as suites ON (suites.id = lrd.test_suite_id)
GROUP BY suites.table_groups_id
)
@@ -297,11 +334,12 @@ def get_table_groups_summary(project_code: str) -> pd.DataFrame:
latest_profile.possible_ct as latest_anomalies_possible_ct,
latest_profile.dismissed_ct as latest_anomalies_dismissed_ct,
latest_tests.test_suite_ct as latest_tests_suite_ct,
- latest_tests.passed_ct + latest_tests.warning_ct + latest_tests.failed_ct + latest_tests.error_ct as latest_tests_ct,
+ latest_tests.test_ct as latest_tests_ct,
latest_tests.passed_ct as latest_tests_passed_ct,
latest_tests.warning_ct as latest_tests_warning_ct,
latest_tests.failed_ct as latest_tests_failed_ct,
- latest_tests.error_ct as latest_tests_error_ct
+ latest_tests.error_ct as latest_tests_error_ct,
+ latest_tests.dismissed_ct as latest_tests_dismissed_ct
FROM {schema}.table_groups as groups
LEFT JOIN latest_profile ON (groups.id = latest_profile.table_groups_id)
LEFT JOIN latest_tests ON (groups.id = latest_tests.table_groups_id)
diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py
index 5fa415b..9c932f3 100644
--- a/testgen/ui/views/test_results.py
+++ b/testgen/ui/views/test_results.py
@@ -287,23 +287,57 @@ def get_test_disposition(str_run_id):
@st.cache_data(show_spinner=ALWAYS_SPIN)
-def get_test_result_summary(str_run_id):
- str_schema = st.session_state["dbschema"]
- str_sql = f"""
- SELECT passed_ct,
- warning_ct,
- failed_ct,
- COALESCE(error_ct, 0) as error_ct
- FROM {str_schema}.test_runs
- WHERE id = '{str_run_id}'::UUID;
+def get_test_result_summary(run_id):
+ schema = st.session_state["dbschema"]
+ sql = f"""
+ SELECT SUM(
+ CASE
+ WHEN COALESCE(test_results.disposition, 'Confirmed') = 'Confirmed'
+ AND test_results.result_status = 'Passed' THEN 1
+ ELSE 0
+ END
+ ) as passed_ct,
+ SUM(
+ CASE
+ WHEN COALESCE(test_results.disposition, 'Confirmed') = 'Confirmed'
+ AND test_results.result_status = 'Warning' THEN 1
+ ELSE 0
+ END
+ ) as warning_ct,
+ SUM(
+ CASE
+ WHEN COALESCE(test_results.disposition, 'Confirmed') = 'Confirmed'
+ AND test_results.result_status = 'Failed' THEN 1
+ ELSE 0
+ END
+ ) as failed_ct,
+ SUM(
+ CASE
+ WHEN COALESCE(test_results.disposition, 'Confirmed') = 'Confirmed'
+ AND test_results.result_status = 'Error' THEN 1
+ ELSE 0
+ END
+ ) as error_ct,
+ SUM(
+ CASE
+ WHEN COALESCE(test_results.disposition, 'Confirmed') IN ('Dismissed', 'Inactive') THEN 1
+ ELSE 0
+ END
+ ) as dismissed_ct
+ FROM {schema}.test_runs
+ LEFT JOIN {schema}.test_results ON (
+ test_runs.id = test_results.test_run_id
+ )
+ WHERE test_runs.id = '{run_id}'::UUID;
"""
- df = db.retrieve_data(str_sql)
+ df = db.retrieve_data(sql)
return [
{ "label": "Passed", "value": int(df.at[0, "passed_ct"]), "color": "green" },
{ "label": "Warnings", "value": int(df.at[0, "warning_ct"]), "color": "yellow" },
{ "label": "Failed", "value": int(df.at[0, "failed_ct"]), "color": "red" },
- { "label": "Errors", "value": int(df.at[0, "error_ct"]), "color": "grey" },
+ { "label": "Errors", "value": int(df.at[0, "error_ct"]), "color": "brown" },
+ { "label": "Dismissed", "value": int(df.at[0, "dismissed_ct"]), "color": "grey" },
]
diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py
index caa497e..84da279 100644
--- a/testgen/ui/views/test_suites.py
+++ b/testgen/ui/views/test_suites.py
@@ -124,10 +124,11 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N
{ "label": "Passed", "value": int(test_suite["last_run_passed_ct"]), "color": "green" },
{ "label": "Warnings", "value": int(test_suite["last_run_warning_ct"]), "color": "yellow" },
{ "label": "Failed", "value": int(test_suite["last_run_failed_ct"]), "color": "red" },
- { "label": "Errors", "value": int(test_suite["last_run_error_ct"]), "color": "grey" },
+ { "label": "Errors", "value": int(test_suite["last_run_error_ct"]), "color": "brown" },
+ { "label": "Dismissed", "value": int(test_suite["last_run_dismissed_ct"]), "color": "grey" },
],
- height=30,
- width=100,
+ height=20,
+ width=350,
key=f"test_suite:keys:run-rummary:{test_suite['id']}",
)
From 772a512daea940ccc7fdcd20d6288331631cfd7f Mon Sep 17 00:00:00 2001
From: Ricardo Boni
Date: Tue, 17 Sep 2024 13:13:46 -0400
Subject: [PATCH 49/78] fix(test results): sorting and filtering should be
optional for the CLI
---
testgen/ui/views/test_results.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py
index 9c932f3..cc8962c 100644
--- a/testgen/ui/views/test_results.py
+++ b/testgen/ui/views/test_results.py
@@ -188,7 +188,7 @@ def get_test_results(str_run_id, str_sel_test_status, test_type_id, sorting_colu
return get_test_results_uncached(schema, str_run_id, str_sel_test_status, test_type_id, sorting_columns)
-def get_test_results_uncached(str_schema, str_run_id, str_sel_test_status, test_type_id, sorting_columns):
+def get_test_results_uncached(str_schema, str_run_id, str_sel_test_status, test_type_id=None, sorting_columns=None):
# First visible row first, so multi-select checkbox will render
str_order_by = "ORDER BY " + (", ".join(" ".join(col) for col in sorting_columns)) if sorting_columns else ""
test_type_clause = f"AND r.test_type = '{test_type_id}'" if test_type_id else ""
From 2d73a57ba5173e9df1633e2e047d106cfdb08af6 Mon Sep 17 00:00:00 2001
From: Aarthy Adityan
Date: Tue, 17 Sep 2024 00:38:29 -0400
Subject: [PATCH 50/78] feat(ui): bind grid selection to query params on result
pages
---
testgen/ui/services/form_service.py | 28 ++++++++++++++++++++++---
testgen/ui/views/profiling_anomalies.py | 8 +++++--
testgen/ui/views/profiling_results.py | 6 +++++-
testgen/ui/views/test_results.py | 9 ++++++--
4 files changed, 43 insertions(+), 8 deletions(-)
diff --git a/testgen/ui/services/form_service.py b/testgen/ui/services/form_service.py
index a1a56de..e1810f3 100644
--- a/testgen/ui/services/form_service.py
+++ b/testgen/ui/services/form_service.py
@@ -19,6 +19,7 @@
import testgen.common.date_service as date_service
import testgen.ui.services.authentication_service as authentication_service
import testgen.ui.services.database_service as db
+from testgen.ui.navigation.router import Router
"""
Shared rendering of UI elements
@@ -766,8 +767,10 @@ def render_grid_select(
str_prompt=None,
int_height=400,
do_multi_select=False,
+ bind_to_query=None,
show_column_headers=None,
render_highlights=True,
+ key="aggrid",
):
show_prompt(str_prompt)
@@ -841,7 +844,18 @@ def render_grid_select(
gb = GridOptionsBuilder.from_dataframe(df)
selection_mode = "multiple" if do_multi_select else "single"
- gb.configure_selection(selection_mode=selection_mode, use_checkbox=do_multi_select)
+
+ pre_selected_rows = None
+ if bind_to_query:
+ query_value = st.query_params.get(bind_to_query)
+ # Workaround for this open issue: https://github.com/PablocFonseca/streamlit-aggrid/issues/207#issuecomment-1793039564
+ pre_selected_rows = { query_value: True } if isinstance(query_value, str) and query_value.isdigit() else None
+
+ gb.configure_selection(
+ selection_mode=selection_mode,
+ use_checkbox=do_multi_select,
+ pre_selected_rows=pre_selected_rows,
+ )
all_columns = list(df.columns)
@@ -896,10 +910,18 @@ def render_grid_select(
"padding-bottom": "0px !important",
}
},
+ # Key is needed for query binding to work
+ # Changing selection mode does not work if same key is used for both modes
+ key=f"{key}_{selection_mode}",
)
- if len(grid_data["selected_rows"]):
- return grid_data["selected_rows"]
+ selected_rows = grid_data["selected_rows"]
+ if bind_to_query:
+ Router().set_query_params({
+ bind_to_query: selected_rows[0].get("_selectedRowNodeInfo", {}).get("nodeRowIndex") if len(selected_rows) else None,
+ })
+ if len(selected_rows):
+ return selected_rows
def render_logo(logo_path: str = logo_file):
diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py
index 4372902..ab1bfcc 100644
--- a/testgen/ui/views/profiling_anomalies.py
+++ b/testgen/ui/views/profiling_anomalies.py
@@ -127,7 +127,11 @@ def render(self, run_id: str, issue_class: str | None = None, issue_type: str |
# Show main grid and retrieve selections
selected = fm.render_grid_select(
- df_pa, lst_show_columns, int_height=400, do_multi_select=do_multi_select
+ df_pa,
+ lst_show_columns,
+ int_height=400,
+ do_multi_select=do_multi_select,
+ bind_to_query="selected",
)
with export_button_column:
@@ -149,7 +153,7 @@ def render(self, run_id: str, issue_class: str | None = None, issue_type: str |
if selected:
# Always show details for last selected row
- selected_row = selected[len(selected) - 1]
+ selected_row = selected[0]
else:
selected_row = None
diff --git a/testgen/ui/views/profiling_results.py b/testgen/ui/views/profiling_results.py
index 896631b..bd134d9 100644
--- a/testgen/ui/views/profiling_results.py
+++ b/testgen/ui/views/profiling_results.py
@@ -98,7 +98,11 @@ def render(self, run_id: str, table_name: str | None = None, column_name: str |
with st.expander("📜 **Table CREATE script with suggested datatypes**"):
st.code(generate_create_script(df), "sql")
- selected_row = fm.render_grid_select(df, show_columns)
+ selected_row = fm.render_grid_select(
+ df,
+ show_columns,
+ bind_to_query="selected",
+ )
with export_button_column:
testgen.flex_row_end()
diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py
index cc8962c..67e49a5 100644
--- a/testgen/ui/views/test_results.py
+++ b/testgen/ui/views/test_results.py
@@ -660,7 +660,12 @@ def show_result_detail(str_run_id, str_sel_test_status, test_type_id, sorting_co
]
selected_rows = fm.render_grid_select(
- df, lst_show_columns, do_multi_select=do_multi_select, show_column_headers=lst_show_headers
+ df,
+ lst_show_columns,
+ do_multi_select=do_multi_select,
+ show_column_headers=lst_show_headers,
+ key="grid:test-results",
+ bind_to_query="selected",
)
with export_container:
@@ -705,7 +710,7 @@ def show_result_detail(str_run_id, str_sel_test_status, test_type_id, sorting_co
if not selected_rows:
st.markdown(":orange[Select a record to see more information.]")
else:
- selected_row = selected_rows[len(selected_rows) - 1]
+ selected_row = selected_rows[0]
dfh = get_test_result_history(
selected_row["test_type"],
selected_row["test_suite_id"],
From cd837623de640cacd83cb7118ff143c1f5658ed2 Mon Sep 17 00:00:00 2001
From: Luis Trinidad
Date: Wed, 18 Sep 2024 07:22:39 -0400
Subject: [PATCH 51/78] fix(test definitions): guard against null dynamic
attribute labels
---
testgen/ui/views/test_definitions.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py
index ee25183..b43e1e8 100644
--- a/testgen/ui/views/test_definitions.py
+++ b/testgen/ui/views/test_definitions.py
@@ -312,7 +312,9 @@ def show_test_form(
dynamic_attributes = dynamic_attributes_raw.split(",")
dynamic_attributes_labels_raw = selected_test_type_row["default_parm_prompts"]
- dynamic_attributes_labels = dynamic_attributes_labels_raw.split(",")
+ dynamic_attributes_labels = ""
+ if dynamic_attributes_labels_raw:
+ dynamic_attributes_labels = dynamic_attributes_labels_raw.split(",")
dynamic_attributes_help_raw = selected_test_type_row["default_parm_help"]
if not dynamic_attributes_help_raw:
From 87bb2499c14eff5b9b5e87fe78f2a8bd75f790d6 Mon Sep 17 00:00:00 2001
From: Luis Trinidad
Date: Wed, 18 Sep 2024 07:23:43 -0400
Subject: [PATCH 52/78] fix(test definitions): remove extra empty option from
add dialog dropdown
---
testgen/ui/views/test_definitions.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py
index b43e1e8..463d8df 100644
--- a/testgen/ui/views/test_definitions.py
+++ b/testgen/ui/views/test_definitions.py
@@ -642,7 +642,7 @@ def prompt_for_test_type():
df = run_test_type_lookup_query(str_test_type=None, boo_show_referential=boo_show_referential,
boo_show_table=boo_show_table, boo_show_column=boo_show_column,
boo_show_custom=boo_show_custom)
- lst_choices = ["(Select a Test Type)", *df["select_name"].tolist()]
+ lst_choices = df["select_name"].tolist()
str_selected = selectbox("Test Type", lst_choices)
if str_selected:
From 7c03eaed4184aa913ee40907aa40e2ec633394e9 Mon Sep 17 00:00:00 2001
From: Aarthy Adityan
Date: Wed, 18 Sep 2024 13:35:07 -0400
Subject: [PATCH 53/78] feat(ui): add paginator component
---
.../frontend/js/components/paginator.js | 111 ++++++++++++++++++
testgen/ui/components/frontend/js/main.js | 2 +
.../ui/components/widgets/expander_toggle.py | 5 -
testgen/ui/components/widgets/paginator.py | 25 ++++
4 files changed, 138 insertions(+), 5 deletions(-)
create mode 100644 testgen/ui/components/frontend/js/components/paginator.js
create mode 100644 testgen/ui/components/widgets/paginator.py
diff --git a/testgen/ui/components/frontend/js/components/paginator.js b/testgen/ui/components/frontend/js/components/paginator.js
new file mode 100644
index 0000000..7c839a2
--- /dev/null
+++ b/testgen/ui/components/frontend/js/components/paginator.js
@@ -0,0 +1,111 @@
+/**
+ * @typedef Properties
+ * @type {object}
+ * @property {number} count
+ * @property {number} pageSize
+ * @property {number} pageIndex
+ */
+import van from '../van.min.js';
+import { Streamlit } from '../streamlit.js';
+
+const { div, span, i, button } = van.tags;
+
+const Paginator = (/** @type Properties */ props) => {
+ const count = props.count.val;
+ const pageSize = props.pageSize.val;
+
+ Streamlit.setFrameHeight(32);
+
+ if (!window.testgen.loadedStylesheets.expanderToggle) {
+ document.adoptedStyleSheets.push(stylesheet);
+ window.testgen.loadedStylesheets.expanderToggle = true;
+ }
+
+ const pageIndexState = van.state(props.pageIndex.val || 0);
+
+ return div(
+ { class: 'tg-paginator' },
+ span(
+ { class: 'tg-paginator--label' },
+ () => {
+ const pageIndex = pageIndexState.val;
+ return `${pageSize * pageIndex + 1} - ${Math.min(count, pageSize * (pageIndex + 1))} of ${count}`
+ },
+ ),
+ button(
+ {
+ class: 'tg-paginator--button',
+ onclick: () => {
+ pageIndexState.val = 0;
+ Streamlit.sendData(pageIndexState.val);
+ },
+ disabled: () => pageIndexState.val === 0,
+ },
+ i({class: 'material-symbols-rounded'}, 'first_page')
+ ),
+ button(
+ {
+ class: 'tg-paginator--button',
+ onclick: () => {
+ pageIndexState.val--;
+ Streamlit.sendData(pageIndexState.val);
+ },
+ disabled: () => pageIndexState.val === 0,
+ },
+ i({class: 'material-symbols-rounded'}, 'chevron_left')
+ ),
+ button(
+ {
+ class: 'tg-paginator--button',
+ onclick: () => {
+ pageIndexState.val++;
+ Streamlit.sendData(pageIndexState.val);
+ },
+ disabled: () => pageIndexState.val === Math.ceil(count / pageSize) - 1,
+ },
+ i({class: 'material-symbols-rounded'}, 'chevron_right')
+ ),
+ button(
+ {
+ class: 'tg-paginator--button',
+ onclick: () => {
+ pageIndexState.val = Math.ceil(count / pageSize) - 1;
+ Streamlit.sendData(pageIndexState.val);
+ },
+ disabled: () => pageIndexState.val === Math.ceil(count / pageSize) - 1,
+ },
+ i({class: 'material-symbols-rounded'}, 'last_page')
+ ),
+ );
+};
+
+const stylesheet = new CSSStyleSheet();
+stylesheet.replace(`
+.tg-paginator {
+ display: flex;
+ flex-direction: row;
+ align-items: center;
+ justify-content: flex-end;
+}
+
+.tg-paginator--label {
+ margin-right: 20px;
+ color: var(--secondary-text-color);
+}
+
+.tg-paginator--button {
+ background-color: transparent;
+ border: none;
+ height: 32px;
+ padding: 4px;
+ color: var(--secondary-text-color);
+ cursor: pointer;
+}
+
+.tg-paginator--button[disabled] {
+ color: var(--disabled-text-color);
+ cursor: default;
+}
+`);
+
+export { Paginator };
diff --git a/testgen/ui/components/frontend/js/main.js b/testgen/ui/components/frontend/js/main.js
index 3c56cdd..bf8bc4b 100644
--- a/testgen/ui/components/frontend/js/main.js
+++ b/testgen/ui/components/frontend/js/main.js
@@ -11,6 +11,7 @@ import { Button } from './components/button.js'
import { Breadcrumbs } from './components/breadcrumbs.js'
import { ExpanderToggle } from './components/expander_toggle.js';
import { Link } from './components/link.js';
+import { Paginator } from './components/paginator.js';
import { Select } from './components/select.js'
import { SummaryBar } from './components/summary_bar.js';
import { SortingSelector } from './components/sorting_selector.js';
@@ -24,6 +25,7 @@ const TestGenComponent = (/** @type {string} */ id, /** @type {object} */ props)
button: Button,
expander_toggle: ExpanderToggle,
link: Link,
+ paginator: Paginator,
select: Select,
sorting_selector: SortingSelector,
sidebar: window.top.testgen.components.Sidebar,
diff --git a/testgen/ui/components/widgets/expander_toggle.py b/testgen/ui/components/widgets/expander_toggle.py
index 16e1bf3..21f6dcb 100644
--- a/testgen/ui/components/widgets/expander_toggle.py
+++ b/testgen/ui/components/widgets/expander_toggle.py
@@ -1,11 +1,7 @@
-import logging
-
import streamlit as st
from testgen.ui.components.utils.component import component
-LOG = logging.getLogger("testgen")
-
def expander_toggle(
default: bool = False,
@@ -22,7 +18,6 @@ def expander_toggle(
:param collapse_label: label for expanded state, default="Collapse"
:param key: unique key to give the component a persisting state
"""
- LOG.debug(key)
if key in st.session_state:
default = st.session_state[key]
diff --git a/testgen/ui/components/widgets/paginator.py b/testgen/ui/components/widgets/paginator.py
new file mode 100644
index 0000000..8c1e4c7
--- /dev/null
+++ b/testgen/ui/components/widgets/paginator.py
@@ -0,0 +1,25 @@
+from testgen.ui.components.utils.component import component
+
+
+def paginator(
+ count: int,
+ page_size: int,
+ page_index: int = 0,
+ key: str = "testgen:paginator",
+) -> bool:
+ """
+ Testgen component to display pagination arrows.
+
+ # Parameters
+ :param count: total number of items being paginated
+ :param page_size: number of items displayed per page
+ :param page_index: index of initial page displayed, default=0 (first page)
+ :param key: unique key to give the component a persisting state
+ """
+
+ return component(
+ id_="paginator",
+ key=key,
+ default=page_index,
+ props={"count": count, "pageSize": page_size, "pageIndex": page_index},
+ )
From 858419f83fbc1e145e9ab68f4f7d021e85c2b229 Mon Sep 17 00:00:00 2001
From: Aarthy Adityan
Date: Wed, 18 Sep 2024 13:36:12 -0400
Subject: [PATCH 54/78] misc(ui): updates to components and widgets
---
testgen/ui/assets/style.css | 26 +++++++++++++------
testgen/ui/components/frontend/css/shared.css | 13 +++++++++-
.../frontend/js/components/button.js | 2 ++
testgen/ui/components/widgets/__init__.py | 5 ++++
testgen/ui/components/widgets/button.py | 4 +++
testgen/ui/components/widgets/card.py | 2 +-
testgen/ui/components/widgets/page.py | 26 +++++++++++++++----
7 files changed, 63 insertions(+), 15 deletions(-)
diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css
index ed5b605..25b829e 100644
--- a/testgen/ui/assets/style.css
+++ b/testgen/ui/assets/style.css
@@ -3,6 +3,15 @@ body {
--link-color: #1976d2;
--error-color: #EF5350;
+ --red: #EF5350;
+ --orange: #FF9800;
+ --yellow: #FDD835;
+ --green: #9CCC65;
+ --purple: #AB47BC;
+ --blue: #42A5F5;
+ --brown: #8D6E63;
+ --grey: #BDBDBD;
+
--primary-text-color: #000000de;
--secondary-text-color: #0000008a;
--disabled-text-color: #00000042;
@@ -129,10 +138,6 @@ button[title="Show password text"] {
}
/* Cards Component */
-[data-testid="stVerticalBlockBorderWrapper"]:has(> div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.testgen_card) {
- background-color: var(--dk-card-background);
-}
-
[data-testid="stVerticalBlockBorderWrapper"]:has(> div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.testgen_card) .testgen_card-header > .testgen_card-title {
margin: unset;
padding: unset;
@@ -149,6 +154,10 @@ button[title="Show password text"] {
}
/* ... */
+[data-testid="stVerticalBlockBorderWrapper"]:has(> div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.bg-white) {
+ background-color: var(--dk-card-background);
+}
+
[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.flex-row) [data-testid="stVerticalBlock"] {
width: 100%;
flex-direction: row;
@@ -173,19 +182,20 @@ button[title="Show password text"] {
}
/* Stylistic equivalent of st.caption("text") for customization
-Use as st.html('<p class="caption">text</p>') */
+Use as testgen.caption("text", "extra_styles") */
.caption {
color: var(--caption-text-color);
font-size: 14px;
margin-bottom: 0;
}
-/* Stylistic equivalent of testgen.link() to match font style of links
-Use as st.html('
")
+ else:
+ st.markdown("--")
def get_table_groups_summary(project_code: str) -> pd.DataFrame:
diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py
index 84da279..0024de7 100644
--- a/testgen/ui/views/test_suites.py
+++ b/testgen/ui/views/test_suites.py
@@ -12,11 +12,13 @@
from testgen.commands.run_execute_tests import run_execution_steps_in_background
from testgen.commands.run_generate_tests import run_test_gen_queries
from testgen.commands.run_observability_exporter import export_test_results
+from testgen.common import date_service
from testgen.ui.components import widgets as testgen
from testgen.ui.navigation.menu import MenuItem
from testgen.ui.navigation.page import Page
from testgen.ui.services.string_service import empty_if_null
from testgen.ui.session import session
+from testgen.utils import to_int
class TestSuitesPage(Page):
@@ -66,7 +68,7 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N
testgen.button(
type_="icon",
icon="output",
- tooltip="Export results to observability",
+ tooltip="Export results to Observability",
tooltip_position="right",
on_click=partial(observability_export_dialog, test_suite),
key=f"test_suite:keys:export:{test_suite['id']}",
@@ -91,46 +93,46 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N
main_section, latest_run_section, actions_section = st.columns([.4, .4, .2])
with main_section:
+ testgen.no_flex_gap()
testgen.link(
- label=f"{test_suite['last_run_test_ct']} tests definitions",
+ label=f"{to_int(test_suite['last_run_test_ct'])} tests definitions",
href="test-suites:definitions",
params={ "test_suite_id": test_suite["id"] },
right_icon="chevron_right",
key=f"test_suite:keys:go-to-definitions:{test_suite['id']}",
)
-            st.html(f"""
-                <p class="caption">Description</p>
-                <p>{test_suite['test_suite_description']}</p>
-            """)
-
-            if (latest_run_start := test_suite["latest_run_start"]) and not pd.isnull(latest_run_start):
-                with latest_run_section:
-                    testgen.no_flex_gap()
-                    st.html('<p class="caption">Latest Run</p>')
+ testgen.caption("Description")
+ st.markdown(test_suite["test_suite_description"] or "--")
+
+ with latest_run_section:
+ testgen.no_flex_gap()
+ st.caption("Latest Run")
+
+ if (latest_run_start := test_suite["latest_run_start"]) and pd.notnull(latest_run_start):
testgen.link(
- label=latest_run_start.strftime("%B %d, %H:%M %p"),
+ label=date_service.get_timezoned_timestamp(st.session_state, latest_run_start),
href="test-runs:results",
params={ "run_id": str(test_suite["latest_run_id"]) },
- right_icon="chevron_right",
style="margin-bottom: 8px;",
height=29,
key=f"test_suite:keys:go-to-runs:{test_suite['id']}",
)
- testgen.summary_bar(
- items=[
- { "label": "Passed", "value": int(test_suite["last_run_passed_ct"]), "color": "green" },
- { "label": "Warnings", "value": int(test_suite["last_run_warning_ct"]), "color": "yellow" },
- { "label": "Failed", "value": int(test_suite["last_run_failed_ct"]), "color": "red" },
- { "label": "Errors", "value": int(test_suite["last_run_error_ct"]), "color": "brown" },
- { "label": "Dismissed", "value": int(test_suite["last_run_dismissed_ct"]), "color": "grey" },
- ],
- height=20,
- width=350,
- key=f"test_suite:keys:run-rummary:{test_suite['id']}",
- )
+ if to_int(test_suite["last_run_test_ct"]):
+ testgen.summary_bar(
+ items=[
+ { "label": "Passed", "value": to_int(test_suite["last_run_passed_ct"]), "color": "green" },
+ { "label": "Warnings", "value": to_int(test_suite["last_run_warning_ct"]), "color": "yellow" },
+ { "label": "Failed", "value": to_int(test_suite["last_run_failed_ct"]), "color": "red" },
+ { "label": "Errors", "value": to_int(test_suite["last_run_error_ct"]), "color": "brown" },
+ { "label": "Dismissed", "value": to_int(test_suite["last_run_dismissed_ct"]), "color": "grey" },
+ ],
+ height=20,
+ width=350,
+ key=f"test_suite:keys:run-rummary:{test_suite['id']}",
+ )
+ else:
+ st.markdown("--")
with actions_section:
testgen.button(
From 68e829adfc028d598383c2d98926c4f53d5d322a Mon Sep 17 00:00:00 2001
From: Luis Trinidad
Date: Tue, 17 Sep 2024 11:09:36 -0400
Subject: [PATCH 59/78] misc(ui): add widget to wrap streamlit's native dialog
Streamlit's native dialog caches usage of a decorated function by the
function's __qualname__, making it unusable for list details
dialogs. The new wrapper widget assigns an 8-character random string as
a suffix to the function's __qualname__.
---
testgen/ui/components/widgets/__init__.py | 1 +
testgen/ui/components/widgets/dialog.py | 44 +++++++++++++++++++++++
2 files changed, 45 insertions(+)
create mode 100644 testgen/ui/components/widgets/dialog.py
diff --git a/testgen/ui/components/widgets/__init__.py b/testgen/ui/components/widgets/__init__.py
index eba62b7..243355c 100644
--- a/testgen/ui/components/widgets/__init__.py
+++ b/testgen/ui/components/widgets/__init__.py
@@ -3,6 +3,7 @@
from testgen.ui.components.widgets.breadcrumbs import breadcrumbs
from testgen.ui.components.widgets.button import button
from testgen.ui.components.widgets.card import card
+from testgen.ui.components.widgets.dialog import dialog
from testgen.ui.components.widgets.expander_toggle import expander_toggle
from testgen.ui.components.widgets.link import link
from testgen.ui.components.widgets.page import (
diff --git a/testgen/ui/components/widgets/dialog.py b/testgen/ui/components/widgets/dialog.py
new file mode 100644
index 0000000..6c2c4e9
--- /dev/null
+++ b/testgen/ui/components/widgets/dialog.py
@@ -0,0 +1,44 @@
+import functools
+import random
+import string
+import typing
+
+import streamlit as st
+from streamlit.elements.lib.dialog import DialogWidth
+
+
+def dialog(title: str, *, width: DialogWidth = "small", key: str | None = None) -> typing.Callable:
+ """
+ Wrap Streamlit's native dialog to avoid passing parameters that will
+ be ignored during the fragment's re-run.
+ """
+ dialog_contents: typing.Callable = lambda: None
+
+ def render_dialog() -> typing.Any:
+ args = []
+ kwargs = {}
+ if key:
+ args, kwargs = st.session_state[key]
+ return dialog_contents(*args, **kwargs)
+
+ name_suffix = "".join(random.choices(string.ascii_lowercase, k=8)) # noqa: S311
+
+ # NOTE: st.dialog uses __qualname__ to generate the fragment hash, effectively overshadowing the uniqueness of the
+ # render_dialog() function.
+ render_dialog.__name__ = f"render_dialog_{name_suffix}"
+ render_dialog.__qualname__ = render_dialog.__qualname__.replace("render_dialog", render_dialog.__name__)
+
+ render_dialog = st.dialog(title=title, width=width)(render_dialog)
+
+ def decorator(func: typing.Callable) -> typing.Callable:
+ nonlocal dialog_contents
+ dialog_contents = func
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ if key:
+ st.session_state[key] = (args, kwargs)
+ render_dialog()
+ return wrapper
+
+ return decorator
From aca2b2e17bacc0a7916378c10d4458caab419dd0 Mon Sep 17 00:00:00 2001
From: Luis Trinidad
Date: Tue, 17 Sep 2024 11:10:39 -0400
Subject: [PATCH 60/78] fix(ui): return the custom button value from the widget
function
---
testgen/ui/components/widgets/button.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/testgen/ui/components/widgets/button.py b/testgen/ui/components/widgets/button.py
index dba2fe5..a78bc0d 100644
--- a/testgen/ui/components/widgets/button.py
+++ b/testgen/ui/components/widgets/button.py
@@ -15,7 +15,7 @@ def button(
on_click: typing.Callable[..., None] | None = None,
style: str | None = None,
key: str | None = None,
-) -> None:
+) -> typing.Any:
"""
Testgen component to create custom styled buttons.
@@ -40,4 +40,4 @@ def button(
if style:
props.update({"style": style})
- component(id_="button", key=key, props=props, on_change=on_click)
+ return component(id_="button", key=key, props=props, on_change=on_click)
From c61156c44d0643dd462c77ac30539c49b0158a16 Mon Sep 17 00:00:00 2001
From: Luis Trinidad
Date: Tue, 17 Sep 2024 11:13:23 -0400
Subject: [PATCH 61/78] refactor(ui): add new methods to the connections page
Add methods to the connections page object for better customizability.
---
testgen/ui/views/connections.py | 406 ++++++++++++++++++++++++++-
testgen/ui/views/connections_base.py | 360 ------------------------
2 files changed, 399 insertions(+), 367 deletions(-)
delete mode 100644 testgen/ui/views/connections_base.py
diff --git a/testgen/ui/views/connections.py b/testgen/ui/views/connections.py
index 31e50b6..3fc44dc 100644
--- a/testgen/ui/views/connections.py
+++ b/testgen/ui/views/connections.py
@@ -1,14 +1,19 @@
+import dataclasses
import logging
+import os
+import time
import typing
import streamlit as st
+import testgen.ui.services.database_service as db
+from testgen.commands.run_setup_profiling_tools import get_setup_profiling_tools_queries
+from testgen.common.database.database_service import empty_cache
from testgen.ui.components import widgets as testgen
from testgen.ui.navigation.menu import MenuItem
from testgen.ui.navigation.page import Page
-from testgen.ui.services import connection_service
+from testgen.ui.services import authentication_service, connection_service
from testgen.ui.session import session
-from testgen.ui.views.connections_base import create_qc_schema_dialog, show_connection_form
LOG = logging.getLogger("testgen")
@@ -34,16 +39,14 @@ def render(self, project_code: str, **_kwargs) -> None:
enable_table_groups = connection["project_host"] and connection["project_db"] and connection["project_qc_schema"]
- form_container = st.expander("", expanded=True)
- with form_container:
- mode = "edit"
- show_connection_form(connection, mode, project_code)
+ with st.container(border=True):
+ self.show_connection_form(connection, "edit", project_code)
if actions_column.button(
"Configure QC Utility Schema",
help="Creates the required Utility schema and related functions in the target database",
):
- create_qc_schema_dialog(connection)
+ self.create_qc_schema_dialog(connection)
if actions_column.button(
f":{'gray' if not enable_table_groups else 'green'}[Table Groups →]",
@@ -53,3 +56,392 @@ def render(self, project_code: str, **_kwargs) -> None:
"connections:table-groups",
{"connection_id": connection["connection_id"]},
)
+
+ @testgen.dialog(title="Configure QC Utility Schema", key="config_qc_dialog_args")
+ def create_qc_schema_dialog(self, selected_connection):
+ connection_id = selected_connection["connection_id"]
+ project_qc_schema = selected_connection["project_qc_schema"]
+ sql_flavor = selected_connection["sql_flavor"]
+ user = selected_connection["project_user"]
+
+ create_qc_schema = st.toggle("Create QC Utility Schema", value=True)
+ grant_privileges = st.toggle("Grant access privileges to TestGen user", value=True)
+
+ user_role = None
+
+ # TODO ALEX: This textbox may be needed if we want to grant permissions to user role
+ # if sql_flavor == "snowflake":
+ # user_role_textbox_label = f"Primary role for database user {user}"
+ # user_role = st.text_input(label=user_role_textbox_label, max_chars=100)
+
+ admin_credentials_expander = st.expander("Admin credential options", expanded=True)
+ with admin_credentials_expander:
+ admin_connection_option_index = 0
+ admin_connection_options = ["Do not use admin credentials", "Use admin credentials with Password"]
+ if sql_flavor == "snowflake":
+ admin_connection_options.append("Use admin credentials with Key-Pair")
+
+ admin_connection_option = st.radio(
+ "Admin credential options",
+ label_visibility="hidden",
+ options=admin_connection_options,
+ index=admin_connection_option_index,
+ horizontal=True,
+ )
+
+ st.markdown(" ", unsafe_allow_html=True)
+
+ db_user = None
+ db_password = None
+ admin_private_key_passphrase = None
+ admin_private_key = None
+ if admin_connection_option == admin_connection_options[0]:
+ st.markdown(":orange[User created in the connection dialog will be used.]")
+ else:
+ db_user = st.text_input(label="Admin db user", max_chars=40)
+ if admin_connection_option == admin_connection_options[1]:
+ db_password = st.text_input(
+ label="Admin db password", max_chars=40, type="password"
+ )
+ st.markdown(":orange[Note: Admin credentials are not stored, are only used for this operation.]")
+
+ if len(admin_connection_options) > 2 and admin_connection_option == admin_connection_options[2]:
+ admin_private_key_passphrase = st.text_input(
+ label="Private Key Passphrase",
+ key="create-qc-schema-private-key-password",
+ type="password",
+ max_chars=200,
+ help="Passphrase used while creating the private Key (leave empty if not applicable)",
+ )
+
+ admin_uploaded_file = st.file_uploader("Upload private key (rsa_key.p8)", key="admin-uploaded-file")
+ if admin_uploaded_file:
+ admin_private_key = admin_uploaded_file.getvalue().decode("utf-8")
+
+ st.markdown(":orange[Note: Admin credentials are not stored, are only used for this operation.]")
+
+ submit = st.button("Update Configuration")
+
+ if submit:
+ empty_cache()
+ script_expander = st.expander("Script Details")
+
+ operation_status = st.empty()
+ operation_status.info(f"Configuring QC Utility Schema '{project_qc_schema}'...")
+
+ try:
+ skip_granting_privileges = not grant_privileges
+ queries = get_setup_profiling_tools_queries(sql_flavor, create_qc_schema, skip_granting_privileges, project_qc_schema, user, user_role)
+ with script_expander:
+ st.code(
+ os.linesep.join(queries),
+ language="sql",
+ line_numbers=True)
+
+ connection_service.create_qc_schema(
+ connection_id,
+ create_qc_schema,
+ db_user if db_user else None,
+ db_password if db_password else None,
+ skip_granting_privileges,
+ admin_private_key_passphrase=admin_private_key_passphrase,
+ admin_private_key=admin_private_key,
+ user_role=user_role,
+ )
+ operation_status.empty()
+ operation_status.success("Operation has finished successfully.")
+
+ except Exception as e:
+ operation_status.empty()
+ operation_status.error("Error configuring QC Utility Schema.")
+ error_message = e.args[0]
+ st.text_area("Error Details", value=error_message)
+
+ def show_connection_form(self, selected_connection, mode, project_code):
+ flavor_options = ["redshift", "snowflake", "mssql", "postgresql"]
+ connection_options = ["Connect by Password", "Connect by Key-Pair"]
+
+ left_column, right_column = st.columns([0.75, 0.25])
+
+ mid_column = st.columns(1)[0]
+ url_override_toogle_container = st.container()
+ bottom_left_column, bottom_right_column = st.columns([0.25, 0.75])
+ button_left_column, button_right_column = st.columns([0.20, 0.80])
+ connection_status_wrapper = st.container()
+ connection_status_container = connection_status_wrapper.empty()
+
+ connection_id = selected_connection["connection_id"] if mode == "edit" else None
+ connection_name = selected_connection["connection_name"] if mode == "edit" else ""
+ sql_flavor_index = flavor_options.index(selected_connection["sql_flavor"]) if mode == "edit" else 0
+ project_port = selected_connection["project_port"] if mode == "edit" else ""
+ project_host = selected_connection["project_host"] if mode == "edit" else ""
+ project_db = selected_connection["project_db"] if mode == "edit" else ""
+ project_user = selected_connection["project_user"] if mode == "edit" else ""
+ url = selected_connection["url"] if mode == "edit" else ""
+ project_qc_schema = selected_connection["project_qc_schema"] if mode == "edit" else "qc"
+ password = selected_connection["password"] if mode == "edit" else ""
+ max_threads = selected_connection["max_threads"] if mode == "edit" else 4
+ max_query_chars = selected_connection["max_query_chars"] if mode == "edit" else 10000
+ connect_by_url = selected_connection["connect_by_url"] if mode == "edit" else False
+ connect_by_key = selected_connection["connect_by_key"] if mode == "edit" else False
+ connection_option_index = 1 if connect_by_key else 0
+ private_key = selected_connection["private_key"] if mode == "edit" else None
+ private_key_passphrase = selected_connection["private_key_passphrase"] if mode == "edit" else ""
+
+ new_connection = {
+ "connection_id": connection_id,
+ "project_code": project_code,
+ "private_key": private_key,
+ "private_key_passphrase": private_key_passphrase,
+ "password": password,
+ "url": url,
+ "max_threads": right_column.number_input(
+ label="Max Threads (Advanced Tuning)",
+ min_value=1,
+ max_value=8,
+ value=max_threads,
+ help=(
+ "Maximum number of concurrent threads that run tests. Default values should be retained unless "
+ "test queries are failing."
+ ),
+ key=f"connections:form:max-threads:{connection_id or 0}",
+ ),
+ "max_query_chars": right_column.number_input(
+ label="Max Expression Length (Advanced Tuning)",
+ min_value=500,
+ max_value=14000,
+ value=max_query_chars,
+ help="Some tests are consolidated into queries for maximum performance. Default values should be retained unless test queries are failing.",
+ key=f"connections:form:max-length:{connection_id or 0}",
+ ),
+ "connection_name": left_column.text_input(
+ label="Connection Name",
+ max_chars=40,
+ value=connection_name,
+ help="Your name for this connection. Can be any text.",
+ key=f"connections:form:name:{connection_id or 0}",
+ ),
+ "sql_flavor": left_column.selectbox(
+ label="SQL Flavor",
+ options=flavor_options,
+ index=sql_flavor_index,
+ help="The type of database server that you will connect to. This determines TestGen's drivers and SQL dialect.",
+ key=f"connections:form:flavor:{connection_id or 0}",
+ )
+ }
+
+ st.session_state.disable_url_widgets = connect_by_url
+
+ new_connection["project_port"] = right_column.text_input(
+ label="Port",
+ max_chars=5,
+ value=project_port,
+ disabled=st.session_state.disable_url_widgets,
+ key=f"connections:form:port:{connection_id or 0}",
+ )
+ new_connection["project_host"] = left_column.text_input(
+ label="Host",
+ max_chars=250,
+ value=project_host,
+ disabled=st.session_state.disable_url_widgets,
+ key=f"connections:form:host:{connection_id or 0}",
+ )
+ new_connection["project_db"] = left_column.text_input(
+ label="Database",
+ max_chars=100,
+ value=project_db,
+ help="The name of the database defined on your host where your schemas and tables is present.",
+ disabled=st.session_state.disable_url_widgets,
+ key=f"connections:form:database:{connection_id or 0}",
+ )
+
+ new_connection["project_user"] = left_column.text_input(
+ label="User",
+ max_chars=50,
+ value=project_user,
+ help="Username to connect to your database.",
+ key=f"connections:form:user:{connection_id or 0}",
+ )
+
+ new_connection["project_qc_schema"] = right_column.text_input(
+ label="QC Utility Schema",
+ max_chars=50,
+ value=project_qc_schema,
+ help="The name of the schema on your database that will contain TestGen's profiling functions.",
+ key=f"connections:form:qcschema:{connection_id or 0}",
+ )
+
+ if new_connection["sql_flavor"] == "snowflake":
+ mid_column.divider()
+
+ connection_option = mid_column.radio(
+ "Connection options",
+ options=connection_options,
+ index=connection_option_index,
+ horizontal=True,
+ help="Connection strategy",
+ key=f"connections:form:type_options:{connection_id or 0}",
+ )
+
+ new_connection["connect_by_key"] = connection_option == "Connect by Key-Pair"
+ password_column = mid_column
+ else:
+ new_connection["connect_by_key"] = False
+ password_column = left_column
+
+ uploaded_file = None
+
+ if new_connection["connect_by_key"]:
+ new_connection["private_key_passphrase"] = mid_column.text_input(
+ label="Private Key Passphrase",
+ type="password",
+ max_chars=200,
+ value=private_key_passphrase,
+ help="Passphrase used while creating the private Key (leave empty if not applicable)",
+ key=f"connections:form:passphrase:{connection_id or 0}",
+ )
+
+ uploaded_file = mid_column.file_uploader("Upload private key (rsa_key.p8)")
+ else:
+ new_connection["password"] = password_column.text_input(
+ label="Password",
+ max_chars=50,
+ type="password",
+ value=password,
+ help="Password to connect to your database.",
+ key=f"connections:form:password:{connection_id or 0}",
+ )
+
+ mid_column.divider()
+
+ url_override_help_text = "If this switch is set to on, the connection string will be driven by the field below. "
+ if new_connection["connect_by_key"]:
+ url_override_help_text += "Only user name will be passed per the relevant fields above."
+ else:
+ url_override_help_text += "Only user name and password will be passed per the relevant fields above."
+
+ def on_connect_by_url_change():
+ value = st.session_state.connect_by_url_toggle
+ st.session_state.disable_url_widgets = value
+
+ new_connection["connect_by_url"] = url_override_toogle_container.toggle(
+ "URL override",
+ value=connect_by_url,
+ key="connect_by_url_toggle",
+ help=url_override_help_text,
+ on_change=on_connect_by_url_change,
+ )
+
+ if new_connection["connect_by_url"]:
+ connection_string = connection_service.form_overwritten_connection_url(new_connection)
+ connection_string_beginning, connection_string_end = connection_string.split("@", 1)
+ connection_string_header = connection_string_beginning + "@"
+ connection_string_header = connection_string_header.replace("%3E", ">")
+ connection_string_header = connection_string_header.replace("%3C", "<")
+
+ if not url:
+ url = connection_string_end
+
+ new_connection["url"] = bottom_right_column.text_input(
+ label="URL Suffix",
+ max_chars=200,
+ value=url,
+ help="Provide a connection string directly. This will override connection parameters if the 'Connect by URL' switch is set.",
+ )
+
+ bottom_left_column.text_input(label="URL Prefix", value=connection_string_header, disabled=True)
+
+ bottom_left_column.markdown(" ", unsafe_allow_html=True)
+
+ testgen.flex_row_end(button_right_column)
+ submit = button_right_column.button(
+ "Save" if mode == "edit" else "Add Connection",
+ disabled=authentication_service.current_user_has_read_role(),
+ )
+
+ if submit:
+ if not new_connection["password"] and not new_connection["connect_by_key"]:
+ st.error("Enter a valid password.")
+ else:
+ if uploaded_file:
+ new_connection["private_key"] = uploaded_file.getvalue().decode("utf-8")
+
+ if mode == "edit":
+ connection_service.edit_connection(new_connection)
+ else:
+ connection_service.add_connection(new_connection)
+ success_message = (
+ "Changes have been saved successfully. "
+ if mode == "edit"
+ else "New connection added successfully. "
+ )
+ st.success(success_message)
+ time.sleep(1)
+ st.rerun()
+
+ test_connection = button_left_column.button("Test Connection")
+
+ if test_connection:
+ connection_status_container.empty()
+ connection_status_container.info("Testing the connection...")
+
+ connection_status = self.test_connection(new_connection)
+ renderer = {
+ True: connection_status_container.success,
+ False: connection_status_container.error,
+ }[connection_status.successful]
+
+ renderer(connection_status.message)
+ if not connection_status.successful and connection_status.details:
+ st.text_area("Connection Error Details", value=connection_status.details)
+
+ def test_connection(self, connection: dict) -> "ConnectionStatus":
+ if connection["connect_by_key"] and connection["connection_id"] is None:
+ return ConnectionStatus(
+ message="Please add the connection before testing it (so that we can get your private key file).",
+ successful=False,
+ )
+
+ empty_cache()
+ try:
+ sql_query = "select 1;"
+ results = db.retrieve_target_db_data(
+ connection["sql_flavor"],
+ connection["project_host"],
+ connection["project_port"],
+ connection["project_db"],
+ connection["project_user"],
+ connection["password"],
+ connection["url"],
+ connection["connect_by_url"],
+ connection["connect_by_key"],
+ connection["private_key"],
+ connection["private_key_passphrase"],
+ sql_query,
+ )
+ connection_successful = len(results) == 1 and results[0][0] == 1
+
+ if not connection_successful:
+ return ConnectionStatus(message="Error completing a query to the database server.", successful=False)
+
+ qc_error_message = "The connection was successful, but there is an issue with the QC Utility Schema"
+ try:
+ qc_results = connection_service.test_qc_connection(connection["project_code"], connection)
+ if not all(qc_results):
+ return ConnectionStatus(
+ message=qc_error_message,
+ details=f"QC Utility Schema confirmation failed. details: {qc_results}",
+ successful=False,
+ )
+ return ConnectionStatus(message="The connection was successful.", successful=True)
+ except Exception as error:
+ return ConnectionStatus(message=qc_error_message, details=error.args[0], successful=False)
+ except Exception as error:
+ return ConnectionStatus(message="Error attempting the Connection.", details=error.args[0], successful=False)
+
+
+@dataclasses.dataclass(frozen=True, slots=True)
+class ConnectionStatus:
+ message: str
+ successful: bool
+ details: str | None = dataclasses.field(default=None)
diff --git a/testgen/ui/views/connections_base.py b/testgen/ui/views/connections_base.py
deleted file mode 100644
index e3765a9..0000000
--- a/testgen/ui/views/connections_base.py
+++ /dev/null
@@ -1,360 +0,0 @@
-import os
-import time
-
-import streamlit as st
-
-import testgen.ui.services.database_service as db
-from testgen.commands.run_setup_profiling_tools import get_setup_profiling_tools_queries
-from testgen.common.database.database_service import empty_cache
-from testgen.ui.services import authentication_service, connection_service
-
-
-@st.dialog(title="Configure QC Utility Schema")
-def create_qc_schema_dialog(selected_connection):
- connection_id = selected_connection["connection_id"]
- project_qc_schema = selected_connection["project_qc_schema"]
- sql_flavor = selected_connection["sql_flavor"]
- user = selected_connection["project_user"]
-
- create_qc_schema = st.toggle("Create QC Utility Schema", value=True)
- grant_privileges = st.toggle("Grant access privileges to TestGen user", value=True)
-
- user_role = None
-
- # TODO ALEX: This textbox may be needed if we want to grant permissions to user role
- # if sql_flavor == "snowflake":
- # user_role_textbox_label = f"Primary role for database user {user}"
- # user_role = st.text_input(label=user_role_textbox_label, max_chars=100)
-
- admin_credentials_expander = st.expander("Admin credential options", expanded=True)
- with admin_credentials_expander:
- admin_connection_option_index = 0
- admin_connection_options = ["Do not use admin credentials", "Use admin credentials with Password"]
- if sql_flavor == "snowflake":
- admin_connection_options.append("Use admin credentials with Key-Pair")
-
- admin_connection_option = st.radio(
- "Admin credential options",
- label_visibility="hidden",
- options=admin_connection_options,
- index=admin_connection_option_index,
- horizontal=True,
- )
-
- st.markdown(" ", unsafe_allow_html=True)
-
- db_user = None
- db_password = None
- admin_private_key_passphrase = None
- admin_private_key = None
- if admin_connection_option == admin_connection_options[0]:
- st.markdown(":orange[User created in the connection dialog will be used.]")
- else:
- db_user = st.text_input(label="Admin db user", max_chars=40)
- if admin_connection_option == admin_connection_options[1]:
- db_password = st.text_input(
- label="Admin db password", max_chars=40, type="password"
- )
- st.markdown(":orange[Note: Admin credentials are not stored, are only used for this operation.]")
-
- if len(admin_connection_options) > 2 and admin_connection_option == admin_connection_options[2]:
- admin_private_key_passphrase = st.text_input(
- label="Private Key Passphrase",
- key="create-qc-schema-private-key-password",
- type="password",
- max_chars=200,
- help="Passphrase used while creating the private Key (leave empty if not applicable)",
- )
-
- admin_uploaded_file = st.file_uploader("Upload private key (rsa_key.p8)", key="admin-uploaded-file")
- if admin_uploaded_file:
- admin_private_key = admin_uploaded_file.getvalue().decode("utf-8")
-
- st.markdown(":orange[Note: Admin credentials are not stored, are only used for this operation.]")
-
- submit = st.button("Update Configuration")
-
- if submit:
- empty_cache()
- script_expander = st.expander("Script Details")
-
- operation_status = st.empty()
- operation_status.info(f"Configuring QC Utility Schema '{project_qc_schema}'...")
-
- try:
- skip_granting_privileges = not grant_privileges
- queries = get_setup_profiling_tools_queries(sql_flavor, create_qc_schema, skip_granting_privileges, project_qc_schema, user, user_role)
- with script_expander:
- st.code(
- os.linesep.join(queries),
- language="sql",
- line_numbers=True)
-
- connection_service.create_qc_schema(
- connection_id,
- create_qc_schema,
- db_user if db_user else None,
- db_password if db_password else None,
- skip_granting_privileges,
- admin_private_key_passphrase=admin_private_key_passphrase,
- admin_private_key=admin_private_key,
- user_role=user_role,
- )
- operation_status.empty()
- operation_status.success("Operation has finished successfully.")
-
- except Exception as e:
- operation_status.empty()
- operation_status.error("Error configuring QC Utility Schema.")
- error_message = e.args[0]
- st.text_area("Error Details", value=error_message)
-
-
-def show_connection_form(selected_connection, mode, project_code):
- flavor_options = ["redshift", "snowflake", "mssql", "postgresql"]
- connection_options = ["Connect by Password", "Connect by Key-Pair"]
-
- left_column, right_column = st.columns([0.75, 0.25])
- mid_column = st.columns(1)[0]
- toggle_left_column, toggle_right_column = st.columns([0.25, 0.75])
- bottom_left_column, bottom_right_column = st.columns([0.25, 0.75])
- button_left_column, button_right_column, button_remaining_column = st.columns([0.20, 0.20, 0.60])
-
- connection_id = selected_connection["connection_id"] if mode == "edit" else None
- connection_name = selected_connection["connection_name"] if mode == "edit" else ""
- sql_flavor_index = flavor_options.index(selected_connection["sql_flavor"]) if mode == "edit" else 0
- project_port = selected_connection["project_port"] if mode == "edit" else ""
- project_host = selected_connection["project_host"] if mode == "edit" else ""
- project_db = selected_connection["project_db"] if mode == "edit" else ""
- project_user = selected_connection["project_user"] if mode == "edit" else ""
- url = selected_connection["url"] if mode == "edit" else ""
- project_qc_schema = selected_connection["project_qc_schema"] if mode == "edit" else "qc"
- password = selected_connection["password"] if mode == "edit" else ""
- max_threads = selected_connection["max_threads"] if mode == "edit" else 4
- max_query_chars = selected_connection["max_query_chars"] if mode == "edit" else 10000
- connect_by_url = selected_connection["connect_by_url"] if mode == "edit" else False
- connect_by_key = selected_connection["connect_by_key"] if mode == "edit" else False
- connection_option_index = 1 if connect_by_key else 0
- private_key = selected_connection["private_key"] if mode == "edit" else None
- private_key_passphrase = selected_connection["private_key_passphrase"] if mode == "edit" else ""
-
- new_connection = {
- "connection_id": connection_id,
- "project_code": project_code,
- "private_key": private_key,
- "private_key_passphrase": private_key_passphrase,
- "password": password,
- "url": url,
- "max_threads": right_column.number_input(
- label="Max Threads (Advanced Tuning)",
- min_value=1,
- max_value=8,
- value=max_threads,
- help="Maximum number of concurrent threads that run tests. Default values should be retained unless test queries are failing.",
- ),
- "max_query_chars": right_column.number_input(
- label="Max Expression Length (Advanced Tuning)",
- min_value=500,
- max_value=14000,
- value=max_query_chars,
- help="Some tests are consolidated into queries for maximum performance. Default values should be retained unless test queries are failing.",
- ),
- "connection_name": left_column.text_input(
- label="Connection Name",
- max_chars=40,
- value=connection_name,
- help="Your name for this connection. Can be any text.",
- ),
- "sql_flavor": left_column.selectbox(
- label="SQL Flavor",
- options=flavor_options,
- index=sql_flavor_index,
- help="The type of database server that you will connect to. This determines TestGen's drivers and SQL dialect.",
- )
- }
-
- if "disable_url_widgets" not in st.session_state:
- st.session_state.disable_url_widgets = connect_by_url
-
- new_connection["project_port"] = right_column.text_input(label="Port", max_chars=5, value=project_port, disabled=st.session_state.disable_url_widgets)
- new_connection["project_host"] = left_column.text_input(label="Host", max_chars=250, value=project_host, disabled=st.session_state.disable_url_widgets)
- new_connection["project_db"] = left_column.text_input(
- label="Database",
- max_chars=100,
- value=project_db,
- help="The name of the database defined on your host where your schemas and tables is present.",
- disabled=st.session_state.disable_url_widgets,
- )
-
- new_connection["project_user"] = left_column.text_input(
- label="User",
- max_chars=50,
- value=project_user,
- help="Username to connect to your database.",
- )
-
- new_connection["project_qc_schema"] = right_column.text_input(
- label="QC Utility Schema",
- max_chars=50,
- value=project_qc_schema,
- help="The name of the schema on your database that will contain TestGen's profiling functions.",
- )
-
- if new_connection["sql_flavor"] == "snowflake":
- mid_column.divider()
-
- connection_option = mid_column.radio(
- "Connection options",
- options=connection_options,
- index=connection_option_index,
- horizontal=True,
- help="Connection strategy",
- )
-
- new_connection["connect_by_key"] = connection_option == "Connect by Key-Pair"
- password_column = mid_column
- else:
- new_connection["connect_by_key"] = False
- password_column = left_column
-
- uploaded_file = None
-
- if new_connection["connect_by_key"]:
- new_connection["private_key_passphrase"] = mid_column.text_input(
- label="Private Key Passphrase",
- type="password",
- max_chars=200,
- value=private_key_passphrase,
- help="Passphrase used while creating the private Key (leave empty if not applicable)",
- )
-
- uploaded_file = mid_column.file_uploader("Upload private key (rsa_key.p8)")
- else:
- new_connection["password"] = password_column.text_input(
- label="Password",
- max_chars=50,
- type="password",
- value=password,
- help="Password to connect to your database.",
- )
-
- mid_column.divider()
-
- url_override_help_text = "If this switch is set to on, the connection string will be driven by the field below. "
- if new_connection["connect_by_key"]:
- url_override_help_text += "Only user name will be passed per the relevant fields above."
- else:
- url_override_help_text += "Only user name and password will be passed per the relevant fields above."
-
- def on_connect_by_url_change():
- value = st.session_state.connect_by_url_toggle
- st.session_state.disable_url_widgets = value
-
- new_connection["connect_by_url"] = toggle_left_column.toggle(
- "URL override",
- value=connect_by_url,
- key="connect_by_url_toggle",
- help=url_override_help_text,
- on_change=on_connect_by_url_change
- )
-
- if new_connection["connect_by_url"]:
- connection_string = connection_service.form_overwritten_connection_url(new_connection)
- connection_string_beginning, connection_string_end = connection_string.split("@", 1)
- connection_string_header = connection_string_beginning + "@"
- connection_string_header = connection_string_header.replace("%3E", ">")
- connection_string_header = connection_string_header.replace("%3C", "<")
-
- if not url:
- url = connection_string_end
-
- new_connection["url"] = bottom_right_column.text_input(
- label="URL Suffix",
- max_chars=200,
- value=url,
- help="Provide a connection string directly. This will override connection parameters if the 'Connect by URL' switch is set.",
- )
-
- bottom_left_column.text_input(label="URL Prefix", value=connection_string_header, disabled=True)
-
- bottom_left_column.markdown(" ", unsafe_allow_html=True)
-
- submit_button_text = "Save" if mode == "edit" else "Add Connection"
- submit = button_left_column.button(
- submit_button_text, disabled=authentication_service.current_user_has_read_role()
- )
-
- if submit:
- if not new_connection["password"] and not new_connection["connect_by_key"]:
- st.error("Enter a valid password.")
- else:
- if uploaded_file:
- new_connection["private_key"] = uploaded_file.getvalue().decode("utf-8")
-
- if mode == "edit":
- connection_service.edit_connection(new_connection)
- else:
- connection_service.add_connection(new_connection)
- success_message = (
- "Changes have been saved successfully. "
- if mode == "edit"
- else "New connection added successfully. "
- )
- st.success(success_message)
- time.sleep(1)
- st.rerun()
-
- test_left_column, test_mid_column, test_right_column = st.columns([0.15, 0.15, 0.70])
- test_connection = button_right_column.button("Test Connection")
-
- connection_status = test_right_column.empty()
-
- if test_connection:
- if mode == "add" and new_connection["connect_by_key"]:
- connection_status.empty()
- connection_status.error(
- "Please add the connection before testing it (so that we can get your private key file).")
- else:
- empty_cache()
- connection_status.empty()
- connection_status.info("Testing the connection...")
- try:
- sql_query = "select 1;"
- results = db.retrieve_target_db_data(
- new_connection["sql_flavor"],
- new_connection["project_host"],
- new_connection["project_port"],
- new_connection["project_db"],
- new_connection["project_user"],
- new_connection["password"],
- new_connection["url"],
- new_connection["connect_by_url"],
- new_connection["connect_by_key"],
- new_connection["private_key"],
- new_connection["private_key_passphrase"],
- sql_query,
- )
- if len(results) == 1 and results[0][0] == 1:
- qc_error_message = "The connection was successful, but there is an issue with the QC Utility Schema"
- try:
- qc_results = connection_service.test_qc_connection(project_code, new_connection)
- if not all(qc_results):
- error_message = f"QC Utility Schema confirmation failed. details: {qc_results}"
- connection_status.empty()
- connection_status.error(qc_error_message)
- st.text_area("Connection Error Details", value=error_message)
- else:
- connection_status.empty()
- connection_status.success("The connection was successful.")
- except Exception as e:
- connection_status.empty()
- connection_status.error(qc_error_message)
- error_message = e.args[0]
- st.text_area("Connection Error Details", value=error_message)
- else:
- test_right_column.error("Error completing a query to the database server.")
- except Exception as e:
- connection_status.empty()
- connection_status.error("Error attempting the Connection.")
- error_message = e.args[0]
- st.text_area("Connection Error Details", value=error_message)
From d1ba19488bfa563f9f1e0f3c7db41f4427b806bc Mon Sep 17 00:00:00 2001
From: Luis Trinidad
Date: Tue, 17 Sep 2024 11:18:24 -0400
Subject: [PATCH 62/78] fix(ui): use wrapper dialog widget on test suite cards
---
testgen/ui/views/test_suites.py | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py
index 0024de7..d6d56a5 100644
--- a/testgen/ui/views/test_suites.py
+++ b/testgen/ui/views/test_suites.py
@@ -155,12 +155,12 @@ def get_db_table_group_choices(project_code):
return dq.run_table_groups_lookup_query(schema, project_code)
-@st.dialog(title="Add Test Suite")
+@testgen.dialog(title="Add Test Suite", key="add_test_suite_dialog_args")
def add_test_suite_dialog(project_code, table_groups_df):
show_test_suite("add", project_code, table_groups_df)
-@st.dialog(title="Edit Test Suite")
+@testgen.dialog(title="Edit Test Suite", key="edit_test_suite_dialog_args")
def edit_test_suite_dialog(project_code, table_groups_df, selected):
show_test_suite("edit", project_code, table_groups_df, selected)
@@ -274,7 +274,7 @@ def show_test_suite(mode, project_code, table_groups_df, selected=None):
st.rerun()
-@st.dialog(title="Delete Test Suite")
+@testgen.dialog(title="Delete Test Suite", key="delete_test_suite_dialog_args")
def delete_test_suite_dialog(selected_test_suite):
test_suite_id = selected_test_suite["id"]
test_suite_name = selected_test_suite["test_suite"]
@@ -324,7 +324,7 @@ def delete_test_suite_dialog(selected_test_suite):
st.rerun()
-@st.dialog(title="Run Tests")
+@testgen.dialog(title="Run Tests", key="run_tests_dialog_args")
def run_tests_dialog(project_code, selected_test_suite):
test_suite_key = selected_test_suite["test_suite"]
start_process_button_message = "Start"
@@ -364,7 +364,7 @@ def run_tests_dialog(project_code, selected_test_suite):
)
-@st.dialog(title="Generate Tests")
+@testgen.dialog(title="Generate Tests", key="generate_tests_dialog_args")
def generate_tests_dialog(selected_test_suite):
test_suite_id = selected_test_suite["id"]
test_suite_key = selected_test_suite["test_suite"]
@@ -441,7 +441,7 @@ def generate_tests_dialog(selected_test_suite):
status_container.success("Process has successfully finished.")
-@st.dialog(title="Export to Observability")
+@testgen.dialog(title="Export to Observability", key="export_to_obs_dialog_args")
def observability_export_dialog(selected_test_suite):
project_key = selected_test_suite["project_code"]
test_suite_key = selected_test_suite["test_suite"]
From 0156d7f078406686de9afa11c180a8fbbf085266 Mon Sep 17 00:00:00 2001
From: Luis Trinidad
Date: Fri, 20 Sep 2024 08:45:10 -0400
Subject: [PATCH 63/78] misc: upgrade streamlit to version 1.38.0
The upgrade fixes an error with dynamic dialogs opened from callbacks.
The upgrade also makes the dialogs look smaller by default; this commit
explicitly sets the width to large for most of the dialogs.
---
pyproject.toml | 2 +-
testgen/ui/assets/style.css | 2 +-
testgen/ui/components/widgets/__init__.py | 1 -
testgen/ui/components/widgets/dialog.py | 44 -----------------------
testgen/ui/views/connections.py | 2 +-
testgen/ui/views/test_suites.py | 12 +++----
6 files changed, 9 insertions(+), 54 deletions(-)
delete mode 100644 testgen/ui/components/widgets/dialog.py
diff --git a/pyproject.toml b/pyproject.toml
index 5be21d0..6e06b0d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -43,7 +43,7 @@ dependencies = [
"trogon==0.4.0",
"numpy==1.26.4",
"pandas==2.1.4",
- "streamlit==1.37.1",
+ "streamlit==1.38.0",
"streamlit-extras==0.3.0",
"streamlit-aggrid==0.3.4.post3",
"streamlit-antd-components==0.2.2",
diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css
index 25b829e..b5eec64 100644
--- a/testgen/ui/assets/style.css
+++ b/testgen/ui/assets/style.css
@@ -81,7 +81,7 @@ div[data-testid="collapsedControl"] {
/* Dialog - sets the width of all st.dialog */
/* There is no way to target "large" and "small" dialogs reliably */
-div[data-testid="stModal"] div[role="dialog"] {
+div[data-testid="stDialog"] div[role="dialog"] {
width: calc(55rem);
}
/* */
diff --git a/testgen/ui/components/widgets/__init__.py b/testgen/ui/components/widgets/__init__.py
index 243355c..eba62b7 100644
--- a/testgen/ui/components/widgets/__init__.py
+++ b/testgen/ui/components/widgets/__init__.py
@@ -3,7 +3,6 @@
from testgen.ui.components.widgets.breadcrumbs import breadcrumbs
from testgen.ui.components.widgets.button import button
from testgen.ui.components.widgets.card import card
-from testgen.ui.components.widgets.dialog import dialog
from testgen.ui.components.widgets.expander_toggle import expander_toggle
from testgen.ui.components.widgets.link import link
from testgen.ui.components.widgets.page import (
diff --git a/testgen/ui/components/widgets/dialog.py b/testgen/ui/components/widgets/dialog.py
deleted file mode 100644
index 6c2c4e9..0000000
--- a/testgen/ui/components/widgets/dialog.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import functools
-import random
-import string
-import typing
-
-import streamlit as st
-from streamlit.elements.lib.dialog import DialogWidth
-
-
-def dialog(title: str, *, width: DialogWidth = "small", key: str | None = None) -> typing.Callable:
- """
- Wrap Streamlit's native dialog to avoid passing parameters that will
- be ignored during the fragment's re-run.
- """
- dialog_contents: typing.Callable = lambda: None
-
- def render_dialog() -> typing.Any:
- args = []
- kwargs = {}
- if key:
- args, kwargs = st.session_state[key]
- return dialog_contents(*args, **kwargs)
-
- name_suffix = "".join(random.choices(string.ascii_lowercase, k=8)) # noqa: S311
-
- # NOTE: st.dialog uses __qualname__ to generate the fragment hash, effectively overshadowing the uniqueness of the
- # render_dialog() function.
- render_dialog.__name__ = f"render_dialog_{name_suffix}"
- render_dialog.__qualname__ = render_dialog.__qualname__.replace("render_dialog", render_dialog.__name__)
-
- render_dialog = st.dialog(title=title, width=width)(render_dialog)
-
- def decorator(func: typing.Callable) -> typing.Callable:
- nonlocal dialog_contents
- dialog_contents = func
-
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- if key:
- st.session_state[key] = (args, kwargs)
- render_dialog()
- return wrapper
-
- return decorator
diff --git a/testgen/ui/views/connections.py b/testgen/ui/views/connections.py
index 3fc44dc..0f6fc5f 100644
--- a/testgen/ui/views/connections.py
+++ b/testgen/ui/views/connections.py
@@ -57,7 +57,7 @@ def render(self, project_code: str, **_kwargs) -> None:
{"connection_id": connection["connection_id"]},
)
- @testgen.dialog(title="Configure QC Utility Schema", key="config_qc_dialog_args")
+ @st.dialog(title="Configure QC Utility Schema")
def create_qc_schema_dialog(self, selected_connection):
connection_id = selected_connection["connection_id"]
project_qc_schema = selected_connection["project_qc_schema"]
diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py
index d6d56a5..0024de7 100644
--- a/testgen/ui/views/test_suites.py
+++ b/testgen/ui/views/test_suites.py
@@ -155,12 +155,12 @@ def get_db_table_group_choices(project_code):
return dq.run_table_groups_lookup_query(schema, project_code)
-@testgen.dialog(title="Add Test Suite", key="add_test_suite_dialog_args")
+@st.dialog(title="Add Test Suite")
def add_test_suite_dialog(project_code, table_groups_df):
show_test_suite("add", project_code, table_groups_df)
-@testgen.dialog(title="Edit Test Suite", key="edit_test_suite_dialog_args")
+@st.dialog(title="Edit Test Suite")
def edit_test_suite_dialog(project_code, table_groups_df, selected):
show_test_suite("edit", project_code, table_groups_df, selected)
@@ -274,7 +274,7 @@ def show_test_suite(mode, project_code, table_groups_df, selected=None):
st.rerun()
-@testgen.dialog(title="Delete Test Suite", key="delete_test_suite_dialog_args")
+@st.dialog(title="Delete Test Suite")
def delete_test_suite_dialog(selected_test_suite):
test_suite_id = selected_test_suite["id"]
test_suite_name = selected_test_suite["test_suite"]
@@ -324,7 +324,7 @@ def delete_test_suite_dialog(selected_test_suite):
st.rerun()
-@testgen.dialog(title="Run Tests", key="run_tests_dialog_args")
+@st.dialog(title="Run Tests")
def run_tests_dialog(project_code, selected_test_suite):
test_suite_key = selected_test_suite["test_suite"]
start_process_button_message = "Start"
@@ -364,7 +364,7 @@ def run_tests_dialog(project_code, selected_test_suite):
)
-@testgen.dialog(title="Generate Tests", key="generate_tests_dialog_args")
+@st.dialog(title="Generate Tests")
def generate_tests_dialog(selected_test_suite):
test_suite_id = selected_test_suite["id"]
test_suite_key = selected_test_suite["test_suite"]
@@ -441,7 +441,7 @@ def generate_tests_dialog(selected_test_suite):
status_container.success("Process has successfully finished.")
-@testgen.dialog(title="Export to Observability", key="export_to_obs_dialog_args")
+@st.dialog(title="Export to Observability")
def observability_export_dialog(selected_test_suite):
project_key = selected_test_suite["project_code"]
test_suite_key = selected_test_suite["test_suite"]
From 3d37b11bcf8651921a998779ef2bbeb91ec5541a Mon Sep 17 00:00:00 2001
From: Aarthy Adityan
Date: Fri, 20 Sep 2024 12:24:08 -0400
Subject: [PATCH 64/78] fix(ui): redirect on invalid query params in inner
pages
---
testgen/ui/navigation/router.py | 8 ++++++++
testgen/ui/queries/profiling_queries.py | 18 +++++++++---------
testgen/ui/queries/test_suite_queries.py | 2 +-
testgen/ui/services/connection_service.py | 7 ++++---
testgen/ui/services/test_suite_service.py | 9 +++++++--
testgen/ui/views/profiling_anomalies.py | 11 ++++++++---
testgen/ui/views/profiling_results.py | 9 ++++++++-
testgen/ui/views/table_groups.py | 6 ++++++
testgen/ui/views/test_definitions.py | 6 ++++++
testgen/ui/views/test_results.py | 23 +++++++++++++++--------
10 files changed, 72 insertions(+), 27 deletions(-)
diff --git a/testgen/ui/navigation/router.py b/testgen/ui/navigation/router.py
index 0c58484..d010ee9 100644
--- a/testgen/ui/navigation/router.py
+++ b/testgen/ui/navigation/router.py
@@ -1,6 +1,7 @@
from __future__ import annotations
import logging
+import time
import streamlit as st
@@ -66,6 +67,13 @@ def navigate(self, /, to: str, with_args: dict = {}) -> None: # noqa: B006
st.error(error_message)
LOG.exception(error_message)
+
+ def navigate_with_warning(self, warning: str, to: str, with_args: dict = {}) -> None: # noqa: B006
+ st.warning(warning)
+ time.sleep(3)
+ self.navigate(to, with_args)
+
+
def set_query_params(self, with_args: dict) -> None:
params = st.query_params
params.update(with_args)
diff --git a/testgen/ui/queries/profiling_queries.py b/testgen/ui/queries/profiling_queries.py
index f831bbc..e22d711 100644
--- a/testgen/ui/queries/profiling_queries.py
+++ b/testgen/ui/queries/profiling_queries.py
@@ -1,3 +1,4 @@
+import pandas as pd
import streamlit as st
import testgen.ui.services.database_service as db
@@ -63,19 +64,18 @@ def run_column_lookup_query(str_table_groups_id, str_table_name):
@st.cache_data(show_spinner=False)
-def lookup_db_parentage_from_run(str_profile_run_id):
- str_schema = st.session_state["dbschema"]
- # Define the query
- str_sql = f"""
+def lookup_db_parentage_from_run(profile_run_id: str) -> tuple[pd.Timestamp, str, str, str] | None:
+ schema: str = st.session_state["dbschema"]
+ sql = f"""
SELECT profiling_starttime as profile_run_date, table_groups_id, g.table_groups_name, g.project_code
- FROM {str_schema}.profiling_runs pr
- INNER JOIN {str_schema}.table_groups g
+ FROM {schema}.profiling_runs pr
+ INNER JOIN {schema}.table_groups g
ON pr.table_groups_id = g.id
- WHERE pr.id = '{str_profile_run_id}'
+ WHERE pr.id = '{profile_run_id}'
"""
- df = db.retrieve_data(str_sql)
+ df = db.retrieve_data(sql)
if not df.empty:
- return df.at[0, "profile_run_date"], df.at[0, "table_groups_id"], df.at[0, "table_groups_name"], df.at[0, "project_code"]
+ return df.at[0, "profile_run_date"], str(df.at[0, "table_groups_id"]), df.at[0, "table_groups_name"], df.at[0, "project_code"]
@st.cache_data(show_spinner="Retrieving Data")
diff --git a/testgen/ui/queries/test_suite_queries.py b/testgen/ui/queries/test_suite_queries.py
index 61982ca..8885545 100644
--- a/testgen/ui/queries/test_suite_queries.py
+++ b/testgen/ui/queries/test_suite_queries.py
@@ -118,7 +118,7 @@ def get_by_project(schema, project_code, table_group_id=None):
@st.cache_data(show_spinner=False)
-def get_by_id(schema, test_suite_id):
+def get_by_id(schema: str, test_suite_id: str) -> pd.DataFrame:
sql = f"""
SELECT
suites.id::VARCHAR(50),
diff --git a/testgen/ui/services/connection_service.py b/testgen/ui/services/connection_service.py
index faad168..394c82a 100644
--- a/testgen/ui/services/connection_service.py
+++ b/testgen/ui/services/connection_service.py
@@ -14,11 +14,12 @@
from testgen.common.encrypt import DecryptText, EncryptText
-def get_by_id(connection_id, hide_passwords: bool = True):
+def get_by_id(connection_id: str, hide_passwords: bool = True) -> dict | None:
connections_df = connection_queries.get_by_id(connection_id)
decrypt_connections(connections_df, hide_passwords)
- connection = connections_df.to_dict(orient="records")[0]
- return connection
+ connections_list = connections_df.to_dict(orient="records")
+ if len(connections_list):
+ return connections_list[0]
def get_connections(project_code, hide_passwords: bool = False):
diff --git a/testgen/ui/services/test_suite_service.py b/testgen/ui/services/test_suite_service.py
index 720695e..b877963 100644
--- a/testgen/ui/services/test_suite_service.py
+++ b/testgen/ui/services/test_suite_service.py
@@ -1,3 +1,4 @@
+import pandas as pd
import streamlit as st
import testgen.ui.queries.test_suite_queries as test_suite_queries
@@ -9,9 +10,13 @@ def get_by_project(project_code, table_group_id=None):
return test_suite_queries.get_by_project(schema, project_code, table_group_id)
-def get_by_id(test_suite_id):
+def get_by_id(test_suite_id: str) -> pd.Series:
schema = st.session_state["dbschema"]
- return test_suite_queries.get_by_id(schema, test_suite_id).iloc[0]
+ df = test_suite_queries.get_by_id(schema, test_suite_id)
+ if not df.empty:
+ return df.iloc[0]
+ else:
+ return pd.Series()
def edit(test_suite):
diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py
index ab1bfcc..c94c2bf 100644
--- a/testgen/ui/views/profiling_anomalies.py
+++ b/testgen/ui/views/profiling_anomalies.py
@@ -23,9 +23,14 @@ class ProfilingAnomaliesPage(Page):
]
def render(self, run_id: str, issue_class: str | None = None, issue_type: str | None = None, **_kwargs) -> None:
- run_date, _table_group_id, table_group_name, project_code = profiling_queries.lookup_db_parentage_from_run(
- run_id
- )
+ run_parentage = profiling_queries.lookup_db_parentage_from_run(run_id)
+ if not run_parentage:
+ self.router.navigate_with_warning(
+ f"Profiling run with ID '{run_id}' does not exist. Redirecting to list of Profiling Runs ...",
+ "profiling-runs",
+ )
+
+ run_date, _table_group_id, table_group_name, project_code = run_parentage
run_date = date_service.get_timezoned_timestamp(st.session_state, run_date)
project_service.set_current_project(project_code)
diff --git a/testgen/ui/views/profiling_results.py b/testgen/ui/views/profiling_results.py
index bd134d9..e0a38f0 100644
--- a/testgen/ui/views/profiling_results.py
+++ b/testgen/ui/views/profiling_results.py
@@ -22,9 +22,16 @@ class ProfilingResultsPage(Page):
]
def render(self, run_id: str, table_name: str | None = None, column_name: str | None = None, **_kwargs) -> None:
- run_date, table_group_id, table_group_name, project_code = profiling_queries.lookup_db_parentage_from_run(
+ run_parentage = profiling_queries.lookup_db_parentage_from_run(
run_id
)
+ if not run_parentage:
+ self.router.navigate_with_warning(
+ f"Profiling run with ID '{run_id}' does not exist. Redirecting to list of Profiling Runs ...",
+ "profiling-runs",
+ )
+
+ run_date, table_group_id, table_group_name, project_code = run_parentage
run_date = date_service.get_timezoned_timestamp(st.session_state, run_date)
project_service.set_current_project(project_code)
diff --git a/testgen/ui/views/table_groups.py b/testgen/ui/views/table_groups.py
index 734214a..7403743 100644
--- a/testgen/ui/views/table_groups.py
+++ b/testgen/ui/views/table_groups.py
@@ -27,6 +27,12 @@ class TableGroupsPage(Page):
def render(self, connection_id: str, **_kwargs) -> None:
connection = connection_service.get_by_id(connection_id, hide_passwords=False)
+ if not connection:
+ self.router.navigate_with_warning(
+ f"Connection with ID '{connection_id}' does not exist. Redirecting to list of Connections ...",
+ "connections",
+ )
+
project_code = connection["project_code"]
project_service.set_current_project(project_code)
diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py
index 463d8df..7921b58 100644
--- a/testgen/ui/views/test_definitions.py
+++ b/testgen/ui/views/test_definitions.py
@@ -30,6 +30,12 @@ class TestDefinitionsPage(Page):
def render(self, test_suite_id: str, table_name: str | None = None, column_name: str | None = None, **_kwargs) -> None:
test_suite = test_suite_service.get_by_id(test_suite_id)
+ if test_suite.empty:
+ self.router.navigate_with_warning(
+ f"Test suite with ID '{test_suite_id}' does not exist. Redirecting to list of Test Suites ...",
+ "test-suites",
+ )
+
table_group = table_group_service.get_by_id(test_suite["table_groups_id"])
project_code = table_group["project_code"]
project_service.set_current_project(project_code)
diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py
index 67e49a5..fbbff5e 100644
--- a/testgen/ui/views/test_results.py
+++ b/testgen/ui/views/test_results.py
@@ -29,7 +29,14 @@ class TestResultsPage(Page):
]
def render(self, run_id: str, status: str | None = None, test_type: str | None = None, **_kwargs) -> None:
- run_date, test_suite_name, project_code = get_drill_test_run(run_id)
+ run_parentage = get_drill_test_run(run_id)
+ if not run_parentage:
+ self.router.navigate_with_warning(
+ f"Test run with ID '{run_id}' does not exist. Redirecting to list of Test Runs ...",
+ "test-runs",
+ )
+
+ run_date, test_suite_name, project_code = run_parentage
run_date = date_service.get_timezoned_timestamp(st.session_state, run_date)
project_service.set_current_project(project_code)
@@ -160,17 +167,17 @@ def render(self, run_id: str, status: str | None = None, test_type: str | None =
@st.cache_data(show_spinner=ALWAYS_SPIN)
-def get_drill_test_run(str_test_run_id):
- str_schema = st.session_state["dbschema"]
- str_sql = f"""
+def get_drill_test_run(test_run_id: str) -> tuple[pd.Timestamp, str, str] | None:
+ schema: str = st.session_state["dbschema"]
+ sql = f"""
SELECT tr.test_starttime as test_date,
ts.test_suite,
ts.project_code
- FROM {str_schema}.test_runs tr
- INNER JOIN {str_schema}.test_suites ts ON tr.test_suite_id = ts.id
- WHERE tr.id = '{str_test_run_id}'::UUID;
+ FROM {schema}.test_runs tr
+ INNER JOIN {schema}.test_suites ts ON tr.test_suite_id = ts.id
+ WHERE tr.id = '{test_run_id}'::UUID;
"""
- df = db.retrieve_data(str_sql)
+ df = db.retrieve_data(sql)
if not df.empty:
return df.at[0, "test_date"], df.at[0, "test_suite"], df.at[0, "project_code"]
From 994e0d9a1f54aa4a9ab845ff49b04ccddb17b294 Mon Sep 17 00:00:00 2001
From: Ricardo Boni
Date: Thu, 19 Sep 2024 15:43:41 -0400
Subject: [PATCH 65/78] fix(test validation): Fixing the test validation
---
.../test_parameter_validation_query.py | 16 +++--
.../commands/run_test_parameter_validation.py | 62 +++++++++++++------
.../dbsetup/060_create_standard_views.sql | 6 +-
.../ex_flag_tests_test_definitions.sql | 58 ++---------------
.../ex_get_test_column_list_tg.sql | 9 ++-
.../ex_prep_flag_tests_test_definitions.sql | 6 ++
.../ex_write_test_val_errors.sql | 5 +-
testgen/ui/views/test_results.py | 6 +-
8 files changed, 79 insertions(+), 89 deletions(-)
create mode 100644 testgen/template/validate_tests/ex_prep_flag_tests_test_definitions.sql
diff --git a/testgen/commands/queries/test_parameter_validation_query.py b/testgen/commands/queries/test_parameter_validation_query.py
index be0c9bc..d34e210 100644
--- a/testgen/commands/queries/test_parameter_validation_query.py
+++ b/testgen/commands/queries/test_parameter_validation_query.py
@@ -10,8 +10,8 @@ class CTestParamValidationSQL:
project_code = ""
test_suite = ""
test_schemas = ""
- missing_columns = ""
- missing_tables = ""
+ message = ""
+ test_ids = [] # noqa
exception_message = ""
flag_val = ""
@@ -29,11 +29,9 @@ def _ReplaceParms(self, strInputString):
strInputString = strInputString.replace("{TEST_RUN_ID}", self.test_run_id)
strInputString = strInputString.replace("{FLAG}", self.flag_val)
strInputString = strInputString.replace("{TEST_SCHEMAS}", self.test_schemas)
- strInputString = strInputString.replace("{MISSING_COLUMNS}", self.missing_columns)
- strInputString = strInputString.replace("{MISSING_TABLES}", self.missing_tables)
strInputString = strInputString.replace("{EXCEPTION_MESSAGE}", self.exception_message)
- strInputString = strInputString.replace("{MISSING_COLUMNS_NO_QUOTES}", self.missing_columns.replace("'", ""))
- strInputString = strInputString.replace("{MISSING_TABLES_NO_QUOTES}", self.missing_tables.replace("'", ""))
+ strInputString = strInputString.replace("{MESSAGE}", self.message)
+ strInputString = strInputString.replace("{CAT_TEST_IDS}", ", ".join(map(str, self.test_ids)))
strInputString = strInputString.replace("{START_TIME}", self.today)
strInputString = strInputString.replace("{NOW}", date_service.get_now_as_string())
@@ -62,6 +60,12 @@ def GetProjectTestValidationColumns(self):
return strQ
+ def PrepFlagTestsWithFailedValidation(self):
+ # Runs on Project DB
+ strQ = self._ReplaceParms(read_template_sql_file("ex_prep_flag_tests_test_definitions.sql", "validate_tests"))
+
+ return strQ
+
def FlagTestsWithFailedValidation(self):
# Runs on Project DB
strQ = self._ReplaceParms(read_template_sql_file("ex_flag_tests_test_definitions.sql", "validate_tests"))
diff --git a/testgen/commands/run_test_parameter_validation.py b/testgen/commands/run_test_parameter_validation.py
index 21f9102..8e93148 100644
--- a/testgen/commands/run_test_parameter_validation.py
+++ b/testgen/commands/run_test_parameter_validation.py
@@ -1,7 +1,15 @@
import logging
+from collections import defaultdict
+from itertools import chain
from testgen.commands.queries.test_parameter_validation_query import CTestParamValidationSQL
-from testgen.common import AssignConnectParms, RetrieveDBResultsToDictList, RetrieveTestExecParms, RunActionQueryList
+from testgen.common import (
+ AssignConnectParms,
+ RetrieveDBResultsToDictList,
+ RetrieveDBResultsToList,
+ RetrieveTestExecParms,
+ RunActionQueryList,
+)
LOG = logging.getLogger("testgen")
@@ -45,15 +53,15 @@ def run_parameter_validation_queries(
# Retrieve Test Column list
LOG.info("CurrentStep: Retrieve Test Columns for Validation")
strColumnList = clsExecute.GetTestValidationColumns(booClean)
- lstTestColumns = RetrieveDBResultsToDictList("DKTG", strColumnList)
+ test_columns, _ = RetrieveDBResultsToList("DKTG", strColumnList)
- if len(lstTestColumns) == 0:
+ if not test_columns:
LOG.warning(f"No test columns are present to validate in Test Suite {strTestSuite}")
missing_columns = []
else:
# Derive test schema list -- make CSV string from list of columns
# to be used as criteria for retrieving data dictionary
- setSchemas = {s["columns"].split(".")[0] for s in lstTestColumns}
+ setSchemas = {col.split(".")[0] for col, _ in test_columns}
strSchemas = ", ".join([f"'{value}'" for value in setSchemas])
LOG.debug("Test column list successfully retrieved")
@@ -71,7 +79,7 @@ def run_parameter_validation_queries(
LOG.debug("Project column list successfully received")
LOG.info("CurrentStep: Compare column sets")
# load results into sets
- result_set1 = {item["columns"].lower() for item in set(lstTestColumns)}
+ result_set1 = {col.lower() for col, _ in test_columns}
result_set2 = {item["columns"].lower() for item in set(lstProjectTestColumns)}
# Check if all columns exist in the table
@@ -80,11 +88,8 @@ def run_parameter_validation_queries(
if len(missing_columns) == 0:
LOG.info("No missing column in Project Column list.")
- strMissingColumns = ", ".join(f"'{x}'" for x in missing_columns)
- srtNoQuoteMissingCols = strMissingColumns.replace("'", "")
-
if missing_columns:
- LOG.debug("Test Columns are missing in target database: %s", srtNoQuoteMissingCols)
+ LOG.debug("Test Columns are missing in target database: %s", ", ".join(missing_columns))
# Extracting schema.tables that are missing from the result sets
tables_set1 = {elem.rsplit(".", 1)[0] for elem in result_set1}
@@ -94,25 +99,46 @@ def run_parameter_validation_queries(
missing_tables = tables_set1.difference(tables_set2)
if missing_tables:
- strMissingtables = ", ".join(f"'{x}'" for x in missing_tables)
+ LOG.info("Missing tables: %s", ", ".join(missing_tables))
else:
LOG.info("No missing tables in Project Column list.")
- strMissingtables = "''"
# Flag test_definitions tests with missing columns:
LOG.info("CurrentStep: Flagging Tests That Failed Validation")
- clsExecute.missing_columns = strMissingColumns
- clsExecute.missing_tables = strMissingtables
+
# Flag Value is D if called from execute_tests_qry.py, otherwise N to disable now
if booRunFromTestExec:
clsExecute.flag_val = "D"
- strTempMessage = "Tests that failed parameter validation have been flagged."
+ LOG.debug("Tests that failed parameter validation will be flagged.")
else:
clsExecute.flag_val = "N"
- strTempMessage = "Tests that failed parameter validation have been set to inactive."
- strFlagTests = clsExecute.FlagTestsWithFailedValidation()
- RunActionQueryList("DKTG", [strFlagTests])
- LOG.debug(strTempMessage)
+ LOG.debug("Tests that failed parameter validation will be deactivated.")
+
+ tests_missing_tables = defaultdict(list)
+ tests_missing_columns = defaultdict(list)
+ for column_name, test_ids in test_columns:
+ column_name = column_name.lower()
+ table_name = column_name.rsplit(".", 1)[0]
+ if table_name in missing_tables:
+ tests_missing_tables[table_name].extend(test_ids)
+ elif column_name in missing_columns:
+ tests_missing_columns[column_name].extend(test_ids)
+
+ clsExecute.test_ids = list(set(chain(*tests_missing_tables.values(), *tests_missing_columns.values())))
+ strPrepFlagTests = clsExecute.PrepFlagTestsWithFailedValidation()
+ RunActionQueryList("DKTG", [strPrepFlagTests])
+
+ for column_name, test_ids in tests_missing_columns.items():
+ clsExecute.message = f"Missing column: {column_name}"
+ clsExecute.test_ids = test_ids
+ strFlagTests = clsExecute.FlagTestsWithFailedValidation()
+ RunActionQueryList("DKTG", [strFlagTests])
+
+ for table_name, test_ids in tests_missing_tables.items():
+ clsExecute.message = f"Missing table: {table_name}"
+ clsExecute.test_ids = test_ids
+ strFlagTests = clsExecute.FlagTestsWithFailedValidation()
+ RunActionQueryList("DKTG", [strFlagTests])
# when run_parameter_validation_queries() is called from execute_tests_query.py:
# we disable tests and write validation errors to test_results table.
diff --git a/testgen/template/dbsetup/060_create_standard_views.sql b/testgen/template/dbsetup/060_create_standard_views.sql
index 2984bcf..9ec8331 100644
--- a/testgen/template/dbsetup/060_create_standard_views.sql
+++ b/testgen/template/dbsetup/060_create_standard_views.sql
@@ -136,14 +136,14 @@ SELECT p.project_name,
(1 - r.result_code)::INTEGER as exception_ct,
CASE
WHEN result_status = 'Warning'
- AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1
+ AND result_message NOT ILIKE 'Inactivated%' THEN 1
END::INTEGER as warning_ct,
CASE
WHEN result_status = 'Failed'
- AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1
+ AND result_message NOT ILIKE 'Inactivated%' THEN 1
END::INTEGER as failed_ct,
CASE
- WHEN result_message ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1
+ WHEN result_message ILIKE 'Inactivated%' THEN 1
END as execution_error_ct,
p.project_code,
r.table_groups_id,
diff --git a/testgen/template/validate_tests/ex_flag_tests_test_definitions.sql b/testgen/template/validate_tests/ex_flag_tests_test_definitions.sql
index 2ef7689..e9ebc1f 100644
--- a/testgen/template/validate_tests/ex_flag_tests_test_definitions.sql
+++ b/testgen/template/validate_tests/ex_flag_tests_test_definitions.sql
@@ -1,55 +1,7 @@
/*
-Mark Test inactive for Missing columns with update status
+Mark Test inactive for Missing columns/tables with update status
*/
-with test_columns as
- (SELECT DISTINCT schema_name || '.' || table_name || '.' || column_name AS columns
- FROM ( SELECT cat_test_id,
- schema_name,
- table_name,
- UNNEST(STRING_TO_ARRAY(all_columns, '~|~')) AS column_name
- FROM ( SELECT cat_test_id,
- schema_name,
- table_name,
- CONCAT_WS('~|~', column_name,
- groupby_names,
- window_date_column) AS all_columns
- FROM test_definitions d
- INNER JOIN test_types t
- ON d.test_type = t.test_type
- WHERE test_suite_id = '{TEST_SUITE_ID}'
- AND t.test_scope = 'column'
-
- UNION
- SELECT cat_test_id,
- match_schema_name AS schema_name,
- match_table_name AS table_name,
- CONCAT_WS('~|~',
- match_column_names,
- match_groupby_names) AS all_columns
- FROM test_definitions d
- INNER JOIN test_types t
- ON d.test_type = t.test_type
- WHERE test_suite_id = '{TEST_SUITE_ID}'
- AND t.test_scope = 'column') a ) b)
-update test_definitions
-set test_active = '{FLAG}',
- test_definition_status = 'Inactivated {RUN_DATE}: Missing Column'
-where cat_test_id in (select distinct cat_test_id
- from test_columns
- where lower(columns) in
- ({MISSING_COLUMNS}));
-
-
-/*
-Mark Test inactive for Missing table with update status
-*/
-with test_columns as
- (select distinct cat_test_id, schema_name || '.' || table_name || '.' || column_name as columns
- from test_definitions
- where test_suite_id = '{TEST_SUITE_ID}'
- and lower(schema_name || '.' || table_name) in ({MISSING_TABLES}))
-update test_definitions
-set test_active = '{FLAG}',
- test_definition_status = 'Inactivated {RUN_DATE}: Missing Table'
-where cat_test_id in (select distinct cat_test_id
- from test_columns);
+UPDATE test_definitions
+SET test_active = '{FLAG}',
+ test_definition_status = LEFT('Inactivated {RUN_DATE}: ' || CONCAT_WS('; ', substring(test_definition_status from 34), '{MESSAGE}'), 200)
+WHERE cat_test_id IN ({CAT_TEST_IDS});
diff --git a/testgen/template/validate_tests/ex_get_test_column_list_tg.sql b/testgen/template/validate_tests/ex_get_test_column_list_tg.sql
index 318909c..df7bdde 100644
--- a/testgen/template/validate_tests/ex_get_test_column_list_tg.sql
+++ b/testgen/template/validate_tests/ex_get_test_column_list_tg.sql
@@ -1,5 +1,6 @@
-SELECT DISTINCT schema_name || '.' || table_name || '.' || column_name AS columns
- FROM ( SELECT cat_test_id,
+ SELECT schema_name || '.' || table_name || '.' || column_name AS columns,
+ ARRAY_AGG(cat_test_id) as test_id_array
+ FROM (SELECT cat_test_id,
schema_name AS schema_name,
table_name AS table_name,
TRIM(UNNEST(STRING_TO_ARRAY(column_name, ','))) as column_name
@@ -47,4 +48,6 @@ SELECT DISTINCT schema_name || '.' || table_name || '.' || column_name AS column
INNER JOIN test_types t
ON d.test_type = t.test_type
WHERE test_suite_id = '{TEST_SUITE_ID}'
- AND t.test_scope = 'referential' ) cols;
+ AND t.test_scope = 'referential' ) cols
+ WHERE column_name SIMILAR TO '[A-Za-z0-9_]+'
+GROUP BY columns;
diff --git a/testgen/template/validate_tests/ex_prep_flag_tests_test_definitions.sql b/testgen/template/validate_tests/ex_prep_flag_tests_test_definitions.sql
new file mode 100644
index 0000000..d5eb6a2
--- /dev/null
+++ b/testgen/template/validate_tests/ex_prep_flag_tests_test_definitions.sql
@@ -0,0 +1,6 @@
+/*
+Clean the test definition status before it's set with missing tables / columns information
+*/
+UPDATE test_definitions
+SET test_definition_status = NULL
+WHERE cat_test_id IN ({CAT_TEST_IDS});
diff --git a/testgen/template/validate_tests/ex_write_test_val_errors.sql b/testgen/template/validate_tests/ex_write_test_val_errors.sql
index 8546863..b1d47d3 100644
--- a/testgen/template/validate_tests/ex_write_test_val_errors.sql
+++ b/testgen/template/validate_tests/ex_write_test_val_errors.sql
@@ -21,9 +21,8 @@ INSERT INTO test_results
'{TEST_RUN_ID}' as test_run_id,
NULL as input_parameters,
0 as result_code,
- -- TODO: show only missing columns referenced in this test
- left('ERROR - TEST COLUMN MISSING: {MISSING_COLUMNS_NO_QUOTES}', 470) AS result_message,
+ test_definition_status AS result_message,
NULL as result_measure
FROM test_definitions
- WHERE test_active = '-1'
+ WHERE test_active = 'D'
AND test_suite_id = '{TEST_SUITE_ID}';
diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py
index fbbff5e..9a194b4 100644
--- a/testgen/ui/views/test_results.py
+++ b/testgen/ui/views/test_results.py
@@ -225,14 +225,14 @@ def get_test_results_uncached(str_schema, str_run_id, str_sel_test_status, test_
(1 - r.result_code)::INTEGER as exception_ct,
CASE
WHEN result_status = 'Warning'
- AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%%' THEN 1
+ AND result_message NOT ILIKE 'Inactivated%%' THEN 1
END::INTEGER as warning_ct,
CASE
WHEN result_status = 'Failed'
- AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%%' THEN 1
+ AND result_message NOT ILIKE 'Inactivated%%' THEN 1
END::INTEGER as failed_ct,
CASE
- WHEN result_message ILIKE 'ERROR - TEST COLUMN MISSING%%' THEN 1
+ WHEN result_message ILIKE 'Inactivated%%' THEN 1
END as execution_error_ct,
p.project_code, r.table_groups_id::VARCHAR,
r.id::VARCHAR as test_result_id, r.test_run_id::VARCHAR,
From dd5f043e43c8bfa0498c133949871debc0f4c553 Mon Sep 17 00:00:00 2001
From: Ricardo Boni
Date: Tue, 24 Sep 2024 12:09:29 -0400
Subject: [PATCH 66/78] Revert "feat(ui): bind grid selection to query params
on result pages"
This reverts commit 2d73a57ba5173e9df1633e2e047d106cfdb08af6.
---
testgen/ui/services/form_service.py | 28 +++----------------------
testgen/ui/views/profiling_anomalies.py | 8 ++-----
testgen/ui/views/profiling_results.py | 6 +-----
testgen/ui/views/test_results.py | 9 ++------
4 files changed, 8 insertions(+), 43 deletions(-)
diff --git a/testgen/ui/services/form_service.py b/testgen/ui/services/form_service.py
index e1810f3..a1a56de 100644
--- a/testgen/ui/services/form_service.py
+++ b/testgen/ui/services/form_service.py
@@ -19,7 +19,6 @@
import testgen.common.date_service as date_service
import testgen.ui.services.authentication_service as authentication_service
import testgen.ui.services.database_service as db
-from testgen.ui.navigation.router import Router
"""
Shared rendering of UI elements
@@ -767,10 +766,8 @@ def render_grid_select(
str_prompt=None,
int_height=400,
do_multi_select=False,
- bind_to_query=None,
show_column_headers=None,
render_highlights=True,
- key="aggrid",
):
show_prompt(str_prompt)
@@ -844,18 +841,7 @@ def render_grid_select(
gb = GridOptionsBuilder.from_dataframe(df)
selection_mode = "multiple" if do_multi_select else "single"
-
- pre_selected_rows = None
- if bind_to_query:
- query_value = st.query_params.get(bind_to_query)
- # Workaround for this open issue: https://github.com/PablocFonseca/streamlit-aggrid/issues/207#issuecomment-1793039564
- pre_selected_rows = { query_value: True } if isinstance(query_value, str) and query_value.isdigit() else None
-
- gb.configure_selection(
- selection_mode=selection_mode,
- use_checkbox=do_multi_select,
- pre_selected_rows=pre_selected_rows,
- )
+ gb.configure_selection(selection_mode=selection_mode, use_checkbox=do_multi_select)
all_columns = list(df.columns)
@@ -910,18 +896,10 @@ def render_grid_select(
"padding-bottom": "0px !important",
}
},
- # Key is needed for query binding to work
- # Changing selection mode does not work if same key is used for both modes
- key=f"{key}_{selection_mode}",
)
- selected_rows = grid_data["selected_rows"]
- if bind_to_query:
- Router().set_query_params({
- bind_to_query: selected_rows[0].get("_selectedRowNodeInfo", {}).get("nodeRowIndex") if len(selected_rows) else None,
- })
- if len(selected_rows):
- return selected_rows
+ if len(grid_data["selected_rows"]):
+ return grid_data["selected_rows"]
def render_logo(logo_path: str = logo_file):
diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py
index c94c2bf..b81e158 100644
--- a/testgen/ui/views/profiling_anomalies.py
+++ b/testgen/ui/views/profiling_anomalies.py
@@ -132,11 +132,7 @@ def render(self, run_id: str, issue_class: str | None = None, issue_type: str |
# Show main grid and retrieve selections
selected = fm.render_grid_select(
- df_pa,
- lst_show_columns,
- int_height=400,
- do_multi_select=do_multi_select,
- bind_to_query="selected",
+ df_pa, lst_show_columns, int_height=400, do_multi_select=do_multi_select
)
with export_button_column:
@@ -158,7 +154,7 @@ def render(self, run_id: str, issue_class: str | None = None, issue_type: str |
if selected:
# Always show details for last selected row
- selected_row = selected[0]
+ selected_row = selected[len(selected) - 1]
else:
selected_row = None
diff --git a/testgen/ui/views/profiling_results.py b/testgen/ui/views/profiling_results.py
index e0a38f0..aa94ae6 100644
--- a/testgen/ui/views/profiling_results.py
+++ b/testgen/ui/views/profiling_results.py
@@ -105,11 +105,7 @@ def render(self, run_id: str, table_name: str | None = None, column_name: str |
with st.expander("📜 **Table CREATE script with suggested datatypes**"):
st.code(generate_create_script(df), "sql")
- selected_row = fm.render_grid_select(
- df,
- show_columns,
- bind_to_query="selected",
- )
+ selected_row = fm.render_grid_select(df, show_columns)
with export_button_column:
testgen.flex_row_end()
diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py
index fbbff5e..14990c3 100644
--- a/testgen/ui/views/test_results.py
+++ b/testgen/ui/views/test_results.py
@@ -667,12 +667,7 @@ def show_result_detail(str_run_id, str_sel_test_status, test_type_id, sorting_co
]
selected_rows = fm.render_grid_select(
- df,
- lst_show_columns,
- do_multi_select=do_multi_select,
- show_column_headers=lst_show_headers,
- key="grid:test-results",
- bind_to_query="selected",
+ df, lst_show_columns, do_multi_select=do_multi_select, show_column_headers=lst_show_headers
)
with export_container:
@@ -717,7 +712,7 @@ def show_result_detail(str_run_id, str_sel_test_status, test_type_id, sorting_co
if not selected_rows:
st.markdown(":orange[Select a record to see more information.]")
else:
- selected_row = selected_rows[0]
+ selected_row = selected_rows[len(selected_rows) - 1]
dfh = get_test_result_history(
selected_row["test_type"],
selected_row["test_suite_id"],
From e7494f366be0e2e33d747b87baaff3f993ca74a7 Mon Sep 17 00:00:00 2001
From: Ricardo Boni
Date: Tue, 24 Sep 2024 12:26:13 -0400
Subject: [PATCH 67/78] fix(profiling results): Disabling sorting was making
the page crash
---
testgen/ui/queries/profiling_queries.py | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/testgen/ui/queries/profiling_queries.py b/testgen/ui/queries/profiling_queries.py
index e22d711..dc93496 100644
--- a/testgen/ui/queries/profiling_queries.py
+++ b/testgen/ui/queries/profiling_queries.py
@@ -81,11 +81,13 @@ def lookup_db_parentage_from_run(profile_run_id: str) -> tuple[pd.Timestamp, str
@st.cache_data(show_spinner="Retrieving Data")
def get_profiling_detail(str_profile_run_id, str_table_name, str_column_name, sorting_columns = None):
str_schema = st.session_state["dbschema"]
- sorting_columns_str = (
- "p.schema_name, p.table_name, position"
- if sorting_columns is None
- else ", ".join(" ".join(col) for col in sorting_columns)
- )
+ if sorting_columns is None:
+ order_by_str = "ORDER BY p.schema_name, p.table_name, position"
+ elif len(sorting_columns):
+ order_by_str = "ORDER BY " + ", ".join(" ".join(col) for col in sorting_columns)
+ else:
+ order_by_str = ""
+
str_sql = f"""
SELECT -- Identifiers
id::VARCHAR, dk_id,
@@ -147,7 +149,7 @@ def get_profiling_detail(str_profile_run_id, str_table_name, str_column_name, so
WHERE p.profile_run_id = '{str_profile_run_id}'::UUID
AND p.table_name ILIKE '{str_table_name}'
AND p.column_name ILIKE '{str_column_name}'
- ORDER BY {sorting_columns_str};
+ {order_by_str};
"""
return db.retrieve_data(str_sql)
From e24d221d5bb5bbdc0a120c7fcec6fe18929e998c Mon Sep 17 00:00:00 2001
From: Aarthy Adityan
Date: Tue, 24 Sep 2024 12:54:10 -0400
Subject: [PATCH 68/78] fix(ui): use consistent labels for test result status
---
testgen/ui/views/overview.py | 8 ++++----
testgen/ui/views/test_results.py | 22 +++++++++++-----------
testgen/ui/views/test_runs.py | 4 ++--
testgen/ui/views/test_suites.py | 4 ++--
4 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/testgen/ui/views/overview.py b/testgen/ui/views/overview.py
index 8e01482..2423fef 100644
--- a/testgen/ui/views/overview.py
+++ b/testgen/ui/views/overview.py
@@ -141,9 +141,9 @@ def render_table_group_card(table_group: pd.Series, project_code: str, key: int)
testgen.summary_bar(
items=[
{ "label": "Passed", "value": passed_tests, "color": "green" },
- { "label": "Warnings", "value": to_int(table_group["latest_tests_warning_ct"]), "color": "yellow" },
+ { "label": "Warning", "value": to_int(table_group["latest_tests_warning_ct"]), "color": "yellow" },
{ "label": "Failed", "value": to_int(table_group["latest_tests_failed_ct"]), "color": "red" },
- { "label": "Errors", "value": to_int(table_group["latest_tests_error_ct"]), "color": "brown" },
+ { "label": "Error", "value": to_int(table_group["latest_tests_error_ct"]), "color": "brown" },
{ "label": "Dismissed", "value": to_int(table_group["latest_tests_dismissed_ct"]), "color": "grey" },
],
key=f"tests_{key}",
@@ -208,9 +208,9 @@ def render_test_suite_item(test_suite: pd.Series, column_spec: list[int], key: i
testgen.summary_bar(
items=[
{ "label": "Passed", "value": to_int(test_suite["last_run_passed_ct"]), "color": "green" },
- { "label": "Warnings", "value": to_int(test_suite["last_run_warning_ct"]), "color": "yellow" },
+ { "label": "Warning", "value": to_int(test_suite["last_run_warning_ct"]), "color": "yellow" },
{ "label": "Failed", "value": to_int(test_suite["last_run_failed_ct"]), "color": "red" },
- { "label": "Errors", "value": to_int(test_suite["last_run_error_ct"]), "color": "brown" },
+ { "label": "Error", "value": to_int(test_suite["last_run_error_ct"]), "color": "brown" },
{ "label": "Dismissed", "value": to_int(test_suite["last_run_dismissed_ct"]), "color": "grey" },
],
key=f"tests_{key}",
diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py
index 14990c3..abf26f4 100644
--- a/testgen/ui/views/test_results.py
+++ b/testgen/ui/views/test_results.py
@@ -62,14 +62,14 @@ def render(self, run_id: str, status: str | None = None, test_type: str | None =
with status_filter_column:
status_options = [
- "Failures and Warnings",
- "Failed Tests",
- "Tests with Warnings",
- "Passed Tests",
+ "Failed + Warning",
+ "Failed",
+ "Warning",
+ "Passed",
]
status = testgen.toolbar_select(
options=status_options,
- default_value=status or "Failures and Warnings",
+ default_value=status or "Failed + Warning",
required=False,
bind_to_query="status",
label="Result Status",
@@ -104,13 +104,13 @@ def render(self, run_id: str, status: str | None = None, test_type: str | None =
do_multi_select = st.toggle("Multi-Select", help=str_help)
match status:
- case "Failures and Warnings":
+ case "Failed + Warning":
status = "'Failed','Warning'"
- case "Failed Tests":
+ case "Failed":
status = "'Failed'"
- case "Tests with Warnings":
+ case "Warning":
status = "'Warning'"
- case "Passed Tests":
+ case "Passed":
status = "'Passed'"
# Display main grid and retrieve selection
@@ -341,9 +341,9 @@ def get_test_result_summary(run_id):
return [
{ "label": "Passed", "value": int(df.at[0, "passed_ct"]), "color": "green" },
- { "label": "Warnings", "value": int(df.at[0, "warning_ct"]), "color": "yellow" },
+ { "label": "Warning", "value": int(df.at[0, "warning_ct"]), "color": "yellow" },
{ "label": "Failed", "value": int(df.at[0, "failed_ct"]), "color": "red" },
- { "label": "Errors", "value": int(df.at[0, "error_ct"]), "color": "brown" },
+ { "label": "Error", "value": int(df.at[0, "error_ct"]), "color": "brown" },
{ "label": "Dismissed", "value": int(df.at[0, "dismissed_ct"]), "color": "grey" },
]
diff --git a/testgen/ui/views/test_runs.py b/testgen/ui/views/test_runs.py
index eb972dd..0c26007 100644
--- a/testgen/ui/views/test_runs.py
+++ b/testgen/ui/views/test_runs.py
@@ -144,9 +144,9 @@ def render_test_run_row(test_run: pd.Series, column_spec: list[int]) -> None:
testgen.summary_bar(
items=[
{ "label": "Passed", "value": to_int(test_run["passed_ct"]), "color": "green" },
- { "label": "Warnings", "value": to_int(test_run["warning_ct"]), "color": "yellow" },
+ { "label": "Warning", "value": to_int(test_run["warning_ct"]), "color": "yellow" },
{ "label": "Failed", "value": to_int(test_run["failed_ct"]), "color": "red" },
- { "label": "Errors", "value": to_int(test_run["error_ct"]), "color": "brown" },
+ { "label": "Error", "value": to_int(test_run["error_ct"]), "color": "brown" },
{ "label": "Dismissed", "value": to_int(test_run["dismissed_ct"]), "color": "grey" },
],
height=10,
diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py
index 0024de7..2bf5516 100644
--- a/testgen/ui/views/test_suites.py
+++ b/testgen/ui/views/test_suites.py
@@ -122,9 +122,9 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N
testgen.summary_bar(
items=[
{ "label": "Passed", "value": to_int(test_suite["last_run_passed_ct"]), "color": "green" },
- { "label": "Warnings", "value": to_int(test_suite["last_run_warning_ct"]), "color": "yellow" },
+ { "label": "Warning", "value": to_int(test_suite["last_run_warning_ct"]), "color": "yellow" },
{ "label": "Failed", "value": to_int(test_suite["last_run_failed_ct"]), "color": "red" },
- { "label": "Errors", "value": to_int(test_suite["last_run_error_ct"]), "color": "brown" },
+ { "label": "Error", "value": to_int(test_suite["last_run_error_ct"]), "color": "brown" },
{ "label": "Dismissed", "value": to_int(test_suite["last_run_dismissed_ct"]), "color": "grey" },
],
height=20,
From bf7ac1bc36012cc2ad9350f8b275bd9f9e444c2d Mon Sep 17 00:00:00 2001
From: Aarthy Adityan
Date: Tue, 24 Sep 2024 14:11:50 -0400
Subject: [PATCH 69/78] fix(ui): stop disposition buttons from glitching when
clicked
---
testgen/ui/views/profiling_anomalies.py | 53 +++++++++----------------
testgen/ui/views/test_definitions.py | 52 +++++++++---------------
testgen/ui/views/test_results.py | 53 +++++++++----------------
3 files changed, 57 insertions(+), 101 deletions(-)
diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py
index b81e158..60a6fc1 100644
--- a/testgen/ui/views/profiling_anomalies.py
+++ b/testgen/ui/views/profiling_anomalies.py
@@ -197,41 +197,26 @@ def render(self, run_id: str, issue_class: str | None = None, issue_type: str |
if "r.disposition" in dict(sorting_columns):
cached_functions.append(get_profiling_anomalies)
+ disposition_actions = [
+ { "icon": "✓", "help": "Confirm this issue as relevant for this run", "status": "Confirmed" },
+ { "icon": "✘", "help": "Dismiss this issue as not relevant for this run", "status": "Dismissed" },
+ { "icon": "🔇", "help": "Mute this test to deactivate it for future runs", "status": "Inactive" },
+ { "icon": "↩︎", "help": "Clear action", "status": "No Decision" },
+ ]
+
# Need to render toolbar buttons after grid, so selection status is maintained
- if actions_column.button(
- "✓", help="Confirm this issue as relevant for this run", disabled=not selected
- ):
- fm.reset_post_updates(
- do_disposition_update(selected, "Confirmed"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=cached_functions,
- )
- if actions_column.button(
- "✘", help="Dismiss this issue as not relevant for this run", disabled=not selected
- ):
- fm.reset_post_updates(
- do_disposition_update(selected, "Dismissed"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=cached_functions,
- )
- if actions_column.button(
- "🔇", help="Mute this test to deactivate it for future runs", disabled=not selected
- ):
- fm.reset_post_updates(
- do_disposition_update(selected, "Inactive"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=cached_functions,
- )
- if actions_column.button("↩︎", help="Clear action", disabled=not selected):
- fm.reset_post_updates(
- do_disposition_update(selected, "No Decision"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=cached_functions,
- )
+ for action in disposition_actions:
+ action["button"] = actions_column.button(action["icon"], help=action["help"], disabled=not selected)
+
+ # This has to be done as a second loop - otherwise, the rest of the buttons after the clicked one are not displayed briefly while refreshing
+ for action in disposition_actions:
+ if action["button"]:
+ fm.reset_post_updates(
+ do_disposition_update(selected, action["status"]),
+ as_toast=True,
+ clear_cache=True,
+ lst_cached_functions=cached_functions,
+ )
else:
st.markdown(":green[**No Hygiene Issues Found**]")
diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py
index 7921b58..f3d17eb 100644
--- a/testgen/ui/views/test_definitions.py
+++ b/testgen/ui/views/test_definitions.py
@@ -91,39 +91,25 @@ def render(self, test_suite_id: str, table_name: str | None = None, column_name:
)
fm.render_refresh_button(table_actions_column)
- # Display buttons
- if disposition_column.button("✓", help="Activate for future runs", disabled=not selected):
- fm.reset_post_updates(
- update_test_definition(selected, "test_active", True, "Activated"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=[],
- )
- if disposition_column.button("✘", help="Inactivate Test for future runs", disabled=not selected):
- fm.reset_post_updates(
- update_test_definition(selected, "test_active", False, "Inactivated"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=[],
- )
- if disposition_column.button(
- "🔒", help="Protect from future test generation", disabled=not selected
- ):
- fm.reset_post_updates(
- update_test_definition(selected, "lock_refresh", True, "Locked"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=[],
- )
- if disposition_column.button(
- "🔐", help="Unlock for future test generation", disabled=not selected
- ):
- fm.reset_post_updates(
- update_test_definition(selected, "lock_refresh", False, "Unlocked"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=[],
- )
+ disposition_actions = [
+ { "icon": "✓", "help": "Activate for future runs", "attribute": "test_active", "value": True, "message": "Activated" },
+ { "icon": "✘", "help": "Inactivate Test for future runs", "attribute": "test_active", "value": False, "message": "Inactivated" },
+ { "icon": "🔒", "help": "Protect from future test generation", "attribute": "lock_refresh", "value": True, "message": "Locked" },
+ { "icon": "🔐", "help": "Unlock for future test generation", "attribute": "lock_refresh", "value": False, "message": "Unlocked" },
+ ]
+
+ for action in disposition_actions:
+ action["button"] = disposition_column.button(action["icon"], help=action["help"], disabled=not selected)
+
+ # This has to be done as a second loop - otherwise, the rest of the buttons after the clicked one are not displayed briefly while refreshing
+ for action in disposition_actions:
+ if action["button"]:
+ fm.reset_post_updates(
+ update_test_definition(selected, action["attribute"], action["value"], action["message"]),
+ as_toast=True,
+ clear_cache=True,
+ lst_cached_functions=[],
+ )
if selected:
selected_test_def = selected[0]
diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py
index abf26f4..f77e932 100644
--- a/testgen/ui/views/test_results.py
+++ b/testgen/ui/views/test_results.py
@@ -125,40 +125,25 @@ def render(self, run_id: str, status: str | None = None, test_type: str | None =
if "r.disposition" in dict(sorting_columns):
affected_cached_functions.append(get_test_results)
- if actions_column.button(
- "✓", help="Confirm this issue as relevant for this run", disabled=disable_dispo
- ):
- fm.reset_post_updates(
- do_disposition_update(selected, "Confirmed"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=affected_cached_functions,
- )
- if actions_column.button(
- "✘", help="Dismiss this issue as not relevant for this run", disabled=disable_dispo
- ):
- fm.reset_post_updates(
- do_disposition_update(selected, "Dismissed"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=affected_cached_functions,
- )
- if actions_column.button(
- "🔇", help="Mute this test to deactivate it for future runs", disabled=not selected
- ):
- fm.reset_post_updates(
- do_disposition_update(selected, "Inactive"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=affected_cached_functions,
- )
- if actions_column.button("⟲", help="Clear action", disabled=not selected):
- fm.reset_post_updates(
- do_disposition_update(selected, "No Decision"),
- as_toast=True,
- clear_cache=True,
- lst_cached_functions=affected_cached_functions,
- )
+ disposition_actions = [
+ { "icon": "✓", "help": "Confirm this issue as relevant for this run", "status": "Confirmed" },
+ { "icon": "✘", "help": "Dismiss this issue as not relevant for this run", "status": "Dismissed" },
+ { "icon": "🔇", "help": "Mute this test to deactivate it for future runs", "status": "Inactive" },
+ { "icon": "↩︎", "help": "Clear action", "status": "No Decision" },
+ ]
+
+ for action in disposition_actions:
+ action["button"] = actions_column.button(action["icon"], help=action["help"], disabled=disable_dispo)
+
+ # This has to be done as a second loop - otherwise, the rest of the buttons after the clicked one are not displayed briefly while refreshing
+ for action in disposition_actions:
+ if action["button"]:
+ fm.reset_post_updates(
+ do_disposition_update(selected, action["status"]),
+ as_toast=True,
+ clear_cache=True,
+ lst_cached_functions=affected_cached_functions,
+ )
# Help Links
st.markdown(
From 7c887cfe60f1e635e1d27dce2015fb43152956ac Mon Sep 17 00:00:00 2001
From: Aarthy Adityan
Date: Tue, 24 Sep 2024 18:19:02 -0400
Subject: [PATCH 70/78] fix(css): override streamlit's default colors for
buttons and form inputs
---
testgen/ui/assets/style.css | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css
index b5eec64..5b8386e 100644
--- a/testgen/ui/assets/style.css
+++ b/testgen/ui/assets/style.css
@@ -87,22 +87,24 @@ div[data-testid="stDialog"] div[role="dialog"] {
/* */
/* Theming for buttons and form inputs */
-button[data-testid="baseButton-secondary"]:hover,
-button[data-testid="baseButton-secondary"]:focus:not(:active),
-button[data-testid="baseButton-secondaryFormSubmit"]:hover,
-button[data-testid="baseButton-secondaryFormSubmit"]:focus:not(:active) {
+button[data-testid="stBaseButton-secondary"]:hover,
+button[data-testid="stBaseButton-secondary"]:focus:not(:active),
+button[data-testid="stBaseButton-secondaryFormSubmit"]:hover,
+button[data-testid="stBaseButton-secondaryFormSubmit"]:focus:not(:active) {
border-color: var(--primary-color);
color: var(--primary-color);
}
-button[data-testid="baseButton-secondary"]:active,
-button[data-testid="baseButton-secondaryFormSubmit"]:active,
+button[data-testid="stBaseButton-secondary"]:active,
+button[data-testid="stBaseButton-secondaryFormSubmit"]:active,
label[data-baseweb="checkbox"]:has(input[aria-checked="true"]) > span {
border-color: var(--primary-color);
background-color: var(--primary-color);
}
-div[data-testid="stTextInput-RootElement"]:focus-within,
+div[data-testid="stTextInputRootElement"]:focus-within,
+div[data-testid="stNumberInputContainer"]:focus-within,
+div[data-baseweb="select"]:focus-within > div,
div[data-baseweb="select"] > div:has(input[aria-expanded="true"]) {
border-color: var(--primary-color);
}
From d748c348de6c95a23bcb7ddb4fa4bbef4c01356e Mon Sep 17 00:00:00 2001
From: Aarthy Adityan
Date: Wed, 25 Sep 2024 00:24:41 -0400
Subject: [PATCH 71/78] fix(ui): hide actions not accessible to current user
role
---
testgen/ui/services/form_service.py | 3 +-
testgen/ui/views/connections.py | 2 +-
testgen/ui/views/table_groups.py | 2 +-
testgen/ui/views/test_definitions.py | 7 +-
testgen/ui/views/test_results.py | 5 +-
testgen/ui/views/test_suites.py | 99 ++++++++++++++--------------
6 files changed, 62 insertions(+), 56 deletions(-)
diff --git a/testgen/ui/services/form_service.py b/testgen/ui/services/form_service.py
index a1a56de..ba07527 100644
--- a/testgen/ui/services/form_service.py
+++ b/testgen/ui/services/form_service.py
@@ -631,6 +631,7 @@ def render_edit_form(
lst_key_columns,
lst_disabled=None,
str_text_display=None,
+ submit_disabled=False,
form_unique_key: str | None = None,
):
show_header(str_form_name)
@@ -687,7 +688,7 @@ def render_edit_form(
else:
# If Hidden, add directly to dct_mods for updates
dct_mods[column] = row_selected[column]
- edit_allowed = authentication_service.current_user_has_edit_role()
+ edit_allowed = not submit_disabled and authentication_service.current_user_has_edit_role()
submit = st.form_submit_button("Save Changes", disabled=not edit_allowed)
if submit and edit_allowed:
diff --git a/testgen/ui/views/connections.py b/testgen/ui/views/connections.py
index 0f6fc5f..07a5031 100644
--- a/testgen/ui/views/connections.py
+++ b/testgen/ui/views/connections.py
@@ -23,7 +23,7 @@ class ConnectionsPage(Page):
can_activate: typing.ClassVar = [
lambda: session.authentication_status,
]
- menu_item = MenuItem(icon="database", label="Data Configuration", order=3)
+ menu_item = MenuItem(icon="database", label="Data Configuration", order=4)
def render(self, project_code: str, **_kwargs) -> None:
dataframe = connection_service.get_connections(project_code)
diff --git a/testgen/ui/views/table_groups.py b/testgen/ui/views/table_groups.py
index 7403743..418acb0 100644
--- a/testgen/ui/views/table_groups.py
+++ b/testgen/ui/views/table_groups.py
@@ -20,8 +20,8 @@
class TableGroupsPage(Page):
path = "connections:table-groups"
can_activate: typing.ClassVar = [
- lambda: authentication_service.current_user_has_admin_role() or "overview",
lambda: session.authentication_status,
+ lambda: authentication_service.current_user_has_admin_role(),
lambda: "connection_id" in session.current_page_args or "connections",
]
diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py
index f3d17eb..795f319 100644
--- a/testgen/ui/views/test_definitions.py
+++ b/testgen/ui/views/test_definitions.py
@@ -39,6 +39,7 @@ def render(self, test_suite_id: str, table_name: str | None = None, column_name:
table_group = table_group_service.get_by_id(test_suite["table_groups_id"])
project_code = table_group["project_code"]
project_service.set_current_project(project_code)
+ user_can_edit = authentication_service.current_user_has_edit_role()
testgen.page_header(
"Test Definitions",
@@ -80,7 +81,7 @@ def render(self, test_suite_id: str, table_name: str | None = None, column_name:
str_help = "Toggle on to perform actions on multiple test definitions"
do_multi_select = st.toggle("Multi-Select", help=str_help)
- if actions_column.button(
+ if user_can_edit and actions_column.button(
":material/add: Add", help="Add a new Test Definition"
):
add_test_dialog(project_code, table_group, test_suite, table_name, column_name)
@@ -114,14 +115,14 @@ def render(self, test_suite_id: str, table_name: str | None = None, column_name:
if selected:
selected_test_def = selected[0]
- if actions_column.button(
+ if user_can_edit and actions_column.button(
":material/edit: Edit",
help="Edit the Test Definition",
disabled=not selected,
):
edit_test_dialog(project_code, table_group, test_suite, table_name, column_name, selected_test_def)
- if actions_column.button(
+ if user_can_edit and actions_column.button(
":material/delete: Delete",
help="Delete the selected Test Definition",
disabled=not selected,
diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py
index f77e932..2c58271 100644
--- a/testgen/ui/views/test_results.py
+++ b/testgen/ui/views/test_results.py
@@ -12,7 +12,7 @@
from testgen.common import ConcatColumnList, date_service
from testgen.ui.components import widgets as testgen
from testgen.ui.navigation.page import Page
-from testgen.ui.services import project_service
+from testgen.ui.services import authentication_service, project_service
from testgen.ui.services.string_service import empty_if_null
from testgen.ui.session import session
from testgen.ui.views.profiling_modal import view_profiling_button
@@ -715,7 +715,8 @@ def show_result_detail(str_run_id, str_sel_test_status, test_type_id, sorting_co
with pg_col2:
v_col1, v_col2, v_col3 = st.columns([0.33, 0.33, 0.33])
- view_edit_test(v_col1, selected_row["test_definition_id_current"])
+ if authentication_service.current_user_has_edit_role():
+ view_edit_test(v_col1, selected_row["test_definition_id_current"])
if selected_row["test_scope"] == "column":
view_profiling_button(
v_col2, selected_row["table_name"], selected_row["column_names"],
diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py
index 2bf5516..25d8cdc 100644
--- a/testgen/ui/views/test_suites.py
+++ b/testgen/ui/views/test_suites.py
@@ -24,10 +24,9 @@
class TestSuitesPage(Page):
path = "test-suites"
can_activate: typing.ClassVar = [
- lambda: authentication_service.current_user_has_admin_role() or "overview",
lambda: session.authentication_status,
]
- menu_item = MenuItem(icon="list_alt", label="Test Suites", order=4)
+ menu_item = MenuItem(icon="list_alt", label="Test Suites", order=3)
def render(self, project_code: str | None = None, table_group_id: str | None = None, **_kwargs) -> None:
project_code = st.session_state["project"]
@@ -52,43 +51,46 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N
)
df = test_suite_service.get_by_project(project_code, table_group_id)
-
- with actions_column:
- st.button(
- ":material/add: Add Test Suite",
- key="test_suite:keys:add",
- help="Add a new test suite",
- on_click=lambda: add_test_suite_dialog(project_code, df_tg),
- )
+ user_can_edit = authentication_service.current_user_has_edit_role()
+
+ if user_can_edit:
+ with actions_column:
+ st.button(
+ ":material/add: Add Test Suite",
+ key="test_suite:keys:add",
+ help="Add a new test suite",
+ on_click=lambda: add_test_suite_dialog(project_code, df_tg),
+ )
for _, test_suite in df.iterrows():
subtitle = f"{test_suite['connection_name']} > {test_suite['table_groups_name']}"
with testgen.card(title=test_suite["test_suite"], subtitle=subtitle) as test_suite_card:
- with test_suite_card.actions:
- testgen.button(
- type_="icon",
- icon="output",
- tooltip="Export results to Observability",
- tooltip_position="right",
- on_click=partial(observability_export_dialog, test_suite),
- key=f"test_suite:keys:export:{test_suite['id']}",
- )
- testgen.button(
- type_="icon",
- icon="edit",
- tooltip="Edit test suite",
- tooltip_position="right",
- on_click=partial(edit_test_suite_dialog, project_code, df_tg, test_suite),
- key=f"test_suite:keys:edit:{test_suite['id']}",
- )
- testgen.button(
- type_="icon",
- icon="delete",
- tooltip="Delete test suite",
- tooltip_position="right",
- on_click=partial(delete_test_suite_dialog, test_suite),
- key=f"test_suite:keys:delete:{test_suite['id']}",
- )
+ if user_can_edit:
+ with test_suite_card.actions:
+ testgen.button(
+ type_="icon",
+ icon="output",
+ tooltip="Export results to Observability",
+ tooltip_position="right",
+ on_click=partial(observability_export_dialog, test_suite),
+ key=f"test_suite:keys:export:{test_suite['id']}",
+ )
+ testgen.button(
+ type_="icon",
+ icon="edit",
+ tooltip="Edit test suite",
+ tooltip_position="right",
+ on_click=partial(edit_test_suite_dialog, project_code, df_tg, test_suite),
+ key=f"test_suite:keys:edit:{test_suite['id']}",
+ )
+ testgen.button(
+ type_="icon",
+ icon="delete",
+ tooltip="Delete test suite",
+ tooltip_position="right",
+ on_click=partial(delete_test_suite_dialog, test_suite),
+ key=f"test_suite:keys:delete:{test_suite['id']}",
+ )
main_section, latest_run_section, actions_section = st.columns([.4, .4, .2])
@@ -134,19 +136,20 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N
else:
st.markdown("--")
- with actions_section:
- testgen.button(
- type_="stroked",
- label="Run Tests",
- on_click=partial(run_tests_dialog, project_code, test_suite),
- key=f"test_suite:keys:runtests:{test_suite['id']}",
- )
- testgen.button(
- type_="stroked",
- label="Generate Tests",
- on_click=partial(generate_tests_dialog, test_suite),
- key=f"test_suite:keys:generatetests:{test_suite['id']}",
- )
+ if user_can_edit:
+ with actions_section:
+ testgen.button(
+ type_="stroked",
+ label="Run Tests",
+ on_click=partial(run_tests_dialog, project_code, test_suite),
+ key=f"test_suite:keys:runtests:{test_suite['id']}",
+ )
+ testgen.button(
+ type_="stroked",
+ label="Generate Tests",
+ on_click=partial(generate_tests_dialog, test_suite),
+ key=f"test_suite:keys:generatetests:{test_suite['id']}",
+ )
@st.cache_data(show_spinner=False)
From d74871cb84b8f92f75e21ec4d94395ec5e377ab6 Mon Sep 17 00:00:00 2001
From: Aarthy Adityan
Date: Wed, 25 Sep 2024 20:53:51 -0400
Subject: [PATCH 72/78] fix(ui): workaround for form inputs disappearing in
dialogs when button clicked
---
testgen/ui/assets/style.css | 8 +++++-
testgen/ui/views/connections.py | 33 ++++++++++++++++---------
testgen/ui/views/overview.py | 2 +-
testgen/ui/views/profiling_anomalies.py | 2 +-
testgen/ui/views/profiling_modal.py | 2 +-
testgen/ui/views/test_definitions.py | 6 +++++
testgen/ui/views/test_results.py | 2 +-
7 files changed, 38 insertions(+), 17 deletions(-)
diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css
index 5b8386e..e183bf9 100644
--- a/testgen/ui/assets/style.css
+++ b/testgen/ui/assets/style.css
@@ -86,7 +86,7 @@ div[data-testid="stDialog"] div[role="dialog"] {
}
/* */
-/* Theming for buttons and form inputs */
+/* Theming for buttons, tabs and form inputs */
button[data-testid="stBaseButton-secondary"]:hover,
button[data-testid="stBaseButton-secondary"]:focus:not(:active),
button[data-testid="stBaseButton-secondaryFormSubmit"]:hover,
@@ -109,6 +109,12 @@ div[data-baseweb="select"] > div:has(input[aria-expanded="true"]) {
border-color: var(--primary-color);
}
+button[data-testid="stTab"][aria-selected="true"],
+button[data-testid="stTab"]:hover {
+ color: var(--primary-color);
+}
+
+div[data-baseweb="tab-highlight"],
label[data-baseweb="radio"]:has(input[tabindex="0"]) > div:first-child,
label[data-baseweb="checkbox"]:has(input[aria-checked="true"]) > div:first-child {
background-color: var(--primary-color);
diff --git a/testgen/ui/views/connections.py b/testgen/ui/views/connections.py
index 07a5031..33df711 100644
--- a/testgen/ui/views/connections.py
+++ b/testgen/ui/views/connections.py
@@ -168,7 +168,6 @@ def show_connection_form(self, selected_connection, mode, project_code):
bottom_left_column, bottom_right_column = st.columns([0.25, 0.75])
button_left_column, button_right_column = st.columns([0.20, 0.80])
connection_status_wrapper = st.container()
- connection_status_container = connection_status_wrapper.empty()
connection_id = selected_connection["connection_id"] if mode == "edit" else None
connection_name = selected_connection["connection_name"] if mode == "edit" else ""
@@ -382,18 +381,28 @@ def on_connect_by_url_change():
test_connection = button_left_column.button("Test Connection")
if test_connection:
- connection_status_container.empty()
- connection_status_container.info("Testing the connection...")
-
+ single_element_container = connection_status_wrapper.empty()
+ single_element_container.info("Connecting ...")
connection_status = self.test_connection(new_connection)
- renderer = {
- True: connection_status_container.success,
- False: connection_status_container.error,
- }[connection_status.successful]
-
- renderer(connection_status.message)
- if not connection_status.successful and connection_status.details:
- st.text_area("Connection Error Details", value=connection_status.details)
+
+ with single_element_container.container():
+ renderer = {
+ True: st.success,
+ False: st.error,
+ }[connection_status.successful]
+
+ renderer(connection_status.message)
+ if not connection_status.successful and connection_status.details:
+ st.caption("Connection Error Details")
+
+ with st.container(border=True):
+ st.markdown(connection_status.details)
+ else:
+ # This is needed to fix a strange bug in Streamlit when using dialog + input fields + button
+ # If an input field is changed and the button is clicked immediately (without unfocusing the input first),
+ # two fragment reruns happen successively, one for unfocusing the input and the other for clicking the button
+ # Some or all (it seems random) of the input fields disappear when this happens
+ time.sleep(0.1)
def test_connection(self, connection: dict) -> "ConnectionStatus":
if connection["connect_by_key"] and connection["connection_id"] is None:
diff --git a/testgen/ui/views/overview.py b/testgen/ui/views/overview.py
index 2423fef..132b66e 100644
--- a/testgen/ui/views/overview.py
+++ b/testgen/ui/views/overview.py
@@ -188,7 +188,7 @@ def render_test_suite_item(test_suite: pd.Series, column_spec: list[int], key: i
with generation_column:
if (latest_generation := test_suite["latest_auto_gen_date"]) and pd.notnull(latest_generation):
- st.html(f'