From ef58c8e809febdb2455f0f12e06c6e3bec8dfc8e Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Wed, 14 Aug 2024 01:04:33 -0400 Subject: [PATCH 01/78] refactor(streamlit): upgrade streamlit to latest version --- pyproject.toml | 3 +- testgen/__main__.py | 1 - testgen/ui/assets/style.css | 44 ++++--------------- testgen/ui/components/frontend/css/shared.css | 11 ++--- testgen/ui/services/form_service.py | 2 +- testgen/ui/views/connections_base.py | 2 +- testgen/ui/views/profiling_summary.py | 4 +- testgen/ui/views/table_groups.py | 6 +-- testgen/ui/views/test_runs.py | 2 +- testgen/ui/views/test_suites.py | 6 +-- 10 files changed, 27 insertions(+), 54 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2978f92..1e9328e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,7 +45,7 @@ dependencies = [ "trogon==0.4.0", "numpy==1.25.2", "pandas==2.1.0", - "streamlit==1.26.0", + "streamlit==1.37.1", "streamlit-extras==0.3.0", "streamlit-aggrid==0.3.4.post3", "streamlit-antd-components==0.2.2", @@ -62,6 +62,7 @@ dependencies = [ "psutil==5.9.8", "concurrent_log_handler==0.9.25", "cryptography==42.0.8", + "validators==0.33.0", ] [project.optional-dependencies] diff --git a/testgen/__main__.py b/testgen/__main__.py index d964555..a63b1b6 100644 --- a/testgen/__main__.py +++ b/testgen/__main__.py @@ -727,7 +727,6 @@ def run(debug: bool): "streamlit", "run", app_file, - "--ui.hideSidebarNav=true", "--browser.gatherUsageStats=false", f"--server.sslCertFile={settings.SSL_CERT_FILE}" if use_ssl else "", f"--server.sslKeyFile={settings.SSL_KEY_FILE}" if use_ssl else "", diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css index 8464393..f5f2659 100644 --- a/testgen/ui/assets/style.css +++ b/testgen/ui/assets/style.css @@ -37,39 +37,11 @@ footer { /* Sidebar */ section[data-testid="stSidebar"] { z-index: 999; + background-color: var(--sidebar-background-color); } -.appview-container > :nth-child(1 of section) { - top: unset; -} - 
-.appview-container > :nth-child(1 of section) > :nth-child(1 of div[class]) { - overflow: hidden; -} - -.appview-container > :nth-child(1 of section) > :nth-child(1 of div[class]) > div:nth-child(2) { - height: 100% !important; - padding: unset !important; -} - -.appview-container > :nth-child(1 of section) > :nth-child(1 of div[class]) > div:nth-child(2) > div { - height: 100% !important; -} - -.appview-container > :nth-child(1 of section) > :nth-child(1 of div[class]) > div:nth-child(2) > div > div:nth-child(1) { - height: 100% !important; -} - -.appview-container > :nth-child(1 of section) > :nth-child(1 of div[class]) > div:nth-child(2) > div > div:nth-child(1) > div > div:nth-child(1) { - height: 100% !important; -} - -/* .appview-container > :nth-child(1 of section) > :nth-child(1 of div[class]) > div:nth-child(2) > div > div:nth-child(1) > div > div:nth-child(1) > div > div { - height: 100% !important; -} */ - -.appview-container > :nth-child(1 of section) > :nth-child(1 of div[class]) > div:nth-child(2) > div > div:nth-child(1) > div > div:not(:nth-child(1)) { - /* Hide the instances of CookieManager from the sidebar when loging out */ +[data-testid="stSidebarNav"], +[data-testid="stSidebarUserContent"] { display: none; } /* */ @@ -112,13 +84,13 @@ button[title="Show password text"] { /* Dark mode */ @media (prefers-color-scheme: dark) { body { - --primary-text-color: rgba(255, 255, 255);; + --primary-text-color: rgba(255, 255, 255); --secondary-text-color: rgba(255, 255, 255, .7); - --disabled-text-color: rgba(255, 255, 255, .5);; + --disabled-text-color: rgba(255, 255, 255, .5); - --sidebar-background-color: rgba(14, 17, 23, .7); - --sidebar-item-hover-color: rgba(14, 17, 23, .5); - --sidebar-active-item-color: rgba(14, 17, 23, .5); + --sidebar-background-color: #14181f; + --sidebar-item-hover-color: #10141b; + --sidebar-active-item-color: #10141b; --sidebar-active-item-border-color: #b4e3c9; --dk-text-value-background: unset; } diff --git 
a/testgen/ui/components/frontend/css/shared.css b/testgen/ui/components/frontend/css/shared.css index ffba2ae..6d0c1f9 100644 --- a/testgen/ui/components/frontend/css/shared.css +++ b/testgen/ui/components/frontend/css/shared.css @@ -24,13 +24,14 @@ body { @media (prefers-color-scheme: dark) { body { - --primary-text-color: rgba(255, 255, 255);; + --primary-text-color: rgba(255, 255, 255); --secondary-text-color: rgba(255, 255, 255, .7); - --disabled-text-color: rgba(255, 255, 255, .5);; + --disabled-text-color: rgba(255, 255, 255, .5); - --sidebar-background-color: rgba(14, 17, 23, .7); - --sidebar-item-hover-color: rgba(14, 17, 23, .5); - --sidebar-active-item-color: rgba(14, 17, 23, .5); + --sidebar-background-color: #14181f; + --sidebar-item-hover-color: #10141b; + --sidebar-active-item-color: #10141b; --sidebar-active-item-border-color: #b4e3c9; + --dk-text-value-background: unset; } } diff --git a/testgen/ui/services/form_service.py b/testgen/ui/services/form_service.py index e18bf43..ac30e59 100644 --- a/testgen/ui/services/form_service.py +++ b/testgen/ui/services/form_service.py @@ -389,7 +389,7 @@ def reset_post_updates(str_message=None, as_toast=False, clear_cache=True, lst_c fcn.clear() else: st.cache_data.clear() - st.experimental_rerun() + st.rerun() def render_page_header( diff --git a/testgen/ui/views/connections_base.py b/testgen/ui/views/connections_base.py index 5cc9c6e..6af0c99 100644 --- a/testgen/ui/views/connections_base.py +++ b/testgen/ui/views/connections_base.py @@ -309,7 +309,7 @@ def on_connect_by_url_change(): time.sleep(1) if connection_modal: connection_modal.close() - st.experimental_rerun() + st.rerun() test_left_column, test_mid_column, test_right_column = st.columns([0.15, 0.15, 0.70]) test_connection = button_right_column.button("Test Connection") diff --git a/testgen/ui/views/profiling_summary.py b/testgen/ui/views/profiling_summary.py index b21d825..126777a 100644 --- a/testgen/ui/views/profiling_summary.py +++ 
b/testgen/ui/views/profiling_summary.py @@ -121,7 +121,7 @@ def open_drill_downs(dct_selected_rows, button_slots): st.session_state["drill_profile_run"] = dct_selected_row["profiling_run_id"] session.current_page = "profiling/results" session.current_page_args = {} - st.experimental_rerun() + st.rerun() if button_slots[1].button( f":{'gray' if not dct_selected_rows else 'green'}[Hygiene →]", @@ -133,7 +133,7 @@ def open_drill_downs(dct_selected_rows, button_slots): st.session_state["drill_profile_tg"] = dct_selected_row["table_groups_id"] session.current_page = "profiling/hygiene" session.current_page_args = {} - st.experimental_rerun() + st.rerun() def show_record_detail(dct_selected_row): diff --git a/testgen/ui/views/table_groups.py b/testgen/ui/views/table_groups.py index 80c9c3a..3aab04c 100644 --- a/testgen/ui/views/table_groups.py +++ b/testgen/ui/views/table_groups.py @@ -104,7 +104,7 @@ def render(self, connection_id: int | None = None) -> None: session.current_page = "connections/table-groups/test-suites" session.current_page_args = {"connection_id": connection_id, "table_group_id": selected[0]["id"]} - st.experimental_rerun() + st.rerun() if add_modal.is_open(): show_add_or_edit_modal(add_modal, "add", project_code, connection) @@ -285,7 +285,7 @@ def show_delete_modal(modal, selected=None): st.success(success_message) time.sleep(1) modal.close() - st.experimental_rerun() + st.rerun() def show_add_or_edit_modal(modal, mode, project_code, connection, selected=None): @@ -472,7 +472,7 @@ def show_add_or_edit_modal(modal, mode, project_code, connection, selected=None) st.success(success_message) time.sleep(1) modal.close() - st.experimental_rerun() + st.rerun() with table_groups_preview_tab: if mode == "edit": diff --git a/testgen/ui/views/test_runs.py b/testgen/ui/views/test_runs.py index 0dd9b85..d2d8f49 100644 --- a/testgen/ui/views/test_runs.py +++ b/testgen/ui/views/test_runs.py @@ -72,7 +72,7 @@ def render(self) -> None: 
st.session_state["drill_test_run"] = dct_selected_row["test_run_id"] session.current_page = "tests/results" session.current_page_args = {} - st.experimental_rerun() + st.rerun() if dct_selected_rows: open_record_detail( diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py index b213127..f409b7f 100644 --- a/testgen/ui/views/test_suites.py +++ b/testgen/ui/views/test_suites.py @@ -115,7 +115,7 @@ def render(self, connection_id: str | None = None, table_group_id: str | None = "table_group_id": table_group_id, "test_suite_id": selected[0]["id"], } - st.experimental_rerun() + st.rerun() if add_modal.is_open(): show_add_or_edit_modal(add_modal, "add", project_code, connection, table_group) @@ -343,7 +343,7 @@ def show_delete_modal(modal, selected=None): st.success(success_message) time.sleep(1) modal.close() - st.experimental_rerun() + st.rerun() def show_add_or_edit_modal(modal, mode, project_code, connection, table_group, selected=None): @@ -445,7 +445,7 @@ def show_add_or_edit_modal(modal, mode, project_code, connection, table_group, s st.success(success_message) time.sleep(1) modal.close() - st.experimental_rerun() + st.rerun() def run_tests(modal, project_code, selected): From 586b3b077bb6b6f3de7f491e474db6e5899a2d1b Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Wed, 14 Aug 2024 01:16:20 -0400 Subject: [PATCH 02/78] refactor(router): use streamlit native multipage navigation --- testgen/ui/app.py | 27 +++---- testgen/ui/bootstrap.py | 2 +- testgen/ui/navigation/page.py | 22 ++++++ testgen/ui/navigation/router.py | 73 ++++++++++--------- testgen/ui/services/authentication_service.py | 64 ---------------- testgen/ui/services/javascript_service.py | 2 +- testgen/ui/services/user_session_service.py | 69 ++++++++++++++++++ testgen/ui/session.py | 14 +++- testgen/ui/views/login.py | 52 ++++++++----- 9 files changed, 187 insertions(+), 138 deletions(-) create mode 100644 testgen/ui/services/user_session_service.py diff --git 
a/testgen/ui/app.py b/testgen/ui/app.py index edcf2a9..193450b 100644 --- a/testgen/ui/app.py +++ b/testgen/ui/app.py @@ -8,8 +8,8 @@ from testgen.ui import bootstrap from testgen.ui.components import widgets as testgen from testgen.ui.queries import project_queries -from testgen.ui.services import authentication_service, javascript_service from testgen.ui.services import database_service as db +from testgen.ui.services import javascript_service, user_session_service from testgen.ui.session import session @@ -35,14 +35,11 @@ def render(log_level: int = logging.INFO): if not session.project and len(projects) > 0: set_current_project(projects[0]["code"]) - if session.renders is None: - session.renders = 0 + if session.authentication_status is None and not session.logging_out: + user_session_service.load_user_session() - if not session.logging_out and session.authentication_status is None: - authentication_service.load_user_session() - testgen.location(on_change=set_current_location) - - if session.authentication_status and not session.logging_out: + hide_sidebar = not session.authentication_status or session.logging_in + if not hide_sidebar: with st.sidebar: testgen.sidebar( menu=application.menu.update_version(application.get_version()), @@ -51,14 +48,8 @@ def render(log_level: int = logging.INFO): current_project=session.project, on_logout=authentication_service.end_user_session, ) - - if session.renders is not None: - session.renders += 1 - - if session.renders > 0 and session.current_page: - application.router.navigate(to=session.current_page, with_args=session.current_page_args) - - application.logger.debug(f"location status: {session.current_page} {session.current_page_args}") + + application.router.run(hide_sidebar) @st.cache_resource(validate=lambda _: not settings.IS_DEBUG, show_spinner=False) @@ -91,6 +82,10 @@ def set_current_project(project_code: str) -> None: session.project = project_code +def get_image_path(path: str) -> str: + return 
str(Path(__file__).parent / path) + + if __name__ == "__main__": log_level = logging.INFO if settings.IS_DEBUG_LOG_LEVEL or "--debug" in sys.argv: diff --git a/testgen/ui/bootstrap.py b/testgen/ui/bootstrap.py index ea6e65d..e4606d1 100644 --- a/testgen/ui/bootstrap.py +++ b/testgen/ui/bootstrap.py @@ -77,7 +77,7 @@ def run(log_level: int = logging.INFO) -> Application: pages.append(maybe_page) return Application( - router=Router(routes=pages, default=NotFoundPage), + router=Router(routes=pages), menu=Menu( items=list( { diff --git a/testgen/ui/navigation/page.py b/testgen/ui/navigation/page.py index 35b7a44..1937802 100644 --- a/testgen/ui/navigation/page.py +++ b/testgen/ui/navigation/page.py @@ -1,10 +1,15 @@ import abc +import logging import typing +import streamlit as st + import testgen.ui.navigation.router from testgen.ui.navigation.menu import MenuItem +from testgen.ui.session import session CanActivateGuard = typing.Callable[[], bool | str] +LOG = logging.getLogger("testgen") class Page(abc.ABC): @@ -14,6 +19,23 @@ class Page(abc.ABC): def __init__(self, router: testgen.ui.navigation.router.Router) -> None: self.router = router + self.streamlit_page = st.Page(self._navigate, url_path=self.path, title=self.path, default=not self.path) + + if "/" in self.path: + st.error(f"Cannot use multi-level path '{self.path}' in current Streamlit version: https://github.com/streamlit/streamlit/issues/8971") + st.stop() + + def _navigate(self) -> None: + for guard in self.can_activate or []: + can_activate = guard() + if type(can_activate) == str: + return self.router.navigate(to=can_activate) + + if not can_activate: + session.page_pending_login = self.path + return self.router.navigate(to="") + + self.render(**(session.current_page_args or {})) @abc.abstractmethod def render(self, **kwargs) -> None: diff --git a/testgen/ui/navigation/router.py b/testgen/ui/navigation/router.py index cad50d5..de31db6 100644 --- a/testgen/ui/navigation/router.py +++ 
b/testgen/ui/navigation/router.py @@ -1,63 +1,66 @@ from __future__ import annotations import logging -import typing import streamlit as st import testgen.ui.navigation.page +from testgen.ui.session import session from testgen.utils.singleton import Singleton -CanActivateGuard = typing.Callable[[], bool | str] - LOG = logging.getLogger("testgen") class Router(Singleton): - active: testgen.ui.navigation.page.Page | None - _default: type[testgen.ui.navigation.page.Page] | None - _routes: dict[str, type[testgen.ui.navigation.page.Page]] + _routes: dict[str, testgen.ui.navigation.page.Page] def __init__( self, /, routes: list[type[testgen.ui.navigation.page.Page]], - default: type[testgen.ui.navigation.page.Page] | None = None, ) -> None: - self._routes = {} - - for route in routes: - self._routes[route.path] = route - - self.active = None - self._default = default - if self._default: - self._routes[self._default.path] = self._default - + self._routes = {route.path: route(self) for route in routes} + + def run(self, hide_sidebar=False) -> None: + streamlit_pages = [route.streamlit_page for route in self._routes.values()] + + # Don't use position="hidden" when our custom sidebar needs to be displayed + # The default [data-testid="stSidebarNav"] element seems to be needed to keep the sidebar DOM stable + # Otherwise anything custom in the sidebar randomly flickers on page navigation + current_page = st.navigation(streamlit_pages, position="hidden" if hide_sidebar else "sidebar") + + # This hack is needed because the auth cookie is not retrieved on the first run + # We have to store the page and wait for the second run + + if not session.cookies_ready: + session.cookies_ready = True + session.page_pending_cookies = current_page + else: + current_page = session.page_pending_cookies or current_page + session.page_pending_cookies = None + + if session.page_args_pending_router: + session.current_page_args = session.page_args_pending_router + 
st.query_params.from_dict(session.page_args_pending_router) + session.page_args_pending_router = None + + session.current_page = current_page.url_path + current_page.run() + + def navigate(self, /, to: str, with_args: dict | None = None) -> None: try: - route = self._routes[to] - - bc_source = route(self).path - - for guard in route.can_activate or []: - can_activate = guard() - if type(can_activate) == str: - return self.navigate(to=can_activate, with_args={}) - - if not can_activate and self._default: - return self.navigate(to=self._default.path, with_args=with_args) - - if not isinstance(self.active, route): - self.active = route(self) + if to != session.current_page: + route = self._routes[to] + session.page_args_pending_router = with_args + st.switch_page(route.streamlit_page) - self.active.render(**(with_args or {})) except KeyError as k: - error_message = f"{bc_source}: {k!s}" + error_message = f"{to}: {k!s}" st.error(error_message) LOG.exception(error_message) - return self.navigate(to=self._default.path, with_args=with_args) + return self.navigate(to="", with_args=with_args) except Exception as e: - error_message = f"{bc_source}: {e!s}" + error_message = f"{to}: {e!s}" st.error(error_message) LOG.exception(error_message) diff --git a/testgen/ui/services/authentication_service.py b/testgen/ui/services/authentication_service.py index f279c4b..2dfc5a2 100644 --- a/testgen/ui/services/authentication_service.py +++ b/testgen/ui/services/authentication_service.py @@ -1,11 +1,8 @@ # ruff: noqa: S105 -import datetime import logging import typing -import extra_streamlit_components as stx -import jwt import streamlit as st from testgen.common.encrypt import encrypt_ui_password @@ -14,47 +11,9 @@ RoleType = typing.Literal["admin", "edit", "read"] -JWT_HASHING_KEY = "dk_signature_key" -AUTH_TOKEN_COOKIE_NAME = "dk_cookie_name" -AUTH_TOKEN_EXPIRATION_DAYS = 5 - LOG = logging.getLogger("testgen") -def load_user_session() -> None: - cookies = 
stx.CookieManager(key="testgen.cookies.get") - token = cookies.get(AUTH_TOKEN_COOKIE_NAME) - if token is not None: - try: - token = jwt.decode(token, JWT_HASHING_KEY, algorithms=["HS256"]) - if token["exp_date"] > datetime.datetime.utcnow().timestamp(): - start_user_session(token["name"], token["username"]) - except Exception: - LOG.debug("Invalid auth token found on cookies", exc_info=True, stack_info=True) - - -def start_user_session(name: str, username: str) -> None: - session.name = name - session.username = username - session.auth_role = get_role_for_user(get_auth_data(), username) - session.authentication_status = True - if not session.current_page or session.current_page == "login": - session.current_page = "overview" - session.current_page_args = {} - session.logging_out = False - - -def end_user_session() -> None: - session.auth_role = None - session.authentication_status = None - session.current_page = "login" - session.current_page_args = {} - session.logging_out = True - - del session.name - del session.username - - def add_user(user): encrypted_password = encrypt_ui_password(user["password"]) schema = st.session_state["dbschema"] @@ -72,29 +31,6 @@ def edit_user(user): authentication_queries.edit_user(schema, user, encrypted_password) -def get_auth_data(): - auth_data = authentication_queries.get_users(session.dbschema) - - usernames = {} - preauthorized_list = [] - - for item in auth_data.itertuples(): - usernames[item.username] = { - "email": item.email, - "name": item.name, - "password": item.password, - "role": item.role, - } - if item.preauthorized: - preauthorized_list.append(item.email) - - return { - "credentials": {"usernames": usernames}, - "cookie": {"expiry_days": AUTH_TOKEN_EXPIRATION_DAYS, "key": JWT_HASHING_KEY, "name": AUTH_TOKEN_COOKIE_NAME}, - "preauthorized": {"emails": preauthorized_list}, - } - - def get_users(): return authentication_queries.get_users(session.dbschema) diff --git a/testgen/ui/services/javascript_service.py 
b/testgen/ui/services/javascript_service.py index 424bb6e..7b4ea32 100644 --- a/testgen/ui/services/javascript_service.py +++ b/testgen/ui/services/javascript_service.py @@ -2,7 +2,7 @@ from streamlit_javascript import st_javascript -from testgen.ui.services.authentication_service import AUTH_TOKEN_COOKIE_NAME +from testgen.ui.services.user_session_service import AUTH_TOKEN_COOKIE_NAME LOG = logging.getLogger("testgen") diff --git a/testgen/ui/services/user_session_service.py b/testgen/ui/services/user_session_service.py new file mode 100644 index 0000000..019a2d3 --- /dev/null +++ b/testgen/ui/services/user_session_service.py @@ -0,0 +1,69 @@ +import datetime +import logging + +import extra_streamlit_components as stx +import jwt + +from testgen.ui.queries import authentication_queries +from testgen.ui.services.authentication_service import get_role_for_user +from testgen.ui.session import session + +JWT_HASHING_KEY = "dk_signature_key" +AUTH_TOKEN_COOKIE_NAME = "dk_cookie_name" # noqa: S105 +AUTH_TOKEN_EXPIRATION_DAYS = 5 + +LOG = logging.getLogger("testgen") + + +def load_user_session() -> None: + # Replacing this with st.context.cookies does not work + # Because it does not update when cookies are deleted on logout + cookies = stx.CookieManager(key="testgen.cookies.get") + token = cookies.get(AUTH_TOKEN_COOKIE_NAME) + if token is not None: + try: + token = jwt.decode(token, JWT_HASHING_KEY, algorithms=["HS256"]) + if token["exp_date"] > datetime.datetime.utcnow().timestamp(): + start_user_session(token["name"], token["username"]) + except Exception: + LOG.debug("Invalid auth token found on cookies", exc_info=True, stack_info=True) + + +def start_user_session(name: str, username: str) -> None: + session.name = name + session.username = username + session.auth_role = get_role_for_user(get_auth_data(), username) + session.authentication_status = True + session.logging_out = False + + +def end_user_session() -> None: + session.auth_role = None + 
session.authentication_status = None + session.logging_out = True + + del session.name + del session.username + + +def get_auth_data(): + auth_data = authentication_queries.get_users(session.dbschema) + + usernames = {} + preauthorized_list = [] + + for item in auth_data.itertuples(): + usernames[item.username] = { + "email": item.email, + "name": item.name, + "password": item.password, + "role": item.role, + } + if item.preauthorized: + preauthorized_list.append(item.email) + + return { + "credentials": {"usernames": usernames}, + "cookie": {"expiry_days": AUTH_TOKEN_EXPIRATION_DAYS, "key": JWT_HASHING_KEY, "name": AUTH_TOKEN_COOKIE_NAME}, + "preauthorized": {"emails": preauthorized_list}, + } diff --git a/testgen/ui/session.py b/testgen/ui/session.py index 9608f6c..2aaeba9 100644 --- a/testgen/ui/session.py +++ b/testgen/ui/session.py @@ -1,13 +1,20 @@ import typing -from streamlit import session_state +import streamlit as st from streamlit.runtime.state import SessionStateProxy from testgen.utils.singleton import Singleton class TestgenSession(Singleton): - renders: int + cookies_ready: bool + logging_in: bool + logging_out: bool + page_pending_cookies: st.Page + page_pending_login: str + page_pending_sidebar: str + page_args_pending_router: dict + current_page: str current_page_args: dict @@ -17,7 +24,6 @@ class TestgenSession(Singleton): username: str authentication_status: bool auth_role: typing.Literal["admin", "edit", "read"] - logging_out: bool project: str add_project: bool @@ -43,4 +49,4 @@ def __delattr__(self, key: str) -> None: del state[key] -session = TestgenSession(session_state) +session = TestgenSession(st.session_state) diff --git a/testgen/ui/views/login.py b/testgen/ui/views/login.py index b86c3d8..728a214 100644 --- a/testgen/ui/views/login.py +++ b/testgen/ui/views/login.py @@ -1,21 +1,24 @@ +import logging import typing import streamlit as st import streamlit_authenticator as stauth from testgen.ui.navigation.page import Page -from 
testgen.ui.services import authentication_service, javascript_service +from testgen.ui.services import javascript_service, user_session_service from testgen.ui.session import session +LOG = logging.getLogger("testgen") + class LoginPage(Page): - path = "login" + path = "" can_activate: typing.ClassVar = [ - lambda: not session.authentication_status or "overview", + lambda: not session.authentication_status or session.logging_in or "overview", ] def render(self) -> None: - auth_data = authentication_service.get_auth_data() + auth_data = user_session_service.get_auth_data() authenticator = stauth.Authenticate( auth_data["credentials"], @@ -25,16 +28,31 @@ def render(self) -> None: auth_data["preauthorized"], ) - name, authentication_status, username = authenticator.login("Login", "main") - - if authentication_status is False: - st.error("Username or password is incorrect.") - - if authentication_status is None: - st.warning("Please enter your username and password.") - javascript_service.clear_component_states() - - session.authentication_status = authentication_status - - if authentication_status: - authentication_service.start_user_session(name, username) + _column_1, column_2, _column_3 = st.columns([0.25, 0.5, 0.25]) + with column_2: + st.markdown(""" +


+

Welcome to DataKitchen DataOps TestGen

+ """, unsafe_allow_html=True) + name, authentication_status, username = authenticator.login("Login") + + if authentication_status is False: + st.error("Username or password is incorrect.") + + if authentication_status is None: + javascript_service.clear_component_states() + + session.authentication_status = authentication_status + + if authentication_status: + user_session_service.start_user_session(name, username) + + # This hack is needed because the auth cookie is not set if navigation happens immediately + if session.logging_in: + session.logging_in = False + next_route = session.page_pending_login or "overview" + session.page_pending_login = None + self.router.navigate(next_route) + else: + session.logging_in = True + \ No newline at end of file From 9546acec33c59803dea14d71cc39b199af538d43 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Wed, 14 Aug 2024 01:21:08 -0400 Subject: [PATCH 03/78] refactor(components): use router in sidebar and breadcrumbs update dk logo --- testgen/ui/app.py | 9 ++- testgen/ui/assets/dk_icon.svg | 6 ++ testgen/ui/assets/favicon.ico | Bin 0 -> 9662 bytes .../frontend/js/components/breadcrumbs.js | 2 +- .../frontend/js/components/sidebar.js | 76 +++--------------- testgen/ui/components/frontend/js/main.js | 6 ++ testgen/ui/components/utils/component.py | 4 +- testgen/ui/components/widgets/breadcrumbs.py | 7 +- testgen/ui/components/widgets/sidebar.py | 62 +++++++------- 9 files changed, 64 insertions(+), 108 deletions(-) create mode 100755 testgen/ui/assets/dk_icon.svg create mode 100644 testgen/ui/assets/favicon.ico diff --git a/testgen/ui/app.py b/testgen/ui/app.py index 193450b..9192f0d 100644 --- a/testgen/ui/app.py +++ b/testgen/ui/app.py @@ -2,6 +2,7 @@ import sys import streamlit as st +from pathlib import Path from testgen import settings from testgen.common.docker_service import check_basic_configuration @@ -16,6 +17,7 @@ def render(log_level: int = logging.INFO): st.set_page_config( page_title="TestGen", + 
page_icon=get_image_path("assets/favicon.ico"), layout="wide", ) @@ -37,6 +39,11 @@ def render(log_level: int = logging.INFO): if session.authentication_status is None and not session.logging_out: user_session_service.load_user_session() + + st.logo( + image=get_image_path("assets/dk_logo.svg"), + icon_image=get_image_path("assets/dk_icon.svg") + ) hide_sidebar = not session.authentication_status or session.logging_in if not hide_sidebar: @@ -45,8 +52,6 @@ def render(log_level: int = logging.INFO): menu=application.menu.update_version(application.get_version()), username=session.username, current_page=session.current_page, - current_project=session.project, - on_logout=authentication_service.end_user_session, ) application.router.run(hide_sidebar) diff --git a/testgen/ui/assets/dk_icon.svg b/testgen/ui/assets/dk_icon.svg new file mode 100755 index 0000000..fb03eb4 --- /dev/null +++ b/testgen/ui/assets/dk_icon.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/testgen/ui/assets/favicon.ico b/testgen/ui/assets/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..fce46941a15d7cb82a64ba93331dfdbf25bf5a4d GIT binary patch literal 9662 zcmeI2e{2**6vr2_y(^+3BK`;@8h^x~0tx<3Osa6Rw+8_eKF(D@Yp+bUxftZLU zCi0`Aq)2S9rS07;)F6>S#6Tcmq3!K%OTk0~sL(?BB?ihtdp*B%cXPLUw|lpDx7YsT zHPhMb-n{v|H*aU&yg5M_g}>?31^jetFW5V7& zOL{WK?DFz*) z(NUr%zk~MIv&(Rr-!rn#FSR^(nuHp{)_rAV=VR5$n3D?N0`J1> zkZB>;8GeaQIYUDAIegU?KYS(Fte=;NR(~7#cbnU#cz!AHP#_^1MMeIQ^RwEc$jdYPDv-b7uB+m~)i&0bqIBt;``h;0xejb@0zZrKxQ?g` z;IAKhP29iYoKcQmreoJ(#B@H+U%9u|jXu+SC{KQWMZdUj*>JGOZ0u3xjhvs=qC(dh z?Uo0>-yQb;@?Ia>XIT#&vweb}c2{=Ov0MlAy};PNU4Hj~clRQ&+ly(8HY!S|i{jD1 zG|tar49LEcl5dCK9YIWUjwsD}#WbltisFwTHhPuwOWL+cM5BMEWar{XOtbwJus52l z9Yt~J=8)`i5I=m(`Cm9hE1*xeJ$}~^Vw$sv3oK%f?ER+tDDrz+@Egw0YS{l!O17o_ zha~yQ^Q7Z@Ct@1gwoY;BbXKTr;8*F6)c4H!rQlpuKNz|tX6mKmRtwoN1NH(W1`wpCF&I?1omFSFU7 z<)0*ZY9HyS9ohGm^Q-ir(f+~eZ2a;31DkuEANjc_uEP)9_N>n7|H=H1Dl_J< z+}t0xv%Fr{nYgpIqZ5c6<;7R%wjF0YpdKN|Yz&wV 
z&66*QrO5rn{cRAwk(@V~|0CDF%*bB2{rUNZ6nGf9x6P%9m5x`oH%;L0ZS0IprL9i$ z$HqZ^>I~`dI~gn8z`Xf3XJ@rXqjSM-(nsO`!^a?|LoD~a#fd%U5YE74_O5dNQs-~E z9?}jE%l+TX9OFFRZR9;kSMy}f&T7bGITI7x7w`X^Jzl#a-kq_t7rc9UFsHuT1HXAz z&mKF!P^e}t&l&$o>fv|aM4kxepLxehy*TfBNzcxF2(hGnRnb+qAYZ!}>^bJP67Vw}aP~_q&VF^iFaUq472CJnuV-i1VkkH(bFDJ- z$rAB19U8{5SjqG(9e%-bPz%g{Pr&>L&-j_Wew<9MGx>eBW$Tju%s7fETPEO~;0u0E zz;b~{VAprWZaQASr;XVkIR?@xOJHX?ViQ)gd-9s}u>Plv(HY4ysG?Kx;&{`aN##7pP-zA9+$0(R9Aih&w5dMP)^LQD?jS1 zbM41<6P|r0`O6oG4X#I))Cgrl6hGau_^^2fx_yOp>kqKAy_KEPg|S?!U*{~u|9^X! GEbt#XLdqHd literal 0 HcmV?d00001 diff --git a/testgen/ui/components/frontend/js/components/breadcrumbs.js b/testgen/ui/components/frontend/js/components/breadcrumbs.js index ce561b1..e8ba99e 100644 --- a/testgen/ui/components/frontend/js/components/breadcrumbs.js +++ b/testgen/ui/components/frontend/js/components/breadcrumbs.js @@ -42,7 +42,7 @@ const Breadcrumbs = (/** @type Properties */ props) => { }; function navigate(/** @type string */ path) { - window.parent.postMessage({ type: 'TestgenNavigationRequest', path: path }, '*'); + Streamlit.sendData(path); return false; } diff --git a/testgen/ui/components/frontend/js/components/sidebar.js b/testgen/ui/components/frontend/js/components/sidebar.js index 5df657b..56c5650 100644 --- a/testgen/ui/components/frontend/js/components/sidebar.js +++ b/testgen/ui/components/frontend/js/components/sidebar.js @@ -28,8 +28,7 @@ * @property {Menu} menu * @property {string} username * @property {string} current_page - * @property {string} current_project - * @property {string} auth_cookie_name + * @property {string} logout_path */ const van = window.top.van; const { a, button, div, i, img, label, option, select, span } = van.tags; @@ -46,7 +45,6 @@ const Sidebar = (/** @type {Properties} */ props) => { return div( {class: 'menu'}, - a({class: 'logo', href: `/#overview`, onclick: () => navigate('overview')}, img({ src: logo })), () => { const menuItems = van.val(props.menu).items; return div( @@ -58,7 
+56,7 @@ const Sidebar = (/** @type {Properties} */ props) => { ); }, button( - { class: `tg-button logout`, onclick: () => Sidebar.onLogout(van.val(props.auth_cookie_name)) }, + { class: `tg-button logout`, onclick: () => navigate(van.val(props.logout_path)) }, i({class: 'material-symbols-rounded'}, 'logout'), span('Logout'), ), @@ -87,7 +85,7 @@ const MenuItem = (/** @type {MenuItem} */ item, /** @type {string} */ currentPag }); return a( - {class: classes, href: `/#${item.page}`, onclick: () => navigate(item.page)}, + {class: classes, href: `/${item.page}`, onclick: () => navigate(item.page, van.val(currentPage))}, i({class: 'menu--item--icon material-symbols-rounded'}, item.icon), span({class: 'menu--item--label'}, item.label), ); @@ -123,85 +121,33 @@ const VersionRow = (/** @type string */ label, /** @type string */ version, icon ); }; -function navigate(/** @type string */ path) { - window.parent.postMessage({ type: 'TestgenNavigationRequest', path: path }, '*'); +function navigate(/** @type string */ path, /** @type string */ currentPage = null) { + if (Sidebar.StreamlitInstance && path !== currentPage) { + Sidebar.StreamlitInstance.sendData(path); + } return false; } -function isCurrentPage(itemPath, currentPage) { +function isCurrentPage(/** @type string */ itemPath, /** @type string */ currentPage) { const normalizedItemPath = normalizePath(itemPath); const normalizedCurrentPagePath = normalizePath(currentPage); const isTheSamePage = normalizedItemPath === normalizedCurrentPagePath; - const isASubPage = normalizedCurrentPagePath.includes(normalizedItemPath); + const isASubPage = normalizedCurrentPagePath.startsWith(`${normalizedItemPath}:`); return isTheSamePage || isASubPage; } function normalizePath(path) { - if (!path) { - return ''; - } - - return path.split('/').filter(p => p.length > 0).join('/'); + return path || ''; } -const b64LogoString = `PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4NCjwhLS0gR2VuZXJhdG9yOiBBZG9iZSBJbGx1c3RyYXRv 
-ciAyNC4xLjIsIFNWRyBFeHBvcnQgUGx1Zy1JbiAuIFNWRyBWZXJzaW9uOiA2LjAwIEJ1aWxkIDApICAtLT4NCjxzdmcgdmVyc2lvbj0iMS4xIiBpZD0iTGF -5ZXJfMSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayIgeD0iMH -B4IiB5PSIwcHgiDQoJIHZpZXdCb3g9IjAgMCA2MDIuMiAxMTcuNyIgc3R5bGU9ImVuYWJsZS1iYWNrZ3JvdW5kOm5ldyAwIDAgNjAyLjIgMTE3Ljc7IiB4b -Ww6c3BhY2U9InByZXNlcnZlIj4NCjxzdHlsZSB0eXBlPSJ0ZXh0L2NzcyI+DQoJLnN0MHtmaWxsOiNBQUQwNDY7fQ0KCS5zdDF7ZmlsbDojMDZBMDRBO30N -Cjwvc3R5bGU+DQo8cGF0aCBkPSJNMzcuMywxMDcuNmMwLjYsMCwxLjEsMCwxLjYtMC4xYy0yLjIsMC00LjUsMC4xLTYuOSwwLjFIMzcuM3oiLz4NCjxwYXR -oIGQ9Ik0zNy4zLDkuOUgzMmMyLjQsMCw0LjcsMCw2LjksMC4xQzM4LjMsOS45LDM3LjgsOS45LDM3LjMsOS45eiIvPg0KPHBhdGggY2xhc3M9InN0MCIgZD -0iTTg1LjYsNTguM2MwLTEuNy0wLjEtMy4zLTAuMy00LjljLTAuNi03LjgtMi41LTE0LjgtNS45LTIwLjhDNzYsMjYuNSw3MS41LDIxLjcsNjUuOSwxOA0KC -WMtMS4yLTAuOC0yLjMtMS42LTMuNi0yLjJjLTIuNy0xLjQtNS41LTIuNS04LjUtMy40TDExLjcsNTguN2w0Mi4yLDQ2LjRjMi45LTAuOSw1LjgtMiw4LjUt -My40YzAuOS0wLjUsMS44LTEuMSwyLjctMS43DQoJYzYtMy44LDEwLjktOC44LDE0LjQtMTUuMmMzLjYtNi40LDUuNS0xMy45LDUuOS0yMi40YzAuMS0wLjk -sMC4xLTEuOCwwLjEtMi43YzAtMC40LDAuMS0wLjcsMC4xLTEuMWMwLTAuMSwwLTAuMSwwLTAuMg0KCUM4NS41LDU4LjUsODUuNiw1OC40LDg1LjYsNTguM3 -oiLz4NCjxwYXRoIGNsYXNzPSJzdDEiIGQ9Ik01My44LDEyLjNjLTQuNi0xLjQtOS42LTIuMi0xNC45LTIuNGMtMi4yLDAtNC41LTAuMS02LjktMC4xSDE2L -jVjLTIuNywwLTQuOCwyLjItNC44LDQuOHYyOS44djE0LjNMNTMuOCwxMi4zDQoJQzUzLjgsMTIuMyw1My44LDEyLjMsNTMuOCwxMi4zeiIvPg0KPHBhdGgg -Y2xhc3M9InN0MSIgZD0iTTExLjcsNzN2MjkuOGMwLDIuNywyLjIsNC44LDQuOCw0LjhIMzJjMi40LDAsNC43LDAsNi45LTAuMWM1LjMtMC4xLDEwLjMtMSw -xNC45LTIuNGMwLDAsMCwwLDAsMEwxMS43LDU4LjdWNzMNCgl6Ii8+DQo8cGF0aCBjbGFzcz0ic3QwIiBkPSJNOTQuNSw5LjlINjkuM2MwLjMsMC4yLDAuNS -wwLjQsMC44LDAuN2M2LjQsNC40LDExLjcsMTAsMTUuNiwxNy4xYzQsNy4yLDYuMywxNS41LDcsMjQuOA0KCWMwLjIsMS45LDAuMywzLjksMC4zLDUuOWMwL -DAuMSwwLDAuMiwwLDAuM2MwLDAuMSwwLDAuMSwwLDAuMmMwLDAuNC0wLjEsMC44LTAuMSwxLjNjMCwxLjEtMC4xLDIuMS0wLjIsMy4yDQoJYy0wLjUsMTAu 
-MS0yLjgsMTktNy4xLDI2LjdjLTQuMSw3LjQtOS43LDEzLjItMTYuNSwxNy42YzAsMCwwLDAuMSwwLDAuMWgyNS40YzIuNywwLDQuOC0yLjIsNC44LTQuOHY -tODgNCglDOTkuNCwxMi4xLDk3LjIsOS45LDk0LjUsOS45eiIvPg0KPHBhdGggY2xhc3M9InN0MSIgZD0iTTEzMiwyOGgyNC4xYzE3LjYsMCwyNy44LDEzLj -QsMjcuOCwzMC45YzAsMTcuNC0xMC4zLDMwLjYtMjcuOCwzMC42SDEzMlYyOHogTTE1Ni4xLDc5LjcNCgljMTEuMiwwLDE2LjktOS41LDE2LjktMjAuOWMwL -TExLjUtNS43LTIxLjEtMTYuOS0yMS4xaC0xMy4ydjQySDE1Ni4xeiIvPg0KPHBhdGggY2xhc3M9InN0MSIgZD0iTTIzMS45LDQ3Ljh2NDEuN2gtMTAuNHYt -NS42Yy0yLjksNC41LTguNyw2LjUtMTMuOCw2LjVjLTExLDAtMjAuNS04LjUtMjAuNS0yMS44YzAtMTMuNCw5LjUtMjEuNywyMC40LTIxLjcNCgljNS4zLDA -sMTEuMSwyLjEsMTMuOSw2LjR2LTUuNUgyMzEuOXogTTIyMS40LDY4LjVjMC03LjMtNi4xLTEyLTEyLTEyYy02LjQsMC0xMS43LDUtMTEuNywxMmMwLDcsNS -4zLDEyLjEsMTEuNywxMi4xDQoJQzIxNS43LDgwLjYsMjIxLjQsNzUuOCwyMjEuNCw2OC41eiIvPg0KPHBhdGggY2xhc3M9InN0MSIgZD0iTTI2Myw1Ni4xa -C04Ljh2MzMuNGgtMTAuNFY1Ni4xaC03LjV2LTguM2g3LjVWMzIuNWgxMC40djE1LjRoOC44VjU2LjF6Ii8+DQo8cGF0aCBjbGFzcz0ic3QxIiBkPSJNMzA5 -LDQ3Ljh2NDEuN2gtMTAuNHYtNS42Yy0yLjksNC41LTguNyw2LjUtMTMuOCw2LjVjLTExLDAtMjAuNS04LjUtMjAuNS0yMS44YzAtMTMuNCw5LjUtMjEuNyw -yMC40LTIxLjcNCgljNS4zLDAsMTEuMSwyLjEsMTMuOSw2LjR2LTUuNUgzMDl6IE0yOTguNCw2OC41YzAtNy4zLTYuMS0xMi0xMi0xMmMtNi40LDAtMTEuNy -w1LTExLjcsMTJjMCw3LDUuMywxMi4xLDExLjcsMTIuMQ0KCUMyOTIuNyw4MC42LDI5OC40LDc1LjgsMjk4LjQsNjguNXoiLz4NCjxwYXRoIGNsYXNzPSJzd -DEiIGQ9Ik0zNTQuNiw4OS41bC0yMS4yLTIzLjR2MjMuNGgtMTAuOFYyOGgxMC44djIzLjNMMzUwLjcsMjhoMTMuNWwtMjMuNSwzMC42bDI4LjYsMzAuOUgz -NTQuNnoiLz4NCjxwYXRoIGNsYXNzPSJzdDEiIGQ9Ik0zNzMuNywzMy4xYzAtMy43LDMuMS02LjMsNi44LTYuM2MzLjcsMCw2LjcsMi43LDYuNyw2LjNjMCw -zLjYtMi45LDYuMy02LjcsNi4zDQoJQzM3Ni45LDM5LjQsMzczLjcsMzYuNiwzNzMuNywzMy4xeiBNMzc1LjMsNDcuOGgxMC40djQxLjdoLTEwLjRWNDcuOH -oiLz4NCjxwYXRoIGNsYXNzPSJzdDEiIGQ9Ik00MTcuNCw1Ni4xaC04Ljh2MzMuNGgtMTAuNFY1Ni4xaC03LjV2LTguM2g3LjVWMzIuNWgxMC40djE1LjRoO -C44VjU2LjF6Ii8+DQo8cGF0aCBjbGFzcz0ic3QxIiBkPSJNNDE3LjYsNjguNmMwLTEzLjIsMTAuNi0yMS43LDIyLjctMjEuN2M3LjIsMCwxMy4xLDMuMSwx 
-Nyw4bC03LjQsNS44Yy0yLjEtMi42LTUuNy00LjItOS40LTQuMg0KCWMtNy4yLDAtMTIuNCw1LTEyLjQsMTJjMCw3LDUuMiwxMiwxMi40LDEyYzMuNywwLDc -uMi0xLjYsOS40LTQuMmw3LjQsNS44Yy0zLjgsNC44LTkuNyw4LTE3LDhDNDI4LjMsOTAuMyw0MTcuNiw4MS44LDQxNy42LDY4LjZ6Ig0KCS8+DQo8cGF0aC -BjbGFzcz0ic3QxIiBkPSJNNTAwLjcsNjYuMXYyMy40aC0xMC40VjY3LjFjMC02LjYtNC0xMC04LjctMTBjLTQuNywwLTEwLjYsMi42LTEwLjYsMTAuNnYyM -S44aC0xMC40di02NGgxMC40djI4LjYNCgljMi4xLTUsOC43LTcuMiwxMi45LTcuMkM0OTQuOCw0Ni45LDUwMC43LDU0LDUwMC43LDY2LjF6Ii8+DQo8cGF0 -aCBjbGFzcz0ic3QxIiBkPSJNNTQ3LjUsNzIuM2gtMzIuMmMxLjIsNS44LDUuNiw4LjcsMTEuOCw4LjdjNC42LDAsOC44LTEuOCwxMS4zLTUuMmw2LjksNS4 -zYy0zLjgsNi4xLTExLjIsOS4zLTE4LjcsOS4zDQoJYy0xMi41LDAtMjItOC43LTIyLTIxLjhjMC0xMy4zLDEwLTIxLjcsMjEuOS0yMS43YzEyLDAsMjEuMy -w4LjMsMjEuMywyMS4zQzU0Ny44LDY5LjQsNTQ3LjcsNzAuNyw1NDcuNSw3Mi4zeiBNNTM3LjQsNjUNCgljLTAuNi01LjctNS05LTEwLjgtOWMtNS42LDAtM -TAuMSwyLjctMTEuMyw5SDUzNy40eiIvPg0KPHBhdGggY2xhc3M9InN0MSIgZD0iTTU5MS45LDY2LjF2MjMuNGgtMTAuNFY2Ny4xYzAtNi42LTQtMTAtOC43 -LTEwYy00LjcsMC0xMC42LDIuNi0xMC42LDEwLjZ2MjEuOGgtMTAuNFY0Ny44aDEwLjR2Ni42DQoJYzIuMS01LjIsOC43LTcuNSwxMi45LTcuNUM1ODUuOSw -0Ni45LDU5MS45LDU0LDU5MS45LDY2LjF6Ii8+DQo8L3N2Zz4NCg==` -const logo = `data:image/svg+xml;base64,${b64LogoString}`; - const stylesheet = new CSSStyleSheet(); stylesheet.replace(` .menu { position: relative; display: flex; flex-direction: column; - height: 100%; - background: var(--sidebar-background-color); -} - -.menu .logo { - margin: 24px 16px 16px; + height: calc(100% - 76px); } .menu > .menu--username { diff --git a/testgen/ui/components/frontend/js/main.js b/testgen/ui/components/frontend/js/main.js index 6aa9099..3e0ce2a 100644 --- a/testgen/ui/components/frontend/js/main.js +++ b/testgen/ui/components/frontend/js/main.js @@ -49,6 +49,12 @@ window.addEventListener('message', (event) => { if (componentId === 'sidebar') { window.top.testgen.components.Sidebar.onLogout = logout; window.top.testgen.components.Sidebar.onProjectChanged = changeProject; + + // The parent element 
[data-testid="stSidebarUserContent"] randoms flickers on page navigation + // The [data-testid="stSidebarContent"] element seems to be stable + // But only when the default [data-testid="stSidebarNav"] navbar element is present + mountPoint = window.top.document.querySelector('[data-testid="stSidebarContent"]'); + window.top.testgen.components.Sidebar.StreamlitInstance = Streamlit; } diff --git a/testgen/ui/components/utils/component.py b/testgen/ui/components/utils/component.py index f3e0a7f..c81ced4 100644 --- a/testgen/ui/components/utils/component.py +++ b/testgen/ui/components/utils/component.py @@ -6,8 +6,8 @@ component_function = components.declare_component("testgen", path=components_dir) -def component(*, id_, props, key=None, default=None): +def component(*, id_, props, key=None, default=None, on_change=None): component_props = props if not component_props: component_props = {} - return component_function(id=id_, props=component_props, key=key, default=default) + return component_function(id=id_, props=component_props, key=key, default=default, on_change=on_change) diff --git a/testgen/ui/components/widgets/breadcrumbs.py b/testgen/ui/components/widgets/breadcrumbs.py index 8917be6..f997e9a 100644 --- a/testgen/ui/components/widgets/breadcrumbs.py +++ b/testgen/ui/components/widgets/breadcrumbs.py @@ -2,6 +2,7 @@ import typing from testgen.ui.components.utils.component import component +from testgen.ui.navigation.router import Router LOG = logging.getLogger("testgen") @@ -19,13 +20,13 @@ def breadcrumbs( :param breadcrumbs: list of dicts with label and path """ - component( + path = component( id_="breadcrumbs", key=key, - default={}, props={"breadcrumbs": breadcrumbs}, ) - + if path: + Router().navigate(to=path) class Breadcrumb(typing.TypedDict): path: str | None diff --git a/testgen/ui/components/widgets/sidebar.py b/testgen/ui/components/widgets/sidebar.py index 47d1fb0..2a9e880 100644 --- a/testgen/ui/components/widgets/sidebar.py +++ 
b/testgen/ui/components/widgets/sidebar.py @@ -1,24 +1,22 @@ -import dataclasses import logging -import typing -import streamlit as st - -from testgen.ui.components.utils.callbacks import register_callback from testgen.ui.components.utils.component import component from testgen.ui.navigation.menu import Menu -from testgen.ui.services import authentication_service +from testgen.ui.navigation.router import Router +from testgen.ui.services import javascript_service, user_session_service +from testgen.ui.session import session LOG = logging.getLogger("testgen") +SIDEBAR_KEY = "testgen:sidebar" +LOGOUT_PATH = "logout" + def sidebar( - key: str = "testgen:sidebar", + key: str = SIDEBAR_KEY, username: str | None = None, menu: Menu = None, current_page: str | None = None, - current_project: str | None = None, - on_logout: typing.Callable[[], None] | None = None, ) -> None: """ Testgen custom component to display a styled menu over streamlit's @@ -29,10 +27,12 @@ def sidebar( :param username: username to display at the bottom of the menu :param menu: menu object with all root pages :param current_page: page address to highlight the selected item - :param on_logout: callback for when user clicks logout - :param on_project_changed: callback for when user switches projects """ - register_callback(key, _handle_callbacks, key, on_logout) + + if session.page_pending_sidebar is not None: + path = session.page_pending_sidebar + session.page_pending_sidebar = None + Router().navigate(to=path) component( id_="sidebar", @@ -40,30 +40,22 @@ def sidebar( "username": username, "menu": menu.filter_for_current_user().sort_items().asdict(), "current_page": current_page, - "current_project": current_project, - "auth_cookie_name": authentication_service.AUTH_TOKEN_COOKIE_NAME, + "logout_path": LOGOUT_PATH, }, key=key, - default={}, + on_change=on_change, ) - -def _handle_callbacks( - key: str, - on_logout: typing.Callable[[], None] | None = None, -): - action = st.session_state[key] - action 
= MenuAction(**action) - - if action.logout and on_logout: - return on_logout() - - -class Project(typing.TypedDict): - code: str - name: str - - -@dataclasses.dataclass -class MenuAction: - logout: bool | None = None +def on_change(): + # We cannot navigate directly here + # because st.switch_page uses st.rerun under the hood + # and we get a "Calling st.rerun() within a callback is a noop" error + # So we store the path and navigate on the next run + + path = getattr(session, SIDEBAR_KEY) + if path == LOGOUT_PATH: + javascript_service.clear_component_states() + user_session_service.end_user_session() + session.page_pending_sidebar = "" + else: + session.page_pending_sidebar = path From 100d41a5a26ff3c3b0a79da039b860cba6b73943 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Wed, 14 Aug 2024 01:23:49 -0400 Subject: [PATCH 04/78] refactor(pages): use router and single-level path for all pages --- testgen/ui/views/connections.py | 9 +++++---- testgen/ui/views/overview.py | 2 +- testgen/ui/views/profiling_anomalies.py | 4 ++-- testgen/ui/views/profiling_results.py | 4 ++-- testgen/ui/views/profiling_summary.py | 14 +++++--------- testgen/ui/views/project_settings.py | 4 ++-- testgen/ui/views/table_groups.py | 11 ++++++----- testgen/ui/views/test_definitions.py | 10 +++++----- testgen/ui/views/test_results.py | 6 +++--- testgen/ui/views/test_runs.py | 8 +++----- testgen/ui/views/test_suites.py | 21 +++++++++++---------- 11 files changed, 45 insertions(+), 48 deletions(-) diff --git a/testgen/ui/views/connections.py b/testgen/ui/views/connections.py index b6a15c0..5904b3c 100644 --- a/testgen/ui/views/connections.py +++ b/testgen/ui/views/connections.py @@ -18,7 +18,7 @@ class ConnectionsPage(Page): path = "connections" can_activate: typing.ClassVar = [ - lambda: session.authentication_status or "login", + lambda: session.authentication_status, ] menu_item = MenuItem(icon="database", label="Data Configuration", order=3) @@ -53,9 +53,10 @@ def render(self) -> None: 
): st.session_state["connection"] = connection.to_dict() - session.current_page = "connections/table-groups" - session.current_page_args = {"connection_id": connection["connection_id"]} - st.experimental_rerun() + self.router.navigate( + "connections:table-groups", + {"connection_id": connection["connection_id"]}, + ) create_qc_schema_modal = testgen.Modal(title=None, key="dk-create-qc-schema-modal", max_width=1100) diff --git a/testgen/ui/views/overview.py b/testgen/ui/views/overview.py index 48a1954..43e2b6e 100644 --- a/testgen/ui/views/overview.py +++ b/testgen/ui/views/overview.py @@ -14,7 +14,7 @@ class OverviewPage(Page): path = "overview" can_activate: typing.ClassVar = [ - lambda: session.authentication_status or "login", + lambda: session.authentication_status, ] menu_item = MenuItem(icon="home", label="Overview", order=0) diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py index cf42b01..c689604 100644 --- a/testgen/ui/views/profiling_anomalies.py +++ b/testgen/ui/views/profiling_anomalies.py @@ -14,9 +14,9 @@ class ProfilingAnomaliesPage(Page): - path = "profiling/hygiene" + path = "profiling:hygiene" can_activate: typing.ClassVar = [ - lambda: session.authentication_status or "login", + lambda: session.authentication_status, ] def render(self) -> None: diff --git a/testgen/ui/views/profiling_results.py b/testgen/ui/views/profiling_results.py index 71e2188..10ed4ad 100644 --- a/testgen/ui/views/profiling_results.py +++ b/testgen/ui/views/profiling_results.py @@ -14,9 +14,9 @@ class ProfilingResultsPage(Page): - path = "profiling/results" + path = "profiling:results" can_activate: typing.ClassVar = [ - lambda: session.authentication_status or "login", + lambda: session.authentication_status, ] def render(self) -> None: diff --git a/testgen/ui/views/profiling_summary.py b/testgen/ui/views/profiling_summary.py index 126777a..db1eb0d 100644 --- a/testgen/ui/views/profiling_summary.py +++ 
b/testgen/ui/views/profiling_summary.py @@ -19,7 +19,7 @@ class DataProfilingPage(Page): path = "profiling" can_activate: typing.ClassVar = [ - lambda: session.authentication_status or "login", + lambda: session.authentication_status, ] menu_item = MenuItem(icon="problem", label="Data Profiling", order=1) @@ -56,7 +56,7 @@ def render(self) -> None: dct_selected_rows = fm.render_grid_select(df, show_columns) - open_drill_downs(dct_selected_rows, tool_bar.short_slots) + open_drill_downs(dct_selected_rows, tool_bar.short_slots, self.router) if dct_selected_rows: show_record_detail(dct_selected_rows[0]) @@ -107,7 +107,7 @@ def get_db_profiling_runs(str_project_code, str_tg=None): return db.retrieve_data(str_sql), show_columns -def open_drill_downs(dct_selected_rows, button_slots): +def open_drill_downs(dct_selected_rows, button_slots, router): dct_selected_row = None if dct_selected_rows: dct_selected_row = dct_selected_rows[0] @@ -119,9 +119,7 @@ def open_drill_downs(dct_selected_rows, button_slots): disabled=not dct_selected_rows, ): st.session_state["drill_profile_run"] = dct_selected_row["profiling_run_id"] - session.current_page = "profiling/results" - session.current_page_args = {} - st.rerun() + router.navigate("profiling:results") if button_slots[1].button( f":{'gray' if not dct_selected_rows else 'green'}[Hygiene →]", @@ -131,9 +129,7 @@ def open_drill_downs(dct_selected_rows, button_slots): ): st.session_state["drill_profile_run"] = dct_selected_row["profiling_run_id"] st.session_state["drill_profile_tg"] = dct_selected_row["table_groups_id"] - session.current_page = "profiling/hygiene" - session.current_page_args = {} - st.rerun() + router.navigate("profiling:hygiene") def show_record_detail(dct_selected_row): diff --git a/testgen/ui/views/project_settings.py b/testgen/ui/views/project_settings.py index 38ba7e2..1aece63 100644 --- a/testgen/ui/views/project_settings.py +++ b/testgen/ui/views/project_settings.py @@ -11,9 +11,9 @@ class 
ProjectSettingsPage(Page): - path = "settings/project" + path = "settings" can_activate: typing.ClassVar = [ - lambda: session.authentication_status or "login", + lambda: session.authentication_status, lambda: session.project is not None or "overview", ] menu_item = MenuItem(icon="settings", label="Settings", order=100) diff --git a/testgen/ui/views/table_groups.py b/testgen/ui/views/table_groups.py index 3aab04c..a39c3eb 100644 --- a/testgen/ui/views/table_groups.py +++ b/testgen/ui/views/table_groups.py @@ -17,10 +17,10 @@ class TableGroupsPage(Page): - path = "connections/table-groups" + path = "connections:table-groups" can_activate: typing.ClassVar = [ lambda: authentication_service.current_user_has_admin_role() or "overview", - lambda: session.authentication_status or "login", + lambda: session.authentication_status, ] def render(self, connection_id: int | None = None) -> None: @@ -102,9 +102,10 @@ def render(self, connection_id: int | None = None) -> None: ): st.session_state["table_group"] = selected[0] - session.current_page = "connections/table-groups/test-suites" - session.current_page_args = {"connection_id": connection_id, "table_group_id": selected[0]["id"]} - st.rerun() + self.router.navigate( + "connections:test-suites", + {"connection_id": connection_id, "table_group_id": selected[0]["id"]}, + ) if add_modal.is_open(): show_add_or_edit_modal(add_modal, "add", project_code, connection) diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py index d2d8a78..f8813ca 100644 --- a/testgen/ui/views/test_definitions.py +++ b/testgen/ui/views/test_definitions.py @@ -22,9 +22,9 @@ class TestDefinitionsPage(Page): - path = "tests/definitions" + path = "test-definitions" can_activate: typing.ClassVar = [ - lambda: session.authentication_status or "login", + lambda: session.authentication_status, ] breadcrumbs: typing.ClassVar = [ {"label": "Overview", "path": "overview"}, @@ -172,12 +172,12 @@ def render(self, **_) -> None: 
class TestDefinitionsPageFromSuite(TestDefinitionsPage): - path = "connections/table-groups/test-suites/test-definitions" + path = "connections:test-definitions" breadcrumbs: typing.ClassVar = [ {"label": "Overview", "path": "overview"}, {"label": "Connections", "path": "connections"}, - {"label": "Table Groups", "path": "connections/table-groups"}, - {"label": "Test Suites", "path": "connections/table-groups/test-suites"}, + {"label": "Table Groups", "path": "connections:table-groups"}, + {"label": "Test Suites", "path": "connections:test-suites"}, {"label": "Test Definitions", "path": None}, ] menu_item = None diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py index 44c1ed2..3bfe916 100644 --- a/testgen/ui/views/test_results.py +++ b/testgen/ui/views/test_results.py @@ -22,9 +22,9 @@ class TestResultsPage(Page): - path = "tests/results" + path = "test-runs:results" can_activate: typing.ClassVar = [ - lambda: session.authentication_status or "login", + lambda: session.authentication_status, lambda: session.project != None or "overview", ] @@ -34,7 +34,7 @@ def render(self) -> None: "https://docs.datakitchen.io/article/dataops-testgen-help/test-results", lst_breadcrumbs=[ {"label": "Overview", "path": "overview"}, - {"label": "Test Runs", "path": "tests/runs"}, + {"label": "Test Runs", "path": "test-runs"}, {"label": "Test Results", "path": None}, ], ) diff --git a/testgen/ui/views/test_runs.py b/testgen/ui/views/test_runs.py index d2d8f49..f2d740d 100644 --- a/testgen/ui/views/test_runs.py +++ b/testgen/ui/views/test_runs.py @@ -15,9 +15,9 @@ class TestRunsPage(Page): - path = "tests/runs" + path = "test-runs" can_activate: typing.ClassVar = [ - lambda: session.authentication_status or "login", + lambda: session.authentication_status, lambda: session.project != None or "overview", ] menu_item = MenuItem(icon="labs", label="Data Quality Testing", order=2) @@ -70,9 +70,7 @@ def render(self) -> None: disabled=not dct_selected_row, ): 
st.session_state["drill_test_run"] = dct_selected_row["test_run_id"] - session.current_page = "tests/results" - session.current_page_args = {} - st.rerun() + self.router.navigate("test-runs:results") if dct_selected_rows: open_record_detail( diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py index f409b7f..2d129e8 100644 --- a/testgen/ui/views/test_suites.py +++ b/testgen/ui/views/test_suites.py @@ -18,10 +18,10 @@ class TestSuitesPage(Page): - path = "connections/table-groups/test-suites" + path = "connections:test-suites" can_activate: typing.ClassVar = [ lambda: authentication_service.current_user_has_admin_role() or "overview", - lambda: session.authentication_status or "login", + lambda: session.authentication_status, ] def render(self, connection_id: str | None = None, table_group_id: str | None = None) -> None: @@ -31,7 +31,7 @@ def render(self, connection_id: str | None = None, table_group_id: str | None = lst_breadcrumbs=[ {"label": "Overview", "path": "overview"}, {"label": "Connections", "path": "connections"}, - {"label": "Table Groups", "path": "connections/table-groups"}, + {"label": "Table Groups", "path": "connections:table-groups"}, {"label": "Test Suites", "path": None}, ], ) @@ -109,13 +109,14 @@ def render(self, connection_id: str | None = None, table_group_id: str | None = ): st.session_state["test_suite"] = selected[0] - session.current_page = "connections/table-groups/test-suites/test-definitions" - session.current_page_args = { - "connection_id": connection, - "table_group_id": table_group_id, - "test_suite_id": selected[0]["id"], - } - st.rerun() + self.router.navigate( + "connections:test-definitions", + { + "connection_id": connection, + "table_group_id": table_group_id, + "test_suite_id": selected[0]["id"], + }, + ) if add_modal.is_open(): show_add_or_edit_modal(add_modal, "add", project_code, connection, table_group) From 52a400e7258b49bd09e5af5d908af1c5286be5d3 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan 
Date: Wed, 14 Aug 2024 01:24:44 -0400 Subject: [PATCH 05/78] refactor(cleanup): remove dead code --- testgen/ui/app.py | 6 -- testgen/ui/assets/scripts.js | 6 -- testgen/ui/bootstrap.py | 1 - .../frontend/js/components/location.js | 62 ------------------ testgen/ui/components/frontend/js/main.js | 15 ----- testgen/ui/components/widgets/__init__.py | 1 - testgen/ui/components/widgets/location.py | 65 ------------------- testgen/ui/views/not_found.py | 16 ----- 8 files changed, 172 deletions(-) delete mode 100644 testgen/ui/components/frontend/js/components/location.js delete mode 100644 testgen/ui/components/widgets/location.py delete mode 100644 testgen/ui/views/not_found.py diff --git a/testgen/ui/app.py b/testgen/ui/app.py index 9192f0d..4d592ae 100644 --- a/testgen/ui/app.py +++ b/testgen/ui/app.py @@ -77,12 +77,6 @@ def get_projects(): return projects - -def set_current_location(change: testgen.LocationChanged) -> None: - session.current_page = change.page - session.current_page_args = change.args - - def set_current_project(project_code: str) -> None: session.project = project_code diff --git a/testgen/ui/assets/scripts.js b/testgen/ui/assets/scripts.js index f5c5259..45da923 100644 --- a/testgen/ui/assets/scripts.js +++ b/testgen/ui/assets/scripts.js @@ -7,12 +7,6 @@ window.addEventListener('load', function() { }); window.addEventListener('message', async function(event) { - if (event.data.type === 'TestgenNavigationRequest') { - if (window.location.hash.replace('#', '') !== event.data.path) { - window.location.hash = event.data.path; - } - } - if (event.data.type === 'TestgenCopyToClipboard') { await copyToClipboard(event.data.text || ''); } diff --git a/testgen/ui/bootstrap.py b/testgen/ui/bootstrap.py index e4606d1..1f9a8f2 100644 --- a/testgen/ui/bootstrap.py +++ b/testgen/ui/bootstrap.py @@ -14,7 +14,6 @@ from testgen.ui.session import session from testgen.ui.views.connections import ConnectionsPage from testgen.ui.views.login import LoginPage -from 
testgen.ui.views.not_found import NotFoundPage from testgen.ui.views.overview import OverviewPage from testgen.ui.views.profiling_anomalies import ProfilingAnomaliesPage from testgen.ui.views.profiling_results import ProfilingResultsPage diff --git a/testgen/ui/components/frontend/js/components/location.js b/testgen/ui/components/frontend/js/components/location.js deleted file mode 100644 index e492788..0000000 --- a/testgen/ui/components/frontend/js/components/location.js +++ /dev/null @@ -1,62 +0,0 @@ -/** - * @typedef Properties - * @type {object} - * @property {boolean} initialized - * @property {string} current_page_code - */ -import van from '../van.min.js'; -import { Streamlit } from '../streamlit.js'; - -const Location = (/** @type Properties */ props) => { - Streamlit.setFrameHeight('0'); - - van.derive(() => { - syncHashToCurrentPage(van.val(props.current_page_code)); - }); - - if (!van.val(props.initialized)) { - Streamlit.sendData(extractLocation()); - } - - window.top.addEventListener("hashchange", function(event) { - if (event.newURL.includes('login')) { - return; - } - - const urlChanged = event.oldURL !== event.newURL; - if (urlChanged) { - Streamlit.sendData(extractLocation()); - } - }); - - return ''; -}; - - -function extractLocation() { - const hash = decodeURI(window.top.location.hash).replace('#', ''); - const parts = hash.split('?') - const page = parts[0] ? 
`${parts[0]}` : undefined; - const args = (parts[1] || '').split('&').filter(pair => !!pair).reduce((allArgs, pair) => { - const pairParts = pair.split('='); - allArgs[pairParts[0].trim()] = pairParts[1].trim() || ''; - return allArgs; - }, {}); - - return { page: page, args: args }; -} - -function isHashSynchronized(/** @type string */ hash, /** @type string */ currentPageCode) { - return btoa(decodeURI(hash || '').replace('#', '')) === currentPageCode; -} - -function syncHashToCurrentPage(/** @type string */ currentPageCode) { - if (!currentPageCode) { - return; - } - - const path = atob(currentPageCode); - window.parent.postMessage({ type: 'TestgenNavigationRequest', path: path }, '*'); -} - -export { Location }; diff --git a/testgen/ui/components/frontend/js/main.js b/testgen/ui/components/frontend/js/main.js index 3e0ce2a..3b91dd0 100644 --- a/testgen/ui/components/frontend/js/main.js +++ b/testgen/ui/components/frontend/js/main.js @@ -9,7 +9,6 @@ import van from './van.min.js'; import { Streamlit } from './streamlit.js'; import { Button } from './components/button.js' import { Select } from './components/select.js' -import { Location } from './components/location.js' import { Breadcrumbs } from './components/breadcrumbs.js' let currentWindowVan = van; @@ -19,7 +18,6 @@ const TestGenComponent = (/** @type {string} */ id, /** @type {object} */ props) const componentById = { select: Button, button: Select, - location: Location, breadcrumbs: Breadcrumbs, sidebar: window.top.testgen.components.Sidebar, }; @@ -47,9 +45,6 @@ window.addEventListener('message', (event) => { } if (componentId === 'sidebar') { - window.top.testgen.components.Sidebar.onLogout = logout; - window.top.testgen.components.Sidebar.onProjectChanged = changeProject; - // The parent element [data-testid="stSidebarUserContent"] randoms flickers on page navigation // The [data-testid="stSidebarContent"] element seems to be stable // But only when the default [data-testid="stSidebarNav"] navbar 
element is present @@ -89,16 +84,6 @@ function shouldRenderOutsideFrame(componentId) { return 'sidebar' === componentId; } -function logout(authCookieName) { - window.parent.postMessage({ type: 'TestgenLogout', cookie: authCookieName }, '*'); - Streamlit.sendData({ logout: true }); - return false; -} - -function changeProject(/** @type string */ projectCode) { - Streamlit.sendData({ change_to_project: projectCode }); -} - window.testgen = { states: {}, loadedStylesheets: {}, diff --git a/testgen/ui/components/widgets/__init__.py b/testgen/ui/components/widgets/__init__.py index 34721c1..0c9f727 100644 --- a/testgen/ui/components/widgets/__init__.py +++ b/testgen/ui/components/widgets/__init__.py @@ -1,6 +1,5 @@ # ruff: noqa: F401 from testgen.ui.components.widgets.breadcrumbs import breadcrumbs -from testgen.ui.components.widgets.location import LocationChanged, location from testgen.ui.components.widgets.modal import Modal from testgen.ui.components.widgets.sidebar import sidebar diff --git a/testgen/ui/components/widgets/location.py b/testgen/ui/components/widgets/location.py deleted file mode 100644 index 6fa8a9b..0000000 --- a/testgen/ui/components/widgets/location.py +++ /dev/null @@ -1,65 +0,0 @@ -import base64 -import dataclasses -import logging -import typing - -import streamlit as st - -from testgen.ui.components.utils.callbacks import register_callback -from testgen.ui.components.utils.component import component -from testgen.ui.session import session - -LOG = logging.getLogger("testgen") - - -def location( - key: str = "testgen:location", - on_change: typing.Callable[["LocationChanged"], None] | None = None, -) -> None: - """ - Testgen component to listen for location changes in the url hash. 
- - # Parameters - :param key: unique key to give the component a persisting state - :param on_change: callback for when the browser location changes - """ - register_callback(key, _handle_location_change, key, on_change) - - initialized = bool(session.renders and session.renders > 1) - current_page_code = _encode_page(session.current_page, session.current_page_args or {}) - - change = component( - id_="location", - key=key, - default={}, - props={"initialized": initialized, "current_page_code": current_page_code}, - ) - - if not initialized and change: - change = LocationChanged(**change) - if _encode_page(change.page, change.args) != current_page_code: - _handle_location_change(key, on_change) - - -def _handle_location_change(key: str, callback: typing.Callable[["LocationChanged"], None] | None): - if callback: - change = st.session_state[key] - if "page" not in change: - change["page"] = "overview" - return callback(LocationChanged(**change)) - - -def _encode_page(page: str, args: dict) -> str | None: - page_code = None - if page: - query_params = "&".join([f"{name}={value}" for name, value in args.items()]) - if query_params: - page = f"{page}?{query_params}" - page_code = base64.b64encode(page.encode()).decode() - return page_code - - -@dataclasses.dataclass -class LocationChanged: - page: str - args: dict = dataclasses.field(default_factory=dict) diff --git a/testgen/ui/views/not_found.py b/testgen/ui/views/not_found.py deleted file mode 100644 index 8fb203a..0000000 --- a/testgen/ui/views/not_found.py +++ /dev/null @@ -1,16 +0,0 @@ -import typing - -import streamlit as st - -from testgen.ui.navigation.page import Page -from testgen.ui.session import session - - -class NotFoundPage(Page): - path = "404" - can_activate: typing.ClassVar = [ - lambda: session.authentication_status or "login", - ] - - def render(self, **_) -> None: - st.write("Page not found") From 5a71652bcee9d04e5030d3fe50a069526c49252d Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Wed, 
14 Aug 2024 01:25:44 -0400 Subject: [PATCH 06/78] fix(logs): prevent duplicate logs --- testgen/common/logs.py | 59 +++++++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/testgen/common/logs.py b/testgen/common/logs.py index 4f566b9..ff4511b 100644 --- a/testgen/common/logs.py +++ b/testgen/common/logs.py @@ -22,35 +22,36 @@ def configure_logging( logger = logging.getLogger("testgen") logger.setLevel(level) - formatter = logging.Formatter(log_format) - - console_out_handler = logging.StreamHandler(stream=sys.stdout) - if settings.IS_DEBUG: - console_out_handler.setLevel(level) - else: - console_out_handler.setLevel(logging.WARNING) - console_out_handler.setFormatter(formatter) - - console_err_handler = logging.StreamHandler(stream=sys.stderr) - console_err_handler.setLevel(logging.WARNING) - console_err_handler.setFormatter(formatter) - - logger.addHandler(console_out_handler) - logger.addHandler(console_err_handler) - - if settings.LOG_TO_FILE: - os.makedirs(settings.LOG_FILE_PATH, exist_ok=True) - - file_handler = ConcurrentTimedRotatingFileHandler( - get_log_full_path(), - when="D", - interval=1, - backupCount=int(settings.LOG_FILE_MAX_QTY), - ) - file_handler.setLevel(level) - file_handler.setFormatter(formatter) - - logger.addHandler(file_handler) + if not any(isinstance(handler, logging.StreamHandler) for handler in logger.handlers): + formatter = logging.Formatter(log_format) + + console_out_handler = logging.StreamHandler(stream=sys.stdout) + if settings.IS_DEBUG: + console_out_handler.setLevel(level) + else: + console_out_handler.setLevel(logging.WARNING) + console_out_handler.setFormatter(formatter) + + console_err_handler = logging.StreamHandler(stream=sys.stderr) + console_err_handler.setLevel(logging.WARNING) + console_err_handler.setFormatter(formatter) + + logger.addHandler(console_out_handler) + logger.addHandler(console_err_handler) + + if settings.LOG_TO_FILE: + os.makedirs(settings.LOG_FILE_PATH, 
exist_ok=True) + + file_handler = ConcurrentTimedRotatingFileHandler( + get_log_full_path(), + when="D", + interval=1, + backupCount=int(settings.LOG_FILE_MAX_QTY), + ) + file_handler.setLevel(level) + file_handler.setFormatter(formatter) + + logger.addHandler(file_handler) def get_log_full_path() -> str: From 5cb0cf08f48bfdaf307d966d29eed4e83ab33390 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Wed, 14 Aug 2024 01:30:21 -0400 Subject: [PATCH 07/78] fix(modal): temporary fix for incompatible third-party modals --- pyproject.toml | 1 - testgen/ui/components/widgets/modal.py | 28 ++++++++++++++++++++++++-- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 1e9328e..ddf3085 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,6 @@ dependencies = [ "streamlit-option-menu==0.3.6", "streamlit-authenticator==0.2.3", "streamlit-javascript==0.1.5", - "streamlit-modal==0.1.0", "progress==1.6", "beautifulsoup4==4.12.3", "trino==0.327.0", diff --git a/testgen/ui/components/widgets/modal.py b/testgen/ui/components/widgets/modal.py index 0347c86..ee2290c 100644 --- a/testgen/ui/components/widgets/modal.py +++ b/testgen/ui/components/widgets/modal.py @@ -2,10 +2,34 @@ import streamlit import streamlit.components.v1 as components -from streamlit_modal import Modal as BaseModal +# This is a custom version of the "streamlit-modal" third-party library +# The original library is not compatible with streamlit 1.30+ +# https://github.com/teamtv/streamlit_modal/issues/19 + +# This is temporary until we replace our modals with the new native st.dialog feature +# https://docs.streamlit.io/develop/api-reference/execution-flow/st.dialog + +class Modal: + + def __init__(self, title, key, padding=20, max_width=None): + self.title = title + self.padding = padding + self.max_width = max_width + self.key = key + + def is_open(self): + return streamlit.session_state.get(f"{self.key}-opened", False) + + def open(self): + 
streamlit.session_state[f"{self.key}-opened"] = True + streamlit.rerun() + + def close(self, rerun=True): + streamlit.session_state[f"{self.key}-opened"] = False + if rerun: + streamlit.rerun() -class Modal(BaseModal): @contextmanager def container(self): streamlit.markdown(self._modal_styles(), unsafe_allow_html=True) From 7d7c37b67fc1da42f96b1343a8ea7c838fa732cf Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Fri, 16 Aug 2024 16:50:14 -0400 Subject: [PATCH 08/78] style: fix linting and test errors --- testgen/ui/app.py | 2 +- testgen/ui/navigation/page.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/testgen/ui/app.py b/testgen/ui/app.py index 4d592ae..437f483 100644 --- a/testgen/ui/app.py +++ b/testgen/ui/app.py @@ -1,8 +1,8 @@ import logging import sys +from pathlib import Path import streamlit as st -from pathlib import Path from testgen import settings from testgen.common.docker_service import check_basic_configuration diff --git a/testgen/ui/navigation/page.py b/testgen/ui/navigation/page.py index 1937802..c29f9c3 100644 --- a/testgen/ui/navigation/page.py +++ b/testgen/ui/navigation/page.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import abc import logging import typing From afb4ee03ed26067dd774ca67cc740ee52ea32e91 Mon Sep 17 00:00:00 2001 From: Ricardo Boni Date: Fri, 16 Aug 2024 18:47:46 -0400 Subject: [PATCH 09/78] feat(test definition): removing the cli import/export commands --- testgen/__main__.py | 37 ---- .../commands/run_observability_exporter.py | 9 +- testgen/commands/run_quick_start.py | 13 +- testgen/commands/run_test_definition.py | 146 -------------- testgen/common/display_service.py | 51 +---- .../updates/create_tmp_test_definition.sql | 19 -- .../template/updates/get_test_def_parms.sql | 38 ---- .../updates/populate_stg_test_definitions.sql | 184 ------------------ 8 files changed, 13 insertions(+), 484 deletions(-) delete mode 100644 testgen/commands/run_test_definition.py delete mode 100644 
testgen/template/updates/create_tmp_test_definition.sql delete mode 100644 testgen/template/updates/get_test_def_parms.sql delete mode 100644 testgen/template/updates/populate_stg_test_definitions.sql diff --git a/testgen/__main__.py b/testgen/__main__.py index a63b1b6..1e2ac2d 100644 --- a/testgen/__main__.py +++ b/testgen/__main__.py @@ -34,7 +34,6 @@ from testgen.commands.run_profiling_bridge import run_profiling_queries from testgen.commands.run_quick_start import run_quick_start, run_quick_start_increment from testgen.commands.run_setup_profiling_tools import run_setup_profiling_tools -from testgen.commands.run_test_definition import get_test_def_parms, update_test_def_parms_dict from testgen.commands.run_upgrade_db_config import get_schema_revision, is_db_revision_up_to_date, run_upgrade_db_config from testgen.common import ( configure_logging, @@ -267,42 +266,6 @@ def list_test_generation(configuration: Configuration, project_key: str, test_su display_service.to_csv("list_test_generation.csv", rows, header) -@cli.command("get-test-properties", help="Fetches test details for a test suite (in an editable file).") -@click.option( - "-pk", - "--project-key", - required=False, - type=click.STRING, - help="The identifier for a TestGen project. Use a project_key shown in list-projects.", - default=settings.PROJECT_KEY, -) -@click.option( - "-ts", - "--test-suite-key", - help="The identifier for a test suite. 
Use a test_suite_key shown in list-test-suites.", - type=click.STRING, - required=True, - default=settings.DEFAULT_TEST_SUITE_KEY, -) -@click.option("-d", "--display", help="Show command output in the terminal.", is_flag=True, default=False) -@pass_configuration -def get_test_properties(configuration: Configuration, project_key: str, test_suite_key: str, display: bool): - LOG.info("CurrentStep: Main Program - List Test Properties") - yaml_dict = get_test_def_parms(project_key, test_suite_key) - display_service.to_yaml("get_test_parms.yaml", yaml_dict, display) - - -@cli.command("update-test-properties", help="Updates test properties from changes made in the YAML file.") -@click.option("-d", "--display", help="Show command output in the terminal.", is_flag=True, default=False) -@pass_configuration -def update_test_parms(configuration: Configuration, display: bool): - click.echo("update-test-properties is starting.") - LOG.info("CurrentStep: Update Test Def Parms") - yaml_dict = display_service.from_yaml("get_test_parms.yaml", display) - update_test_def_parms_dict(yaml_dict) - click.echo("update-test-properties has successfully finished.") - - @cli.command("list-tests", help="Lists the tests generated for a test suite.") @click.option( "-pk", diff --git a/testgen/commands/run_observability_exporter.py b/testgen/commands/run_observability_exporter.py index a708a2e..ff6cb88 100644 --- a/testgen/commands/run_observability_exporter.py +++ b/testgen/commands/run_observability_exporter.py @@ -5,11 +5,12 @@ from collections import namedtuple from urllib.parse import urlparse +import click import requests from requests_extensions import get_session from testgen import settings -from testgen.common import date_service, display_service, read_template_sql_file +from testgen.common import date_service, read_template_sql_file from testgen.common.database.database_service import ExecuteDBQuery, RetrieveDBResultsToDictList LOG = logging.getLogger("testgen") @@ -30,7 +31,7 @@ def 
calculate_chunk_size(test_outcomes): def post_event(event_type, payload, api_url, api_key, test_outcomes, is_test=False): qty_of_events = len(test_outcomes) if not is_test and qty_of_events == 0: - display_service.echo("Nothing to be sent to Observability") + click.echo("Nothing to be sent to Observability") return qty_of_events def chunkify(collection, chunk_size): @@ -309,7 +310,7 @@ def export_test_results(project_code, test_suite): max_qty_events = settings.OBSERVABILITY_EXPORT_LIMIT qty_of_exported_events = 0 while True: - display_service.echo(f"Observability Export Increment - {qty_of_exported_events} exported events so far") + click.echo(f"Observability Export Increment - {qty_of_exported_events} exported events so far") test_outcomes, updated_ids = collect_test_results(project_code, test_suite, max_qty_events) if len(test_outcomes) == 0: return qty_of_exported_events @@ -320,7 +321,7 @@ def export_test_results(project_code, test_suite): def run_observability_exporter(project_code, test_suite): LOG.info("CurrentStep: Observability Export - Test Results") qty_of_exported_events = export_test_results(project_code, test_suite) - display_service.echo(f"{qty_of_exported_events} events have been exported.") + click.echo(f"{qty_of_exported_events} events have been exported.") def test_observability_exporter(project_code, api_url, api_key): diff --git a/testgen/commands/run_quick_start.py b/testgen/commands/run_quick_start.py index 503ca34..67a22b5 100644 --- a/testgen/commands/run_quick_start.py +++ b/testgen/commands/run_quick_start.py @@ -1,10 +1,11 @@ import logging +import click + from testgen import settings from testgen.commands.run_get_entities import run_table_group_list from testgen.commands.run_launch_db_config import run_launch_db_config from testgen.commands.run_setup_profiling_tools import run_setup_profiling_tools -from testgen.common import display_service from testgen.common.database.database_service import ( AssignConnectParms, 
CreateDatabaseIfNotExists, @@ -112,17 +113,17 @@ def run_quick_start(delete_target_db: bool) -> None: # Create DB target_db_name = params_mapping["PROJECT_DB"] - display_service.echo(f"Creating target db : {target_db_name}") + click.echo(f"Creating target db : {target_db_name}") CreateDatabaseIfNotExists(target_db_name, params_mapping, delete_target_db, drop_users_and_roles=False) # run setup command = "testgen setup-system-db --delete-db --yes" - display_service.echo(f"Running CLI command: {command}") + click.echo(f"Running CLI command: {command}") delete_db = True run_launch_db_config(delete_db) # Schema and Populate target db - display_service.echo(f"Populating target db : {target_db_name}") + click.echo(f"Populating target db : {target_db_name}") queries = [ replace_params(read_template_sql_file("recreate_target_data_schema.sql", "quick_start"), params_mapping), replace_params(read_template_sql_file("populate_target_data.sql", "quick_start"), params_mapping), @@ -141,13 +142,13 @@ def run_quick_start(delete_target_db: bool) -> None: # run qc command = "testgen setup-target-db-functions --connection-id --create-qc-schema --yes" - display_service.echo(f"Running CLI command: {command}") + click.echo(f"Running CLI command: {command}") create_qc_schema = True db_user = params_mapping["TESTGEN_ADMIN_USER"] db_password = params_mapping["TESTGEN_ADMIN_PASSWORD"] dry_run = False project_qc_schema = run_setup_profiling_tools(connection_id, dry_run, create_qc_schema, db_user, db_password) - display_service.echo(f"Schema {project_qc_schema} has been created in the target db") + click.echo(f"Schema {project_qc_schema} has been created in the target db") def run_quick_start_increment(iteration): diff --git a/testgen/commands/run_test_definition.py b/testgen/commands/run_test_definition.py deleted file mode 100644 index 78234fd..0000000 --- a/testgen/commands/run_test_definition.py +++ /dev/null @@ -1,146 +0,0 @@ -from testgen.common import RetrieveDBResultsToDictList, 
RunActionQueryList, WriteListToDB, read_template_sql_file - - -def get_test_def_parms(project_code, test_suite): - lstResults = run_test_def_parms(project_code, test_suite) - - if lstResults is None: - raise ValueError("Test Definition Parameters not found") - - yaml_dict = {} - - for row in lstResults: - project_code = row["project_code"] - test_suite = row["test_suite"] - schema = row["schema_name"] - table_name = row["table_name"] - column_name = row["column_name"] - row_id = (row["id"],) - test_type = (row["test_type"],) - test_description = (row["test_description"],) - test_action = (row["test_action"],) - test_active = (row["test_active"],) - lock_refresh = (row["lock_refresh"],) - severity = (row["severity"],) - test_parameters = (row["test_parameters"],) - - if project_code not in yaml_dict: - yaml_dict[project_code] = {} - if test_suite not in yaml_dict[project_code]: - yaml_dict[project_code][test_suite] = {} - if schema not in yaml_dict[project_code][test_suite]: - yaml_dict[project_code][test_suite][schema] = {} - if table_name not in yaml_dict[project_code][test_suite][schema]: - yaml_dict[project_code][test_suite][schema][table_name] = {} - if column_name not in yaml_dict[project_code][test_suite][schema][table_name]: - yaml_dict[project_code][test_suite][schema][table_name][column_name] = [] - - parm_columns = test_parameters[0].split(",") - parm_dict = {} - - for column in parm_columns: - parm_dict[column] = row[column] - - yaml_dict[project_code][test_suite][schema][table_name][column_name].append( - { - "id": str(row_id[0]), - "test_type": str(test_type[0]), - "test_description": str(test_description[0]), - "test_action": str(test_action[0]), - "test_active": str(test_active[0]), - "lock_refresh": str(lock_refresh[0]), - "severity": str(severity[0]), - "test_parameters": parm_dict, - } - ) - - return yaml_dict - - -def run_test_def_parms(project_code, test_suite): - sql_template = read_template_sql_file("get_test_def_parms.sql", "updates") - - 
sql_template = sql_template.replace("{PROJECT_CODE}", project_code) - sql_template = sql_template.replace("{TEST_SUITE}", test_suite) - - return RetrieveDBResultsToDictList("DKTG", sql_template) - - -def update_test_def_parms_dict(yaml_dict): - if yaml_dict is None: - raise ValueError("Test Definition Parameters not found") - - updResults = update_test_definitions(yaml_dict) - RunActionQueryList("DKTG", updResults) - - -def update_test_definitions(data): - list_columns = [] - list_update_insert_queries = [] - - for project_code, test_suite_dict in data.items(): - for test_suite, schema_dict in test_suite_dict.items(): - for schema, table_dict in schema_dict.items(): - for table, column_dict in table_dict.items(): - for column, attributes_list in column_dict.items(): - for attribute in attributes_list: - id_col = attribute["id"] - test_type = attribute["test_type"] - test_description = attribute["test_description"] - test_action = attribute["test_action"] - test_active = attribute["test_active"] - lock_refresh = attribute["lock_refresh"] - severity = attribute["severity"] - test_parameters = attribute["test_parameters"] - - column_keys = test_parameters.keys() - column_values = test_parameters.values() - - for col, value in zip(column_keys, column_values, strict=False): - list_columns.append( - [ - project_code, - test_suite, - schema, - table, - column, - id_col, - test_type, - test_description, - test_action, - test_active, - lock_refresh, - severity, - col, - value, - ] - ) - - col_list = [ - "project_code", - "test_suite", - "schema_name", - "table_name", - "column_name", - "id", - "test_type", - "test_description", - "test_action", - "test_active", - "lock_refresh", - "severity", - "test_parameter", - "test_parameter_value", - ] - - list_create_queries = [] - create_table = read_template_sql_file("create_tmp_test_definition.sql", "updates") - list_create_queries.append(create_table) - RunActionQueryList("DKTG", list_create_queries) - - # Write to 
tmp_test_definition - WriteListToDB("DKTG", list_columns, col_list, "tmp_test_definition") - - sql_template = read_template_sql_file("populate_stg_test_definitions.sql", "updates") - list_update_insert_queries.append(sql_template) - return list_update_insert_queries diff --git a/testgen/common/display_service.py b/testgen/common/display_service.py index 665f675..9741580 100644 --- a/testgen/common/display_service.py +++ b/testgen/common/display_service.py @@ -1,13 +1,9 @@ import csv -import logging import os import click -import yaml from prettytable import PrettyTable -LOG = logging.getLogger("testgen") - def print_table(rows: list[dict], column_names: list[str]): table = PrettyTable(column_names) @@ -27,7 +23,7 @@ def to_csv(file_name: str, rows: list[dict], column_names: list[str]): writer.writerow(column_names) for row in rows: writer.writerow(row) - echo(f"Output written to: ~/testgen/file-out/{file_name}") + click.echo(f"Output written to: ~/testgen/file-out/{file_name}") def get_in_out_paths(): @@ -38,48 +34,3 @@ def get_in_out_paths(): os.makedirs(file_in_path, exist_ok=True) os.makedirs(file_out_path, exist_ok=True) return file_in_path, file_out_path - - -def write_to_file(full_path_and_name: str, file_content: str): - with open(full_path_and_name, "w") as file: - file.write(file_content) - - -def to_yaml(file_name: str, yaml_dict: dict, display: bool): - yaml_content = yaml.dump(yaml_dict, sort_keys=False) - yaml_content.replace("None", "null") - - _, file_out_path = get_in_out_paths() - full_path = os.path.join(file_out_path, file_name) - with open(full_path, "w", newline="") as file: - file.write(yaml_content) - - if display: - echo(yaml_content + "\n") - - echo(f"Output written to: ~/testgen/file-out/{file_name}") - - -def echo(message: str): - click.echo(message) - - -def from_yaml(file_name: str, display: bool): - echo(f"Attempting to read from : ~/testgen/file-in/{file_name}") - file_in_path, _ = get_in_out_paths() - full_path = 
os.path.join(file_in_path, file_name) - with open(full_path, newline="") as file: - yaml_content = yaml.safe_load(file) - - if display: - data = yaml.dump(yaml_content, sort_keys=False) - echo(data) - - return yaml_content - - -def check_config_file_presence(file_name: str) -> None: - file_in_path, _ = get_in_out_paths() - full_path = os.path.join(file_in_path, file_name) - if not os.path.exists(full_path): - echo(click.style(f"Warning: File ~/testgen/file-in/{file_name} is not present.", fg="yellow")) diff --git a/testgen/template/updates/create_tmp_test_definition.sql b/testgen/template/updates/create_tmp_test_definition.sql deleted file mode 100644 index 7e8312c..0000000 --- a/testgen/template/updates/create_tmp_test_definition.sql +++ /dev/null @@ -1,19 +0,0 @@ -DROP TABLE IF EXISTS tmp_test_definition CASCADE; - -CREATE TEMPORARY TABLE tmp_test_definition -( - project_code varchar(30), - test_suite varchar(200), - schema_name varchar(100), - table_name varchar(100), - column_name varchar(500), - id uuid, - test_type varchar(200), - test_description varchar(1000), - test_action varchar(100), - test_active varchar(10), - lock_refresh varchar(10), - severity varchar(10), - test_parameter varchar(100), - test_parameter_value varchar(1000) -); diff --git a/testgen/template/updates/get_test_def_parms.sql b/testgen/template/updates/get_test_def_parms.sql deleted file mode 100644 index f0d8d7f..0000000 --- a/testgen/template/updates/get_test_def_parms.sql +++ /dev/null @@ -1,38 +0,0 @@ -SELECT td.project_code, td.test_suite, - td.schema_name, td.table_name, td.column_name, - td.id::VARCHAR(50), td.test_type, - CASE WHEN td.test_description IS NOT NULL THEN td.test_description ELSE tt.test_description END - as test_description, - td.test_action, - td.test_active, - td.lock_refresh, - td.severity, - tt.default_parm_columns as test_parameters, - td.baseline_ct, - td.baseline_unique_ct, - td.baseline_value, - td.baseline_value_ct, - td.threshold_value, - td.baseline_sum, 
- td.baseline_avg, - td.baseline_sd, - td.subset_condition, - td.groupby_names, - td.having_condition, - td.window_date_column, - td.window_days, - td.match_schema_name, - td.match_table_name, - td.match_column_names, - td.match_subset_condition, - td.match_groupby_names, - td.match_having_condition, - td.custom_query - -FROM test_definitions td - INNER JOIN test_types tt - ON (td.test_type = tt.test_type) -WHERE project_code = '{PROJECT_CODE}' - AND test_suite = '{TEST_SUITE}' -ORDER BY td.project_code, td.test_suite, - td.schema_name, td.table_name, td.column_name, td.test_type; diff --git a/testgen/template/updates/populate_stg_test_definitions.sql b/testgen/template/updates/populate_stg_test_definitions.sql deleted file mode 100644 index 09ca01a..0000000 --- a/testgen/template/updates/populate_stg_test_definitions.sql +++ /dev/null @@ -1,184 +0,0 @@ -DROP TABLE IF EXISTS tmp_stg_test_definitions CASCADE; - -CREATE TEMPORARY TABLE tmp_stg_test_definitions -( - id uuid, - cat_test_id bigint, - project_code varchar(30), - table_groups_id uuid, - profile_run_id uuid, - test_type varchar(200), - test_suite varchar(200), - test_description varchar(1000), - test_action varchar(100), - schema_name varchar(100), - table_name varchar(100), - column_name varchar(500), - skip_errors integer, - baseline_ct varchar(1000), - baseline_unique_ct varchar(1000), - baseline_value varchar(1000), - baseline_value_ct varchar(1000), - threshold_value varchar(1000), - baseline_sum varchar(1000), - baseline_avg varchar(1000), - baseline_sd varchar(1000), - subset_condition varchar(500), - groupby_names varchar(200), - having_condition varchar(500), - window_date_column varchar(100), - window_days integer, - match_schema_name varchar(100), - match_table_name varchar(100), - match_column_names varchar(200), - match_subset_condition varchar(500), - match_groupby_names varchar(200), - match_having_condition varchar(500), - test_mode varchar(20), - custom_query varchar(4000), - test_active 
varchar(10), - severity varchar(10), - watch_level varchar(10), - check_result varchar(500), - lock_refresh varchar(10), - last_auto_gen_date timestamp, - profiling_as_of_date timestamp -); - - -INSERT INTO tmp_stg_test_definitions (project_code, test_suite, schema_name, table_name, column_name, - id, test_type, test_description, test_action, test_active,lock_refresh, severity, - baseline_ct,baseline_unique_ct,baseline_value,baseline_value_ct,threshold_value, - baseline_sum,baseline_avg,baseline_sd,subset_condition,groupby_names,having_condition, - window_date_column,window_days,match_schema_name,match_table_name,match_column_names, - match_subset_condition,match_groupby_names, match_having_condition ) -SELECT project_code, - test_suite, - schema_name, - table_name, - column_name, - id, - test_type, - test_description, - test_action, - CASE WHEN lower(test_active) = 'none' THEN 'N' ELSE test_active END as test_active, - CASE WHEN lower(lock_refresh) = 'none' THEN 'N' ELSE lock_refresh END as lock_refresh, - CASE WHEN lower(severity) IN ('warning','fail','ignore') THEN initcap(severity) ELSE NULL END as severity, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'baseline_ct' ) AS baseline_ct, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'baseline_unique_ct' ) AS baseline_unique_ct, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'baseline_value' ) AS baseline_value, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'baseline_value_ct' ) AS baseline_value_ct, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'threshold_value' ) AS threshold_value, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'baseline_sum' ) AS baseline_sum, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'baseline_avg' ) AS baseline_avg, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'baseline_sd' ) AS baseline_sd, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'subset_condition' ) AS 
subset_condition, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'groupby_names' ) AS groupby_names, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'having_condition' ) AS having_condition, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'window_date_column' ) AS window_date_column, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'window_days' ) :: integer AS window_days, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'match_schema_name' ) AS match_schema_name, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'match_table_name' ) AS match_table_name, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'match_column_names' ) AS match_column_names, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'match_subset_condition' ) AS match_subset_condition, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'match_groupby_names' ) AS match_groupby_names, - MAX(test_parameter_value) FILTER( WHERE test_parameter = 'match_having_condition' ) AS match_having_condition -FROM tmp_test_definition -GROUP BY 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 -ORDER BY 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12; - - ---- UPDATE - -UPDATE test_definitions -SET test_active = CASE WHEN lower(c.test_active) = 'none' THEN 'N' ELSE c.test_active END, - lock_refresh = CASE WHEN lower(c.lock_refresh) = 'none' THEN 'N' ELSE c.lock_refresh END, - severity = CASE WHEN lower(c.severity) IN ('warning','fail','ignore') THEN initcap(c.severity) ELSE NULL END, - last_manual_update = current_timestamp , - baseline_ct = c.baseline_ct , - baseline_unique_ct = c.baseline_unique_ct , - baseline_value = c.baseline_value , - baseline_value_ct = c.baseline_value_ct , - threshold_value = c.threshold_value , - baseline_sum = c.baseline_sum , - baseline_avg = c.baseline_avg , - baseline_sd = c.baseline_sd , - subset_condition = c.subset_condition , - groupby_names = c.groupby_names , - having_condition = c.having_condition , - 
window_date_column = c.window_date_column , - window_days = c.window_days , - match_schema_name = c.match_schema_name , - match_table_name = c.match_table_name , - match_column_names = c.match_column_names , - match_subset_condition = c.match_subset_condition, - match_groupby_names = c.match_groupby_names - FROM ( SELECT b.* FROM tmp_stg_test_definitions b - LEFT JOIN test_definitions a - ON a.project_code = b.project_code - AND a.test_suite = b.test_suite - AND a.schema_name = b.schema_name - AND a.table_name = b.table_name - AND a.column_name = b.column_name - AND a.test_type = b.test_type - WHERE (a.test_active != b.test_active) - OR (a.lock_refresh != b.lock_refresh) - OR (coalesce(a.severity,'') != coalesce(b.severity,'')) - OR (a.baseline_ct != b.baseline_ct ) - OR (a.baseline_unique_ct != b.baseline_unique_ct ) - OR (a.baseline_value != b.baseline_value ) - OR (a.baseline_value_ct != b.baseline_value_ct ) - OR (a.threshold_value != b.threshold_value ) - OR (a.baseline_sum != b.baseline_sum ) - OR (a.baseline_avg != b.baseline_avg ) - OR (a.baseline_sd != b.baseline_sd ) - OR (a.subset_condition != b.subset_condition ) - OR (a.groupby_names != b.groupby_names ) - OR (a.having_condition != b.having_condition ) - OR (a.window_date_column != b.window_date_column ) - OR (a.window_days != b.window_days ) - OR (a.match_schema_name != b.match_schema_name ) - OR (a.match_table_name != b.match_table_name ) - OR (a.match_column_names != b.match_column_names ) - OR (a.match_subset_condition != b.match_subset_condition) - OR (a.match_groupby_names != b.match_groupby_names ) - ) c -WHERE test_definitions.project_code = c.project_code - AND test_definitions.test_suite = c.test_suite - AND test_definitions.schema_name = c.schema_name - AND test_definitions.table_name = c.table_name - AND test_definitions.column_name = c.column_name - AND test_definitions.test_type = c.test_type ; - - --- INSERT -INSERT INTO test_definitions (project_code, test_suite, schema_name, 
table_name, column_name, - test_type, test_action, test_active, lock_refresh, severity, last_manual_update, - baseline_ct,baseline_unique_ct,baseline_value,baseline_value_ct,threshold_value, - baseline_sum,baseline_avg,baseline_sd,subset_condition,groupby_names,having_condition, - window_date_column,window_days,match_schema_name,match_table_name,match_column_names, - match_subset_condition,match_groupby_names, match_having_condition ) -SELECT a.project_code, a.test_suite, a.schema_name, a.table_name, a.column_name, - a.test_type, - CASE WHEN lower(a.test_action) = 'none' THEN NULL ELSE a.test_action END as test_action, - CASE WHEN lower(a.test_active) = 'none' THEN 'N' ELSE a.test_active END as test_active, - CASE WHEN lower(a.lock_refresh) = 'none' THEN 'N' ELSE a.lock_refresh END as lock_refresh, - CASE WHEN lower(a.severity) IN ('warning','fail','ignore') THEN initcap(a.severity) ELSE NULL END as severity, - current_timestamp as last_manual_update, - a.baseline_ct,a.baseline_unique_ct,a.baseline_value,a.baseline_value_ct,a.threshold_value, - a.baseline_sum,a.baseline_avg,a.baseline_sd,a.subset_condition,a.groupby_names,a.having_condition, - a.window_date_column,a.window_days,a.match_schema_name,a.match_table_name,a.match_column_names, - a.match_subset_condition,a.match_groupby_names, a.match_having_condition -FROM tmp_stg_test_definitions a - LEFT JOIN test_definitions b - ON a.project_code = b.project_code - AND a.test_suite = b.test_suite - AND a.schema_name = b.schema_name - AND a.table_name = b.table_name - AND a.column_name = b.column_name - AND a.test_type = b.test_type -WHERE a.id IS NULL AND b.id is NULL; - -DROP TABLE tmp_test_definition CASCADE; - -DROP TABLE tmp_stg_test_definitions CASCADE; From 87bf10c340e536300b0d3938657b3a69565456c7 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Mon, 19 Aug 2024 21:56:08 -0400 Subject: [PATCH 10/78] refactor(dialog): use streamlit native dialog on all pages --- testgen/ui/assets/style.css | 18 +- 
testgen/ui/components/widgets/__init__.py | 1 - testgen/ui/components/widgets/modal.py | 121 ---- testgen/ui/services/form_service.py | 15 - testgen/ui/views/app_log_modal.py | 59 +- testgen/ui/views/connections.py | 13 +- testgen/ui/views/connections_base.py | 189 +++-- testgen/ui/views/profiling_anomalies.py | 71 +- testgen/ui/views/profiling_modal.py | 30 +- testgen/ui/views/table_groups.py | 561 +++++++-------- testgen/ui/views/test_definitions.py | 832 +++++++++++----------- testgen/ui/views/test_results.py | 75 +- testgen/ui/views/test_suites.py | 615 +++++++--------- 13 files changed, 1148 insertions(+), 1452 deletions(-) delete mode 100644 testgen/ui/components/widgets/modal.py diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css index f5f2659..7be5a9d 100644 --- a/testgen/ui/assets/style.css +++ b/testgen/ui/assets/style.css @@ -56,20 +56,12 @@ section.main > :nth-child(1 of div).block-container { } /* */ -/* Customization of modal widget for scrollability */ -section div[data-modal-container='true'] { - top: 0 !important; - left: unset !important; -} - -section div[data-modal-container="true"][key^="dk-"] { - width: auto !important; -} - -div[data-modal-container="true"] > div:first-child > div:first-child { - max-height: 90vh; - overflow-y: auto !important; +/* Dialog - sets the width of all st.dialog */ +/* There is no way to target "large" and "small" dialogs reliably */ +div[data-testid="stModal"] div[role="dialog"] { + width: calc(55rem); } +/* */ button[title="Show password text"] { display: none; diff --git a/testgen/ui/components/widgets/__init__.py b/testgen/ui/components/widgets/__init__.py index 0c9f727..1294f8d 100644 --- a/testgen/ui/components/widgets/__init__.py +++ b/testgen/ui/components/widgets/__init__.py @@ -1,5 +1,4 @@ # ruff: noqa: F401 from testgen.ui.components.widgets.breadcrumbs import breadcrumbs -from testgen.ui.components.widgets.modal import Modal from testgen.ui.components.widgets.sidebar import sidebar 
diff --git a/testgen/ui/components/widgets/modal.py b/testgen/ui/components/widgets/modal.py deleted file mode 100644 index ee2290c..0000000 --- a/testgen/ui/components/widgets/modal.py +++ /dev/null @@ -1,121 +0,0 @@ -from contextlib import contextmanager - -import streamlit -import streamlit.components.v1 as components - -# This is a custom version of the "streamlit-modal" third-party library -# The original library is not compatible with streamlit 1.30+ -# https://github.com/teamtv/streamlit_modal/issues/19 - -# This is temporary until we replace our modals with the new native st.dialog feature -# https://docs.streamlit.io/develop/api-reference/execution-flow/st.dialog - -class Modal: - - def __init__(self, title, key, padding=20, max_width=None): - self.title = title - self.padding = padding - self.max_width = max_width - self.key = key - - def is_open(self): - return streamlit.session_state.get(f"{self.key}-opened", False) - - def open(self): - streamlit.session_state[f"{self.key}-opened"] = True - streamlit.rerun() - - def close(self, rerun=True): - streamlit.session_state[f"{self.key}-opened"] = False - if rerun: - streamlit.rerun() - - @contextmanager - def container(self): - streamlit.markdown(self._modal_styles(), unsafe_allow_html=True) - with streamlit.container(): - _container = streamlit.container() - if self.title: - _container.markdown(f"

{self.title}

", unsafe_allow_html=True) - - close_ = streamlit.button("X", key=f"{self.key}-close") - if close_: - self.close() - - if not close_: - components.html(self._modal_script(), height=0, width=0) - - with _container: - yield _container - - def _modal_styles(self) -> str: - max_width = f"{self.max_width}px" if self.max_width else "unset" - - return f""" - - """ - - def _modal_script(self) -> str: - return f""" - - """ diff --git a/testgen/ui/services/form_service.py b/testgen/ui/services/form_service.py index ac30e59..9adef9f 100644 --- a/testgen/ui/services/form_service.py +++ b/testgen/ui/services/form_service.py @@ -425,21 +425,6 @@ def render_page_header( return bcol2 -def render_modal_header(str_title, str_help_link=None, str_prompt=None): - hcol1, hcol2 = st.columns([9, 1]) - hcol1.markdown(f"#### {str_title}") - if str_help_link: - with hcol2: - st.caption(" ") - render_icon_link(str_help_link) - st.write( - '
', - unsafe_allow_html=True, - ) - show_prompt(str_prompt) - - def render_select( str_label, df_options, str_show_column, str_return_column, boo_required=True, str_default=None, boo_disabled=False ): diff --git a/testgen/ui/views/app_log_modal.py b/testgen/ui/views/app_log_modal.py index d5f7ca1..3043020 100644 --- a/testgen/ui/views/app_log_modal.py +++ b/testgen/ui/views/app_log_modal.py @@ -6,9 +6,7 @@ import streamlit as st import testgen.common.logs as logs -import testgen.ui.services.form_service as fm from testgen.common import display_service -from testgen.ui.components import widgets as testgen LOG = logging.getLogger("testgen") @@ -45,48 +43,45 @@ def _search_text(log_data, search_query): def view_log_file(button_container): - log_file_modal = testgen.Modal(title=None, key="dk-view-log-modal", max_width=1100) - with button_container: if st.button( "Troubleshooting →", help="Open and review TestGen Log files", use_container_width=True ): - log_file_modal.open() + application_logs_dialog() - if log_file_modal.is_open(): - with log_file_modal.container(): - fm.render_modal_header("TestGen App Log", None, "Review/Troubleshoot daily log files") - _, file_out_path = display_service.get_in_out_paths() +@st.dialog(title="Application Logs") +def application_logs_dialog(): + _, file_out_path = display_service.get_in_out_paths() - col1, col2, col3 = st.columns([33, 33, 33]) - log_date = col1.date_input("Log Date", value=datetime.today()) + col1, col2, col3 = st.columns([33, 33, 33]) + log_date = col1.date_input("Log Date", value=datetime.today()) - log_file_location = logs.get_log_full_path() + log_file_location = logs.get_log_full_path() - if log_date != date.today(): - log_file_location += log_date.strftime(".%Y-%m-%d") + if log_date != date.today(): + log_file_location += log_date.strftime(".%Y-%m-%d") - log_file_name = os.path.basename(log_file_location) + log_file_name = os.path.basename(log_file_location) - log_data = _read_log(log_file_location) + log_data = 
_read_log(log_file_location) - search_query = col2.text_input("Filter by Text") - if search_query: - show_data = _search_text(log_data, search_query) - else: - show_data = log_data + search_query = col2.text_input("Filter by Text") + if search_query: + show_data = _search_text(log_data, search_query) + else: + show_data = log_data - # Refresh button - col3.write(" \n ") - if col3.button("Refresh"): - # Clear cache to refresh the log data - st.cache_data.clear() + # Refresh button + col3.markdown("
", unsafe_allow_html=True) + if col3.button("Refresh"): + # Clear cache to refresh the log data + st.cache_data.clear() - if log_data: - st.markdown(f"**Log File:** {log_file_name}") - # TOO SLOW: st.code(body=''.join(show_data), language="log", line_numbers=True) - st.text_area("Log Data", value="".join(show_data), height=400) + if log_data: + st.markdown(f"**Log File:** {log_file_name}") + # TOO SLOW: st.code(body=''.join(show_data), language="log", line_numbers=True) + st.text_area("Log Data", value="".join(show_data), height=400) - # Download button - st.download_button("Download", data="".join(show_data), file_name=log_file_name) + # Download button + st.download_button("Download", data="".join(show_data), file_name=log_file_name) diff --git a/testgen/ui/views/connections.py b/testgen/ui/views/connections.py index 5904b3c..99e0aa2 100644 --- a/testgen/ui/views/connections.py +++ b/testgen/ui/views/connections.py @@ -5,12 +5,11 @@ import testgen.ui.services.form_service as fm import testgen.ui.services.toolbar_service as tb -from testgen.ui.components import widgets as testgen from testgen.ui.navigation.menu import MenuItem from testgen.ui.navigation.page import Page from testgen.ui.services import connection_service from testgen.ui.session import session -from testgen.ui.views.connections_base import show_connection, show_create_qc_schema_modal +from testgen.ui.views.connections_base import create_qc_schema_dialog, show_connection_form LOG = logging.getLogger("testgen") @@ -42,9 +41,8 @@ def render(self) -> None: form_container = st.expander("", expanded=True) with form_container: - connection_modal = None mode = "edit" - show_connection(connection_modal, connection, mode, project_code, show_header=False) + show_connection_form(connection, mode, project_code) if tool_bar.long_slots[-1].button( f":{'gray' if not enable_table_groups else 'green'}[Table Groups →]", @@ -58,8 +56,6 @@ def render(self) -> None: {"connection_id": connection["connection_id"]}, ) - 
create_qc_schema_modal = testgen.Modal(title=None, key="dk-create-qc-schema-modal", max_width=1100) - _, col2 = st.columns([70, 30]) if col2.button( @@ -67,7 +63,4 @@ def render(self) -> None: help="Creates the required Utility schema and related functions in the target database", use_container_width=True, ): - create_qc_schema_modal.open() - - if create_qc_schema_modal.is_open(): - show_create_qc_schema_modal(create_qc_schema_modal, connection) + create_qc_schema_dialog(connection) diff --git a/testgen/ui/views/connections_base.py b/testgen/ui/views/connections_base.py index 6af0c99..e3765a9 100644 --- a/testgen/ui/views/connections_base.py +++ b/testgen/ui/views/connections_base.py @@ -4,118 +4,113 @@ import streamlit as st import testgen.ui.services.database_service as db -import testgen.ui.services.form_service as fm from testgen.commands.run_setup_profiling_tools import get_setup_profiling_tools_queries from testgen.common.database.database_service import empty_cache from testgen.ui.services import authentication_service, connection_service -def show_create_qc_schema_modal(modal, selected): - with modal.container(): - fm.render_modal_header("Configure QC Utility Schema", None) - selected_connection = selected - connection_id = selected_connection["connection_id"] - project_qc_schema = selected_connection["project_qc_schema"] - sql_flavor = selected_connection["sql_flavor"] - user = selected_connection["project_user"] - - create_qc_schema = st.toggle("Create QC Utility Schema", value=True) - grant_privileges = st.toggle("Grant access privileges to TestGen user", value=True) - - user_role = None - - # TODO ALEX: This textbox may be needed if we want to grant permissions to user role - # if sql_flavor == "snowflake": - # user_role_textbox_label = f"Primary role for database user {user}" - # user_role = st.text_input(label=user_role_textbox_label, max_chars=100) - - admin_credentials_expander = st.expander("Admin credential options", expanded=True) - with 
admin_credentials_expander: - admin_connection_option_index = 0 - admin_connection_options = ["Do not use admin credentials", "Use admin credentials with Password"] - if sql_flavor == "snowflake": - admin_connection_options.append("Use admin credentials with Key-Pair") - - admin_connection_option = st.radio( - "Admin credential options", - label_visibility="hidden", - options=admin_connection_options, - index=admin_connection_option_index, - horizontal=True, - ) +@st.dialog(title="Configure QC Utility Schema") +def create_qc_schema_dialog(selected_connection): + connection_id = selected_connection["connection_id"] + project_qc_schema = selected_connection["project_qc_schema"] + sql_flavor = selected_connection["sql_flavor"] + user = selected_connection["project_user"] - st.markdown("

 
", unsafe_allow_html=True) + create_qc_schema = st.toggle("Create QC Utility Schema", value=True) + grant_privileges = st.toggle("Grant access privileges to TestGen user", value=True) - db_user = None - db_password = None - admin_private_key_passphrase = None - admin_private_key = None - if admin_connection_option == admin_connection_options[0]: - st.markdown(":orange[User created in the connection dialog will be used.]") - else: - db_user = st.text_input(label="Admin db user", max_chars=40) - if admin_connection_option == admin_connection_options[1]: - db_password = st.text_input( - label="Admin db password", max_chars=40, type="password" - ) - st.markdown(":orange[Note: Admin credentials are not stored, are only used for this operation.]") - - if len(admin_connection_options) > 2 and admin_connection_option == admin_connection_options[2]: - admin_private_key_passphrase = st.text_input( - label="Private Key Passphrase", - key="create-qc-schema-private-key-password", - type="password", - max_chars=200, - help="Passphrase used while creating the private Key (leave empty if not applicable)", - ) + user_role = None - admin_uploaded_file = st.file_uploader("Upload private key (rsa_key.p8)", key="admin-uploaded-file") - if admin_uploaded_file: - admin_private_key = admin_uploaded_file.getvalue().decode("utf-8") + # TODO ALEX: This textbox may be needed if we want to grant permissions to user role + # if sql_flavor == "snowflake": + # user_role_textbox_label = f"Primary role for database user {user}" + # user_role = st.text_input(label=user_role_textbox_label, max_chars=100) - st.markdown(":orange[Note: Admin credentials are not stored, are only used for this operation.]") + admin_credentials_expander = st.expander("Admin credential options", expanded=True) + with admin_credentials_expander: + admin_connection_option_index = 0 + admin_connection_options = ["Do not use admin credentials", "Use admin credentials with Password"] + if sql_flavor == "snowflake": + 
admin_connection_options.append("Use admin credentials with Key-Pair") - submit = st.button("Update Configuration") + admin_connection_option = st.radio( + "Admin credential options", + label_visibility="hidden", + options=admin_connection_options, + index=admin_connection_option_index, + horizontal=True, + ) - if submit: - empty_cache() - script_expander = st.expander("Script Details") + st.markdown("

 
", unsafe_allow_html=True) - operation_status = st.empty() - operation_status.info(f"Configuring QC Utility Schema '{project_qc_schema}'...") + db_user = None + db_password = None + admin_private_key_passphrase = None + admin_private_key = None + if admin_connection_option == admin_connection_options[0]: + st.markdown(":orange[User created in the connection dialog will be used.]") + else: + db_user = st.text_input(label="Admin db user", max_chars=40) + if admin_connection_option == admin_connection_options[1]: + db_password = st.text_input( + label="Admin db password", max_chars=40, type="password" + ) + st.markdown(":orange[Note: Admin credentials are not stored, are only used for this operation.]") + + if len(admin_connection_options) > 2 and admin_connection_option == admin_connection_options[2]: + admin_private_key_passphrase = st.text_input( + label="Private Key Passphrase", + key="create-qc-schema-private-key-password", + type="password", + max_chars=200, + help="Passphrase used while creating the private Key (leave empty if not applicable)", + ) - try: - skip_granting_privileges = not grant_privileges - queries = get_setup_profiling_tools_queries(sql_flavor, create_qc_schema, skip_granting_privileges, project_qc_schema, user, user_role) - with script_expander: - st.code( - os.linesep.join(queries), - language="sql", - line_numbers=True) - - connection_service.create_qc_schema( - connection_id, - create_qc_schema, - db_user if db_user else None, - db_password if db_password else None, - skip_granting_privileges, - admin_private_key_passphrase=admin_private_key_passphrase, - admin_private_key=admin_private_key, - user_role=user_role, - ) - operation_status.empty() - operation_status.success("Operation has finished successfully.") + admin_uploaded_file = st.file_uploader("Upload private key (rsa_key.p8)", key="admin-uploaded-file") + if admin_uploaded_file: + admin_private_key = admin_uploaded_file.getvalue().decode("utf-8") - except Exception as e: - 
operation_status.empty() - operation_status.error("Error configuring QC Utility Schema.") - error_message = e.args[0] - st.text_area("Error Details", value=error_message) + st.markdown(":orange[Note: Admin credentials are not stored, are only used for this operation.]") + + submit = st.button("Update Configuration") + + if submit: + empty_cache() + script_expander = st.expander("Script Details") + + operation_status = st.empty() + operation_status.info(f"Configuring QC Utility Schema '{project_qc_schema}'...") + + try: + skip_granting_privileges = not grant_privileges + queries = get_setup_profiling_tools_queries(sql_flavor, create_qc_schema, skip_granting_privileges, project_qc_schema, user, user_role) + with script_expander: + st.code( + os.linesep.join(queries), + language="sql", + line_numbers=True) + + connection_service.create_qc_schema( + connection_id, + create_qc_schema, + db_user if db_user else None, + db_password if db_password else None, + skip_granting_privileges, + admin_private_key_passphrase=admin_private_key_passphrase, + admin_private_key=admin_private_key, + user_role=user_role, + ) + operation_status.empty() + operation_status.success("Operation has finished successfully.") + + except Exception as e: + operation_status.empty() + operation_status.error("Error configuring QC Utility Schema.") + error_message = e.args[0] + st.text_area("Error Details", value=error_message) -def show_connection(connection_modal, selected_connection, mode, project_code, show_header=True): - if show_header: - fm.render_modal_header("Add Connection" if mode == "add" else "Edit Connection", None) +def show_connection_form(selected_connection, mode, project_code): flavor_options = ["redshift", "snowflake", "mssql", "postgresql"] connection_options = ["Connect by Password", "Connect by Key-Pair"] @@ -307,8 +302,6 @@ def on_connect_by_url_change(): ) st.success(success_message) time.sleep(1) - if connection_modal: - connection_modal.close() st.rerun() test_left_column, 
test_mid_column, test_right_column = st.columns([0.15, 0.15, 0.70]) diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py index c689604..05c73b9 100644 --- a/testgen/ui/views/profiling_anomalies.py +++ b/testgen/ui/views/profiling_anomalies.py @@ -7,10 +7,9 @@ import testgen.ui.services.form_service as fm import testgen.ui.services.query_service as dq import testgen.ui.services.toolbar_service as tb -from testgen.ui.components import widgets as testgen from testgen.ui.navigation.page import Page from testgen.ui.session import session -from testgen.ui.views.profiling_modal import view_profiling_modal +from testgen.ui.views.profiling_modal import view_profiling_button class ProfilingAnomaliesPage(Page): @@ -144,11 +143,15 @@ def render(self) -> None: with col2: # _, v_col2 = st.columns([0.3, 0.7]) v_col1, v_col2 = st.columns([0.5, 0.5]) - view_profiling_modal( + view_profiling_button( v_col1, selected_row["table_name"], selected_row["column_name"], str_profile_run_id=str_profile_run_id ) - view_bad_data(v_col2, selected_row) + with v_col2: + if st.button( + ":green[Source Data →]", help="Review current source data for highlighted issue", use_container_width=True + ): + source_data_dialog(selected_row) # Need to render toolbar buttons after grid, so selection status is maintained if tool_bar.button_slots[0].button( @@ -449,41 +452,31 @@ def write_frequency_graph(df_tests): st.plotly_chart(fig) -def view_bad_data(button_container, selected_row): - str_header = f"Column: {selected_row['column_name']}, Table: {selected_row['table_name']}" - bad_data_modal = testgen.Modal(title=None, key="dk-anomaly-data-modal", max_width=1100) - - with button_container: - if st.button( - ":green[Source Data →]", help="Review current source data for highlighted issue", use_container_width=True - ): - bad_data_modal.open() - - if bad_data_modal.is_open(): - with bad_data_modal.container(): - fm.render_modal_header(selected_row["anomaly_name"], None) 
- st.caption(selected_row["anomaly_description"]) - fm.show_prompt(str_header) - - # Show the detail line - fm.render_html_list(selected_row, ["detail"], None, 700, ["Hygiene Issue Detail"]) - - with st.spinner("Retrieving source data..."): - bad_data_status, bad_data_msg, df_bad = get_bad_data(selected_row) - if bad_data_status in {"ND", "NA"}: - st.info(bad_data_msg) - elif bad_data_status == "ERR": - st.error(bad_data_msg) - elif df_bad is None: - st.error("An unknown error was encountered.") - else: - if bad_data_msg: - st.info(bad_data_msg) - # Pretify the dataframe - df_bad.columns = [col.replace("_", " ").title() for col in df_bad.columns] - df_bad.fillna("[NULL]", inplace=True) - # Display the dataframe - st.dataframe(df_bad, height=500, width=1050, hide_index=True) +@st.dialog(title="Source Data") +def source_data_dialog(selected_row): + st.markdown(f"#### {selected_row['anomaly_name']}") + st.caption(selected_row["anomaly_description"]) + fm.show_prompt(f"Column: {selected_row['column_name']}, Table: {selected_row['table_name']}") + + # Show the detail line + fm.render_html_list(selected_row, ["detail"], None, 700, ["Hygiene Issue Detail"]) + + with st.spinner("Retrieving source data..."): + bad_data_status, bad_data_msg, df_bad = get_bad_data(selected_row) + if bad_data_status in {"ND", "NA"}: + st.info(bad_data_msg) + elif bad_data_status == "ERR": + st.error(bad_data_msg) + elif df_bad is None: + st.error("An unknown error was encountered.") + else: + if bad_data_msg: + st.info(bad_data_msg) + # Pretify the dataframe + df_bad.columns = [col.replace("_", " ").title() for col in df_bad.columns] + df_bad.fillna("[NULL]", inplace=True) + # Display the dataframe + st.dataframe(df_bad, height=500, width=1050, hide_index=True) def do_disposition_update(selected, str_new_status): diff --git a/testgen/ui/views/profiling_modal.py b/testgen/ui/views/profiling_modal.py index 5ebeeb1..5a8ea45 100644 --- a/testgen/ui/views/profiling_modal.py +++ 
b/testgen/ui/views/profiling_modal.py @@ -4,37 +4,31 @@ import testgen.ui.queries.profiling_queries as profiling_queries import testgen.ui.services.form_service as fm -from testgen.ui.components import widgets as testgen from testgen.ui.views.profiling_details import show_profiling_detail LOG = logging.getLogger("testgen") BUTTON_TEXT = ":green[Profiling →]" # Profiling ⚲ BUTTON_HELP = "Review profiling for highlighted column" -FORM_HEADER = "Profiling Results" -def view_profiling_modal(button_container, str_table_name, str_column_name, +def view_profiling_button(button_container, str_table_name, str_column_name, str_profile_run_id=None, str_table_groups_id=None): - str_prompt = f"Column: {str_column_name}, Table: {str_table_name}" - - modal_viewer = testgen.Modal(title=None, key="dk-view", max_width=1100) - with button_container: if st.button( BUTTON_TEXT, help=BUTTON_HELP, use_container_width=True ): - modal_viewer.open() + profiling_results_dialog(str_table_name, str_column_name, str_profile_run_id, str_table_groups_id) - if modal_viewer.is_open(): - with modal_viewer.container(): - if not str_profile_run_id: - if str_table_groups_id: - str_profile_run_id = profiling_queries.get_latest_profile_run(str_table_groups_id) - if str_profile_run_id: - df = profiling_queries.get_profiling_detail(str_profile_run_id, str_table_name, str_column_name) - if not df.empty: - fm.render_modal_header(str_title=FORM_HEADER, str_prompt=str_prompt) - show_profiling_detail(df.iloc[0], 300) +@st.dialog(title="Profiling Results") +def profiling_results_dialog(str_table_name, str_column_name, str_profile_run_id=None, str_table_groups_id=None): + if not str_profile_run_id: + if str_table_groups_id: + str_profile_run_id = profiling_queries.get_latest_profile_run(str_table_groups_id) + if str_profile_run_id: + df = profiling_queries.get_profiling_detail(str_profile_run_id, str_table_name, str_column_name) + if not df.empty: + fm.show_prompt(f"Column: {str_column_name}, Table: 
{str_table_name}") + show_profiling_detail(df.iloc[0], 300) diff --git a/testgen/ui/views/table_groups.py b/testgen/ui/views/table_groups.py index a39c3eb..21b1791 100644 --- a/testgen/ui/views/table_groups.py +++ b/testgen/ui/views/table_groups.py @@ -10,7 +10,6 @@ import testgen.ui.services.table_group_service as table_group_service import testgen.ui.services.toolbar_service as tb from testgen.commands.run_profiling_bridge import run_profiling_in_background -from testgen.ui.components import widgets as testgen from testgen.ui.navigation.page import Page from testgen.ui.services.string_service import empty_if_null from testgen.ui.session import session @@ -72,28 +71,22 @@ def render(self, connection_id: int | None = None) -> None: selected = fm.render_grid_select(df, show_columns, show_column_headers=show_column_headers) - add_modal = testgen.Modal(title=None, key="dk-add-table-group-modal", max_width=1100) - edit_modal = testgen.Modal(title=None, key="dk-edit-table-group-modal", max_width=1100) - delete_modal = testgen.Modal(title=None, key="dk-delete-table-group-modal", max_width=1100) - profile_cli_command_modal = testgen.Modal( - title=None, key="dk-profiling-cli-command-modal", max_width=1100 - ) - profile_command_modal = testgen.Modal(title=None, key="dk-profiling-command-modal", max_width=1100) - if tool_bar.short_slots[1].button( "➕ Add", help="Add a new Table Group", use_container_width=True # NOQA RUF001 ): - add_modal.open() + add_table_group_dialog(project_code, connection) disable_buttons = selected is None if tool_bar.short_slots[2].button( "🖊️ Edit", help="Edit the selected Table Group", disabled=disable_buttons, use_container_width=True ): - edit_modal.open() + edit_table_group_dialog(project_code, connection, selected) + if tool_bar.short_slots[3].button( "❌ Delete", help="Delete the selected Table Group", disabled=disable_buttons, use_container_width=True ): - delete_modal.open() + delete_table_group_dialog(selected) + if 
tool_bar.short_slots[4].button( f":{'gray' if disable_buttons else 'green'}[Test Suites →]", help="Create or edit Test Suites for the selected Table Group", @@ -107,28 +100,13 @@ def render(self, connection_id: int | None = None) -> None: {"connection_id": connection_id, "table_group_id": selected[0]["id"]}, ) - if add_modal.is_open(): - show_add_or_edit_modal(add_modal, "add", project_code, connection) - - if edit_modal.is_open(): - show_add_or_edit_modal(edit_modal, "edit", project_code, connection, selected) - - if delete_modal.is_open(): - show_delete_modal(delete_modal, selected) - - if profile_cli_command_modal.is_open(): - show_profile_cli_command(profile_cli_command_modal, selected) - - if profile_command_modal.is_open(): - show_profile_command(profile_command_modal, selected) - if not selected: st.markdown(":orange[Select a row to see Table Group details.]") else: - show_record_detail(selected[0], profile_cli_command_modal, profile_command_modal) + show_record_detail(selected[0]) -def show_record_detail(selected, profile_cli_command_modal, profile_command_modal): +def show_record_detail(selected): left_column, right_column = st.columns([0.5, 0.5]) with left_column: @@ -191,299 +169,286 @@ def show_record_detail(selected, profile_cli_command_modal, profile_command_moda _, button_column = st.columns([0.3, 0.7]) with button_column: if st.button("Run Profiling", help="Performs profiling on the Table Group", use_container_width=True): - profile_command_modal.open() + run_profiling_dialog(selected) if st.button( "Show Run Profile CLI Command", help="Shows the run-profile CLI command", use_container_width=True ): - profile_cli_command_modal.open() - + run_profiling_cli_dialog(selected) -def show_profile_command(modal, selected): - selected_table_group = selected[0] - - with modal.container(): - fm.render_modal_header("Profiling Command", None) - container = st.empty() - with container: - st.markdown( - ":green[Execute Profile for the Table Group (since can take 
time, it is performed in background)]" - ) - button_container = st.empty() - status_container = st.empty() +@st.dialog(title="Run Profiling") +def run_profiling_dialog(selected_table_group): + container = st.empty() + with container: + st.markdown( + ":green[Execute Profile for the Table Group (since can take time, it is performed in background)]" + ) - with button_container: - start_process_button_message = "Start" - profile_button = st.button(start_process_button_message) + button_container = st.empty() + status_container = st.empty() - if profile_button: - button_container.empty() + with button_container: + start_process_button_message = "Start" + profile_button = st.button(start_process_button_message) - table_group_id = selected_table_group["id"] - status_container.info("Executing Profiling...") + if profile_button: + button_container.empty() - try: - run_profiling_in_background(table_group_id) - except Exception as e: - status_container.empty() - status_container.error(f"Process started with errors: {e!s}.") + table_group_id = selected_table_group["id"] + status_container.info("Executing Profiling...") + try: + run_profiling_in_background(table_group_id) + except Exception as e: status_container.empty() - status_container.success( - "Process has successfully started. Check 'Data Profiling' item in the menu to see the progress." - ) + status_container.error(f"Process started with errors: {e!s}.") + status_container.empty() + status_container.success( + "Process has successfully started. Check 'Data Profiling' item in the menu to see the progress." 
+ ) -def show_profile_cli_command(modal, selected): - with modal.container(): - fm.render_modal_header("Profiling CLI Command", None) - selected_table_group = selected[0] - table_group_id = selected_table_group["id"] - profile_command = f"testgen run-profile --table-group-id {table_group_id}" - st.code(profile_command, language="shellSession") +@st.dialog(title="Run Profiling CLI Command") +def run_profiling_cli_dialog(selected_table_group): + table_group_id = selected_table_group["id"] + profile_command = f"testgen run-profile --table-group-id {table_group_id}" + st.code(profile_command, language="shellSession") -def show_delete_modal(modal, selected=None): - selected_table_group = selected[0] - with modal.container(): - fm.render_modal_header("Delete Table Group", None) - table_group_id = selected_table_group["id"] - table_group_name = selected_table_group["table_groups_name"] +@st.dialog(title="Delete Table Group") +def delete_table_group_dialog(selected): + selected_table_group = selected[0] + table_group_name = selected_table_group["table_groups_name"] + can_be_deleted = table_group_service.cascade_delete([table_group_name], dry_run=True) - can_be_deleted = table_group_service.cascade_delete([table_group_name], dry_run=True) + fm.render_html_list( + selected_table_group, + [ + "id", + "table_groups_name", + "table_group_schema", + ], + "Table Group Information", + int_data_width=700, + ) + + if not can_be_deleted: + st.markdown( + ":orange[This Table Group has related data, which may include profiling, test definitions and test results. If you proceed, all related data will be permanently deleted.
Are you sure you want to proceed?]", + unsafe_allow_html=True, + ) + accept_cascade_delete = st.toggle("I accept deletion of this Table Group and all related TestGen data.") - fm.render_html_list( - selected_table_group, - [ - "id", - "table_groups_name", - "table_group_schema", - ], - "Table Group Information", - int_data_width=700, + with st.form("Delete Table Group", clear_on_submit=True): + disable_delete_button = authentication_service.current_user_has_read_role() or ( + not can_be_deleted and not accept_cascade_delete ) + delete = st.form_submit_button("Delete", disabled=disable_delete_button) - if not can_be_deleted: - st.markdown( - ":orange[This Table Group has related data, which may include profiling, test definitions and test results. If you proceed, all related data will be permanently deleted.
Are you sure you want to proceed?]", - unsafe_allow_html=True, - ) - accept_cascade_delete = st.toggle("I accept deletion of this Table Group and all related TestGen data.") + if delete: + if table_group_service.are_table_groups_in_use([table_group_name]): + st.error("This Table Group is in use by a running process and cannot be deleted.") + else: + table_group_service.cascade_delete([table_group_name]) + success_message = f"Table Group {table_group_name} has been deleted. " + st.success(success_message) + time.sleep(1) + st.rerun() - with st.form("Delete Table Group", clear_on_submit=True): - disable_delete_button = authentication_service.current_user_has_read_role() or ( - not can_be_deleted and not accept_cascade_delete - ) - delete = st.form_submit_button("Delete", disabled=disable_delete_button) - if delete: - if table_group_service.are_table_groups_in_use([table_group_name]): - st.error("This Table Group is in use by a running process and cannot be deleted.") - else: - table_group_service.cascade_delete([table_group_name]) - success_message = f"Table Group {table_group_name} has been deleted. 
" - st.success(success_message) - time.sleep(1) - modal.close() - st.rerun() +def show_table_group_form(mode, project_code, connection, selected=None): + connection_id = connection["connection_id"] + table_groups_settings_tab, table_groups_preview_tab = st.tabs(["Table Group Settings", "Test"]) + with table_groups_settings_tab: + selected_table_group = selected[0] if mode == "edit" else None -def show_add_or_edit_modal(modal, mode, project_code, connection, selected=None): - connection_id = connection["connection_id"] - with modal.container(): - fm.render_modal_header("Edit Table Group" if mode == "edit" else "Add Table Group", None) - table_groups_settings_tab, table_groups_preview_tab = st.tabs(["Table Group Settings", "Test"]) - - with table_groups_settings_tab: - selected_table_group = selected[0] if mode == "edit" else None - - # establish default values - table_group_id = selected_table_group["id"] if mode == "edit" else None - table_groups_name = ( - selected_table_group["table_groups_name"] - if mode == "edit" - else f'{connection["connection_name"]}_table_group' - ) - table_group_schema = selected_table_group["table_group_schema"] if mode == "edit" else "" - profiling_table_set = ( - selected_table_group["profiling_table_set"] - if mode == "edit" and selected_table_group["profiling_table_set"] - else "" - ) - profiling_include_mask = selected_table_group["profiling_include_mask"] if mode == "edit" else "%" - profiling_exclude_mask = selected_table_group["profiling_exclude_mask"] if mode == "edit" else "tmp%" - profile_id_column_mask = selected_table_group["profile_id_column_mask"] if mode == "edit" else "%_id" - profile_sk_column_mask = selected_table_group["profile_sk_column_mask"] if mode == "edit" else "%_sk" - profile_use_sampling = selected_table_group["profile_use_sampling"] == "Y" if mode == "edit" else False - profile_sample_percent = int(selected_table_group["profile_sample_percent"]) if mode == "edit" else 30 - profile_sample_min_count = ( - 
int(selected_table_group["profile_sample_min_count"]) if mode == "edit" else 15000 - ) - profiling_delay_days = int(selected_table_group["profiling_delay_days"]) if mode == "edit" else 0 - - left_column, right_column = st.columns([0.50, 0.50]) - - profile_sampling_expander = st.expander("Sampling Parameters", expanded=False) - with profile_sampling_expander: - expander_left_column, expander_right_column = st.columns([0.50, 0.50]) - - provenance_expander = st.expander("Data Provenance (Optional)", expanded=False) - with provenance_expander: - provenance_left_column, provenance_right_column = st.columns([0.50, 0.50]) - - with st.form("Table Group Add / Edit", clear_on_submit=True): - entity = { - "id": table_group_id, - "project_code": project_code, - "connection_id": connection["connection_id"], - "table_groups_name": left_column.text_input( - label="Name", - max_chars=40, - value=table_groups_name, - help="A unique name to describe the table group", - ), - "profiling_include_mask": left_column.text_input( - label="Tables to Include Mask", - max_chars=40, - value=profiling_include_mask, - help="A SQL filter supported by your database's LIKE operator for table names to include", - ), - "profiling_exclude_mask": left_column.text_input( - label="Tables to Exclude Mask", - max_chars=40, - value=profiling_exclude_mask, - help="A SQL filter supported by your database's LIKE operator for table names to exclude", - ), - "profiling_table_set": left_column.text_input( - label="Explicit Table List", - max_chars=2000, - value=profiling_table_set, - help="A list of specific table names to include, separated by commas", - ), - "table_group_schema": right_column.text_input( - label="Schema", - max_chars=40, - value=table_group_schema, - help="The database schema containing the tables in the Table Group", - ), - "profile_id_column_mask": right_column.text_input( - label="Profiling ID column mask", - max_chars=40, - value=profile_id_column_mask, - help="A SQL filter supported by 
your database's LIKE operator representing ID columns (optional)", - ), - "profile_sk_column_mask": right_column.text_input( - label="Profiling Surrogate Key column mask", - max_chars=40, - value=profile_sk_column_mask, - help="A SQL filter supported by your database's LIKE operator representing surrogate key columns (optional)", - ), - "profiling_delay_days": right_column.number_input( - label="Min Profiling Age, Days", - min_value=0, - max_value=999, - value=profiling_delay_days, - help="The number of days to wait before new profiling will be available to generate tests", - ), - "profile_use_sampling": left_column.toggle( - "Use profile sampling", - value=profile_use_sampling, - help="Toggle on to base profiling on a sample of records instead of the full table", - ), - "profile_sample_percent": str( - expander_left_column.number_input( - label="Sample percent", - min_value=1, - max_value=100, - value=profile_sample_percent, - help="Percent of records to include in the sample, unless the calculated count falls below the specified minimum.", - ) - ), - "profile_sample_min_count": expander_right_column.number_input( - label="Min Sample Record Count", + # establish default values + table_group_id = selected_table_group["id"] if mode == "edit" else None + table_groups_name = ( + selected_table_group["table_groups_name"] + if mode == "edit" + else f'{connection["connection_name"]}_table_group' + ) + table_group_schema = selected_table_group["table_group_schema"] if mode == "edit" else "" + profiling_table_set = ( + selected_table_group["profiling_table_set"] + if mode == "edit" and selected_table_group["profiling_table_set"] + else "" + ) + profiling_include_mask = selected_table_group["profiling_include_mask"] if mode == "edit" else "%" + profiling_exclude_mask = selected_table_group["profiling_exclude_mask"] if mode == "edit" else "tmp%" + profile_id_column_mask = selected_table_group["profile_id_column_mask"] if mode == "edit" else "%_id" + profile_sk_column_mask = 
selected_table_group["profile_sk_column_mask"] if mode == "edit" else "%_sk" + profile_use_sampling = selected_table_group["profile_use_sampling"] == "Y" if mode == "edit" else False + profile_sample_percent = int(selected_table_group["profile_sample_percent"]) if mode == "edit" else 30 + profile_sample_min_count = ( + int(selected_table_group["profile_sample_min_count"]) if mode == "edit" else 15000 + ) + profiling_delay_days = int(selected_table_group["profiling_delay_days"]) if mode == "edit" else 0 + + left_column, right_column = st.columns([0.50, 0.50]) + + profile_sampling_expander = st.expander("Sampling Parameters", expanded=False) + with profile_sampling_expander: + expander_left_column, expander_right_column = st.columns([0.50, 0.50]) + + provenance_expander = st.expander("Data Provenance (Optional)", expanded=False) + with provenance_expander: + provenance_left_column, provenance_right_column = st.columns([0.50, 0.50]) + + with st.form("Table Group Add / Edit", clear_on_submit=True): + entity = { + "id": table_group_id, + "project_code": project_code, + "connection_id": connection["connection_id"], + "table_groups_name": left_column.text_input( + label="Name", + max_chars=40, + value=table_groups_name, + help="A unique name to describe the table group", + ), + "profiling_include_mask": left_column.text_input( + label="Tables to Include Mask", + max_chars=40, + value=profiling_include_mask, + help="A SQL filter supported by your database's LIKE operator for table names to include", + ), + "profiling_exclude_mask": left_column.text_input( + label="Tables to Exclude Mask", + max_chars=40, + value=profiling_exclude_mask, + help="A SQL filter supported by your database's LIKE operator for table names to exclude", + ), + "profiling_table_set": left_column.text_input( + label="Explicit Table List", + max_chars=2000, + value=profiling_table_set, + help="A list of specific table names to include, separated by commas", + ), + "table_group_schema": 
right_column.text_input( + label="Schema", + max_chars=40, + value=table_group_schema, + help="The database schema containing the tables in the Table Group", + ), + "profile_id_column_mask": right_column.text_input( + label="Profiling ID column mask", + max_chars=40, + value=profile_id_column_mask, + help="A SQL filter supported by your database's LIKE operator representing ID columns (optional)", + ), + "profile_sk_column_mask": right_column.text_input( + label="Profiling Surrogate Key column mask", + max_chars=40, + value=profile_sk_column_mask, + help="A SQL filter supported by your database's LIKE operator representing surrogate key columns (optional)", + ), + "profiling_delay_days": right_column.number_input( + label="Min Profiling Age, Days", + min_value=0, + max_value=999, + value=profiling_delay_days, + help="The number of days to wait before new profiling will be available to generate tests", + ), + "profile_use_sampling": left_column.toggle( + "Use profile sampling", + value=profile_use_sampling, + help="Toggle on to base profiling on a sample of records instead of the full table", + ), + "profile_sample_percent": str( + expander_left_column.number_input( + label="Sample percent", min_value=1, - max_value=1000000, - value=profile_sample_min_count, - help="The minimum number of records to be included in any sample (if available)", - ), - "data_source": provenance_left_column.text_input( - label="Data Source", - max_chars=40, - value=empty_if_null(selected_table_group["data_source"]) if mode == "edit" else "", - help="Original source of all tables in this dataset. This can be overridden at the table level. (Optional)", - ), - "source_system": provenance_left_column.text_input( - label="System of Origin", - max_chars=40, - value=empty_if_null(selected_table_group["source_system"]) if mode == "edit" else "", - help="Enterprise system source for all tables in this dataset. " - "This can be overridden at the table level. 
(Optional)", - ), - "business_domain": provenance_left_column.text_input( - label="Business Domain", - max_chars=40, - value=empty_if_null(selected_table_group["business_domain"]) if mode == "edit" else "", - help="Business division responsible for all tables in this dataset. " - "e.g. Finance, Sales, Manufacturing. (Optional)", - ), - "data_location": provenance_left_column.text_input( - label="Location", - max_chars=40, - value=empty_if_null(selected_table_group["data_location"]) if mode == "edit" else "", - help="Physical or virtual location of all tables in this dataset. " - "e.g. Headquarters, Cloud, etc. (Optional)", - ), - "transform_level": provenance_right_column.text_input( - label="Transform Level", - max_chars=40, - value=empty_if_null(selected_table_group["transform_level"]) if mode == "edit" else "", - help="Data warehouse processing layer. " - "Indicates the processing stage: e.g. Raw, Conformed, Processed, Reporting. (Optional)", - ), - "source_process": provenance_right_column.text_input( - label="Source Process", - max_chars=40, - value=empty_if_null(selected_table_group["source_process"]) if mode == "edit" else "", - help="The process, program or data flow that produced this data. (Optional)", - ), - "stakeholder_group": provenance_right_column.text_input( - label="Stakeholder Group", - max_chars=40, - value=empty_if_null(selected_table_group["stakeholder_group"]) if mode == "edit" else "", - help="Designator for data owners or stakeholders who are responsible for this data. (Optional)", - ), - } - - submit_button_text = "Save" if mode == "edit" else "Add" - submit = st.form_submit_button( - submit_button_text, disabled=authentication_service.current_user_has_read_role() - ) - - if submit: - if mode == "edit": - table_group_service.edit(entity) - else: - table_group_service.add(entity) - success_message = ( - "Changes have been saved successfully. " - if mode == "edit" - else "New Table Group added successfully. 
" + max_value=100, + value=profile_sample_percent, + help="Percent of records to include in the sample, unless the calculated count falls below the specified minimum.", ) - st.success(success_message) - time.sleep(1) - modal.close() - st.rerun() + ), + "profile_sample_min_count": expander_right_column.number_input( + label="Min Sample Record Count", + min_value=1, + max_value=1000000, + value=profile_sample_min_count, + help="The minimum number of records to be included in any sample (if available)", + ), + "data_source": provenance_left_column.text_input( + label="Data Source", + max_chars=40, + value=empty_if_null(selected_table_group["data_source"]) if mode == "edit" else "", + help="Original source of all tables in this dataset. This can be overridden at the table level. (Optional)", + ), + "source_system": provenance_left_column.text_input( + label="System of Origin", + max_chars=40, + value=empty_if_null(selected_table_group["source_system"]) if mode == "edit" else "", + help="Enterprise system source for all tables in this dataset. " + "This can be overridden at the table level. (Optional)", + ), + "business_domain": provenance_left_column.text_input( + label="Business Domain", + max_chars=40, + value=empty_if_null(selected_table_group["business_domain"]) if mode == "edit" else "", + help="Business division responsible for all tables in this dataset. " + "e.g. Finance, Sales, Manufacturing. (Optional)", + ), + "data_location": provenance_left_column.text_input( + label="Location", + max_chars=40, + value=empty_if_null(selected_table_group["data_location"]) if mode == "edit" else "", + help="Physical or virtual location of all tables in this dataset. " + "e.g. Headquarters, Cloud, etc. (Optional)", + ), + "transform_level": provenance_right_column.text_input( + label="Transform Level", + max_chars=40, + value=empty_if_null(selected_table_group["transform_level"]) if mode == "edit" else "", + help="Data warehouse processing layer. 
" + "Indicates the processing stage: e.g. Raw, Conformed, Processed, Reporting. (Optional)", + ), + "source_process": provenance_right_column.text_input( + label="Source Process", + max_chars=40, + value=empty_if_null(selected_table_group["source_process"]) if mode == "edit" else "", + help="The process, program or data flow that produced this data. (Optional)", + ), + "stakeholder_group": provenance_right_column.text_input( + label="Stakeholder Group", + max_chars=40, + value=empty_if_null(selected_table_group["stakeholder_group"]) if mode == "edit" else "", + help="Designator for data owners or stakeholders who are responsible for this data. (Optional)", + ), + } + + submit_button_text = "Save" if mode == "edit" else "Add" + submit = st.form_submit_button( + submit_button_text, disabled=authentication_service.current_user_has_read_role() + ) - with table_groups_preview_tab: + if submit: if mode == "edit": - preview_left_column, preview_right_column = st.columns([0.5, 0.5]) - status_preview = preview_right_column.empty() - preview = preview_left_column.button("Test Table Group") - if preview: - table_group_preview(entity, connection_id, project_code, status_preview) + table_group_service.edit(entity) else: - st.write("No preview available while adding a Table Group. Save the configuration first.") + table_group_service.add(entity) + success_message = ( + "Changes have been saved successfully. " + if mode == "edit" + else "New Table Group added successfully. " + ) + st.success(success_message) + time.sleep(1) + st.rerun() + + with table_groups_preview_tab: + if mode == "edit": + preview_left_column, preview_right_column = st.columns([0.5, 0.5]) + status_preview = preview_right_column.empty() + preview = preview_left_column.button("Test Table Group") + if preview: + table_group_preview(entity, connection_id, project_code, status_preview) + else: + st.write("No preview available while adding a Table Group. 
Save the configuration first.") def table_group_preview(entity, connection_id, project_code, status): @@ -529,3 +494,13 @@ def show_test_results(schemas, tables, columns, qc_results): tables_df = pd.DataFrame({"[tables]": list(tables)}) fm.render_grid_select(tables_df, ["[tables]"]) + + +@st.dialog(title="Add Table Group") +def add_table_group_dialog(project_code, connection): + show_table_group_form("add", project_code, connection) + + +@st.dialog(title="Edit Table Group") +def edit_table_group_dialog(project_code, connection, selected): + show_table_group_form("edit", project_code, connection, selected) diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py index f8813ca..76f1f0c 100644 --- a/testgen/ui/views/test_definitions.py +++ b/testgen/ui/views/test_definitions.py @@ -10,13 +10,12 @@ import testgen.ui.services.test_definition_service as test_definition_service import testgen.ui.services.toolbar_service as tb from testgen.common import date_service -from testgen.ui.components import widgets as testgen from testgen.ui.navigation.menu import MenuItem from testgen.ui.navigation.page import Page from testgen.ui.services import authentication_service from testgen.ui.services.string_service import empty_if_null, snake_case_to_title_case from testgen.ui.session import session -from testgen.ui.views.profiling_modal import view_profiling_modal +from testgen.ui.views.profiling_modal import view_profiling_button LOG = logging.getLogger("testgen") @@ -53,12 +52,6 @@ def render(self, **_) -> None: tool_bar = tb.ToolBar(5, 6, 4, None, multiline=True) - add_test_definition_modal = testgen.Modal(title=None, key="dk-add-test-definition", max_width=1100) - edit_test_definition_modal = testgen.Modal(title=None, key="dk-edit-test-definition", max_width=1100) - delete_test_definition_modal = testgen.Modal( - title=None, key="dk-delete-test-definition", max_width=1100 - ) - with tool_bar.long_slots[0]: str_connection_id, connection = 
prompt_for_connection(session.project, connection) @@ -86,7 +79,7 @@ def render(self, **_) -> None: if tool_bar.short_slots[0].button( "➕ Add", help="Add a new Test Definition", use_container_width=True # NOQA RUF001 ): - add_test_definition_modal.open() + add_test_dialog(project_code, table_group, test_suite, str_table_name, str_column_name) selected = show_test_defs_grid( session.project, str_test_suite, str_table_name, str_column_name, do_multi_select, export_container, @@ -127,13 +120,16 @@ def render(self, **_) -> None: lst_cached_functions=[], ) + if selected: + selected_test_def = selected[0] + if tool_bar.short_slots[1].button( "🖊️ Edit", # RUF001 help="Edit the Test Definition", use_container_width=True, disabled=not selected, ): - edit_test_definition_modal.open() + edit_test_dialog(project_code, table_group, test_suite, str_table_name, str_column_name, selected_test_def) if tool_bar.short_slots[2].button( "❌ Delete", @@ -141,34 +137,10 @@ def render(self, **_) -> None: use_container_width=True, disabled=not selected, ): - delete_test_definition_modal.open() - - if selected: - selected_test_def = selected[0] + delete_test_dialog(selected_test_def) else: - st.markdown(":orange[Select a Test Suite and Table Name to view Test Definition details.]") - - # ----- Modal forms - if add_test_definition_modal.is_open(): - show_add_edit_modal( - add_test_definition_modal, "add", project_code, table_group, test_suite, str_table_name, str_column_name - ) - - if edit_test_definition_modal.is_open(): - show_add_edit_modal( - edit_test_definition_modal, - "edit", - project_code, - table_group, - test_suite, - str_table_name, - str_column_name, - selected_test_def, - ) - - if delete_test_definition_modal.is_open(): - show_delete_modal(delete_test_definition_modal, selected_test_def) + st.markdown(":orange[Select a Test Suite and Table Name to view Test Definition details.]") class TestDefinitionsPageFromSuite(TestDefinitionsPage): @@ -183,49 +155,48 @@ class 
TestDefinitionsPageFromSuite(TestDefinitionsPage): menu_item = None -def show_delete_modal(modal, selected_test_definition=None): - with modal.container(): - fm.render_modal_header("Delete Test", None) - test_definition_id = selected_test_definition["id"] - test_name_short = selected_test_definition["test_name_short"] - - can_be_deleted = test_definition_service.delete([test_definition_id], dry_run=True) - - fm.render_html_list( - selected_test_definition, - [ - "id", - "project_code", - "schema_name", - "table_name", - "column_name", - "test_name_short", - "table_groups_id", - "test_suite", - "test_active_display", - "test_description", - "last_manual_update", - ], - "Test Definition Information", - int_data_width=700, - ) +@st.dialog("Delete Test") +def delete_test_dialog(selected_test_definition): + test_definition_id = selected_test_definition["id"] + test_name_short = selected_test_definition["test_name_short"] - with st.form("Delete Test Definition", clear_on_submit=True): - disable_delete_button = authentication_service.current_user_has_read_role() or not can_be_deleted - delete = st.form_submit_button("Delete", disabled=disable_delete_button) + can_be_deleted = test_definition_service.delete([test_definition_id], dry_run=True) - if delete: - test_definition_service.delete([test_definition_id]) - success_message = f"Test Definition {test_name_short} has been deleted. 
" - st.success(success_message) - time.sleep(1) - modal.close() + fm.render_html_list( + selected_test_definition, + [ + "id", + "project_code", + "schema_name", + "table_name", + "column_name", + "test_name_short", + "table_groups_id", + "test_suite", + "test_active_display", + "test_description", + "last_manual_update", + ], + "Test Definition Information", + int_data_width=700, + ) + + with st.form("Delete Test Definition", clear_on_submit=True): + disable_delete_button = authentication_service.current_user_has_read_role() or not can_be_deleted + delete = st.form_submit_button("Delete", disabled=disable_delete_button) + + if delete: + test_definition_service.delete([test_definition_id]) + success_message = f"Test Definition {test_name_short} has been deleted. " + st.success(success_message) + time.sleep(1) + st.rerun() - if not can_be_deleted: - st.markdown(":orange[This Test Definition cannot be deleted because it is being used in existing tests.]") + if not can_be_deleted: + st.markdown(":orange[This Test Definition cannot be deleted because it is being used in existing tests.]") -def show_add_edit_modal_by_test_definition(test_definition_modal, test_definition_id): +def show_test_form_by_id(test_definition_id): selected_test_raw = test_definition_service.get_test_definitions(test_definition_ids=[test_definition_id]) test_definition = selected_test_raw.iloc[0].to_dict() @@ -243,13 +214,12 @@ def show_add_edit_modal_by_test_definition(test_definition_modal, test_definitio if not test_suite_raw.empty: test_suite = test_suite_raw.iloc[0].to_dict() - show_add_edit_modal( - test_definition_modal, mode, project_code, table_group, test_suite, table_name, column_name, test_definition + edit_test_dialog( + project_code, table_group, test_suite, table_name, column_name, test_definition ) -def show_add_edit_modal( - test_definition_modal, +def show_test_form( mode, project_code, table_group, @@ -258,383 +228,391 @@ def show_add_edit_modal( str_column_name, 
selected_test_def=None, ): - with test_definition_modal.container(): - fm.render_modal_header("Add Test" if mode == "add" else "Edit Test", None) - # test_type logic - if mode == "add": - selected_test_type, selected_test_type_row = prompt_for_test_type() - test_type = selected_test_type - else: - test_type = selected_test_def["test_type"] - df = run_test_type_lookup_query() - selected_test_type_row = df[df["test_type"] == test_type].iloc[0] - test_type_display = selected_test_type_row["test_name_short"] - - if selected_test_type_row is None: - return - - # run type - run_type = selected_test_type_row["run_type"] # Can be "QUERY" or "CAT" - test_scope = selected_test_type_row["test_scope"] # Can be "column", "table", "referential", "custom" - - # test_description - test_description = empty_if_null(selected_test_def["test_description"]) if mode == "edit" else "" - test_type_test_description = selected_test_type_row["test_description"] - test_description_help = ( - "You may enter a description here to override the default description above for the Test Type." 
- ) - test_description_placeholder = f"Inherited ({test_type_test_description})" + # test_type logic + if mode == "add": + selected_test_type, selected_test_type_row = prompt_for_test_type() + test_type = selected_test_type + else: + test_type = selected_test_def["test_type"] + df = run_test_type_lookup_query() + selected_test_type_row = df[df["test_type"] == test_type].iloc[0] + test_type_display = selected_test_type_row["test_name_short"] + + if selected_test_type_row is None: + return + + # run type + run_type = selected_test_type_row["run_type"] # Can be "QUERY" or "CAT" + test_scope = selected_test_type_row["test_scope"] # Can be "column", "table", "referential", "custom" + + # test_description + test_description = empty_if_null(selected_test_def["test_description"]) if mode == "edit" else "" + test_type_test_description = selected_test_type_row["test_description"] + test_description_help = ( + "You may enter a description here to override the default description above for the Test Type." 
+ ) + test_description_placeholder = f"Inherited ({test_type_test_description})" - # severity - test_suite_severity = test_suite["severity"] - test_types_severity = selected_test_type_row["default_severity"] - inherited_severity = test_suite_severity if test_suite_severity else test_types_severity + # severity + test_suite_severity = test_suite["severity"] + test_types_severity = selected_test_type_row["default_severity"] + inherited_severity = test_suite_severity if test_suite_severity else test_types_severity - severity_options = [f"Inherited ({inherited_severity})", "Warning", "Fail"] - if mode == "add" or selected_test_def["severity"] is None: - severity_index = 0 - else: - severity_index = severity_options.index(selected_test_def["severity"]) - - # general value parsing - entity_id = selected_test_def["id"] if mode == "edit" else "" - cat_test_id = selected_test_def["cat_test_id"] if mode == "edit" else "" - project_code = selected_test_def["project_code"] if mode == "edit" else project_code - table_groups_id = selected_test_def["table_groups_id"] if mode == "edit" else table_group["id"] - profile_run_id = selected_test_def["profile_run_id"] if mode == "edit" else "" - test_suite_name = selected_test_def["test_suite"] if mode == "edit" else test_suite["test_suite"] - test_suite_id = test_suite["id"] - test_action = empty_if_null(selected_test_def["test_action"]) if mode == "edit" else "" - schema_name = selected_test_def["schema_name"] if mode == "edit" else table_group["table_group_schema"] - table_name = empty_if_null(selected_test_def["table_name"]) if mode == "edit" else empty_if_null(str_table_name) - skip_errors = selected_test_def["skip_errors"] if mode == "edit" else 0 - test_active = selected_test_def["test_active"] == "Y" if mode == "edit" else True - lock_refresh = selected_test_def["lock_refresh"] == "Y" if mode == "edit" else False - test_definition_status = selected_test_def["test_definition_status"] if mode == "edit" else "" - check_result = 
selected_test_def["check_result"] if mode == "edit" else None - column_name = empty_if_null(selected_test_def["column_name"]) if mode == "edit" else "" - - # dynamic attributes - custom_query = empty_if_null(selected_test_def["custom_query"]) if mode == "edit" else "" - baseline_ct = empty_if_null(selected_test_def["baseline_ct"]) if mode == "edit" else "" - baseline_unique_ct = empty_if_null(selected_test_def["baseline_unique_ct"]) if mode == "edit" else "" - baseline_value = empty_if_null(selected_test_def["baseline_value"]) if mode == "edit" else "" - baseline_value_ct = empty_if_null(selected_test_def["baseline_value_ct"]) if mode == "edit" else "" - threshold_value = empty_if_null(selected_test_def["threshold_value"]) if mode == "edit" else 0 - baseline_sum = empty_if_null(selected_test_def["baseline_sum"]) if mode == "edit" else "" - baseline_avg = empty_if_null(selected_test_def["baseline_avg"]) if mode == "edit" else "" - baseline_sd = empty_if_null(selected_test_def["baseline_sd"]) if mode == "edit" else "" - subset_condition = empty_if_null(selected_test_def["subset_condition"]) if mode == "edit" else "" - groupby_names = empty_if_null(selected_test_def["groupby_names"]) if mode == "edit" else "" - having_condition = empty_if_null(selected_test_def["having_condition"]) if mode == "edit" else "" - window_date_column = empty_if_null(selected_test_def["window_date_column"]) if mode == "edit" else "" - match_schema_name = empty_if_null(selected_test_def["match_schema_name"]) if mode == "edit" else "" - match_table_name = empty_if_null(selected_test_def["match_table_name"]) if mode == "edit" else "" - match_column_names = empty_if_null(selected_test_def["match_column_names"]) if mode == "edit" else "" - match_subset_condition = empty_if_null(selected_test_def["match_subset_condition"]) if mode == "edit" else "" - match_groupby_names = empty_if_null(selected_test_def["match_groupby_names"]) if mode == "edit" else "" - match_having_condition = 
empty_if_null(selected_test_def["match_having_condition"]) if mode == "edit" else "" - window_days = selected_test_def["window_days"] if mode == "edit" and selected_test_def["window_days"] else 0 - test_mode = empty_if_null(selected_test_def["test_mode"]) if mode == "edit" else "" - - # export_to_observability - test_suite_export_to_observability = test_suite["export_to_observability"] - inherited_export_to_observability = "Yes" if test_suite_export_to_observability == "Y" else "No" - - inherited_legend = f"Inherited ({inherited_export_to_observability})" - export_to_observability_options = [inherited_legend, "Yes", "No"] - if mode == "edit": - match selected_test_def["export_to_observability_raw"]: - case "N": - export_to_observability = "No" - case "Y": - export_to_observability = "Yes" - case _: - export_to_observability = inherited_legend - else: - export_to_observability = inherited_legend - export_to_observability_index = export_to_observability_options.index(export_to_observability) - - # watch_level - watch_level = selected_test_def["watch_level"] if mode == "edit" else "WARN" - - # dynamic attributes - dynamic_attributes_raw = selected_test_type_row["default_parm_columns"] - dynamic_attributes = dynamic_attributes_raw.split(",") - - dynamic_attributes_labels_raw = selected_test_type_row["default_parm_prompts"] - dynamic_attributes_labels = dynamic_attributes_labels_raw.split(",") - - dynamic_attributes_help_raw = selected_test_type_row["default_parm_help"] - if not dynamic_attributes_help_raw: - dynamic_attributes_help_raw = "No help is available" - # Split on pipe -- could contain commas - dynamic_attributes_help = dynamic_attributes_help_raw.split("|") - - if mode == "edit": - st.text_input(label="Test Type", value=test_type_display, disabled=True), - - # Using the test_type, display the default description and usage_notes - if selected_test_type_row["test_description"]: - st.markdown( - f""" -
- {selected_test_type_row['test_description']} -

- """, - unsafe_allow_html=True, - ) - - if selected_test_type_row["usage_notes"]: - st.info(f"**Usage Notes:**\n\n{selected_test_type_row['usage_notes']}") + severity_options = [f"Inherited ({inherited_severity})", "Warning", "Fail"] + if mode == "add" or selected_test_def["severity"] is None: + severity_index = 0 + else: + severity_index = severity_options.index(selected_test_def["severity"]) + + # general value parsing + entity_id = selected_test_def["id"] if mode == "edit" else "" + cat_test_id = selected_test_def["cat_test_id"] if mode == "edit" else "" + project_code = selected_test_def["project_code"] if mode == "edit" else project_code + table_groups_id = selected_test_def["table_groups_id"] if mode == "edit" else table_group["id"] + profile_run_id = selected_test_def["profile_run_id"] if mode == "edit" else "" + test_suite_name = selected_test_def["test_suite"] if mode == "edit" else test_suite["test_suite"] + test_suite_id = test_suite["id"] + test_action = empty_if_null(selected_test_def["test_action"]) if mode == "edit" else "" + schema_name = selected_test_def["schema_name"] if mode == "edit" else table_group["table_group_schema"] + table_name = empty_if_null(selected_test_def["table_name"]) if mode == "edit" else empty_if_null(str_table_name) + skip_errors = selected_test_def["skip_errors"] if mode == "edit" else 0 + test_active = selected_test_def["test_active"] == "Y" if mode == "edit" else True + lock_refresh = selected_test_def["lock_refresh"] == "Y" if mode == "edit" else False + test_definition_status = selected_test_def["test_definition_status"] if mode == "edit" else "" + check_result = selected_test_def["check_result"] if mode == "edit" else None + column_name = empty_if_null(selected_test_def["column_name"]) if mode == "edit" else "" + + # dynamic attributes + custom_query = empty_if_null(selected_test_def["custom_query"]) if mode == "edit" else "" + baseline_ct = empty_if_null(selected_test_def["baseline_ct"]) if mode == "edit" else "" + 
baseline_unique_ct = empty_if_null(selected_test_def["baseline_unique_ct"]) if mode == "edit" else "" + baseline_value = empty_if_null(selected_test_def["baseline_value"]) if mode == "edit" else "" + baseline_value_ct = empty_if_null(selected_test_def["baseline_value_ct"]) if mode == "edit" else "" + threshold_value = empty_if_null(selected_test_def["threshold_value"]) if mode == "edit" else 0 + baseline_sum = empty_if_null(selected_test_def["baseline_sum"]) if mode == "edit" else "" + baseline_avg = empty_if_null(selected_test_def["baseline_avg"]) if mode == "edit" else "" + baseline_sd = empty_if_null(selected_test_def["baseline_sd"]) if mode == "edit" else "" + subset_condition = empty_if_null(selected_test_def["subset_condition"]) if mode == "edit" else "" + groupby_names = empty_if_null(selected_test_def["groupby_names"]) if mode == "edit" else "" + having_condition = empty_if_null(selected_test_def["having_condition"]) if mode == "edit" else "" + window_date_column = empty_if_null(selected_test_def["window_date_column"]) if mode == "edit" else "" + match_schema_name = empty_if_null(selected_test_def["match_schema_name"]) if mode == "edit" else "" + match_table_name = empty_if_null(selected_test_def["match_table_name"]) if mode == "edit" else "" + match_column_names = empty_if_null(selected_test_def["match_column_names"]) if mode == "edit" else "" + match_subset_condition = empty_if_null(selected_test_def["match_subset_condition"]) if mode == "edit" else "" + match_groupby_names = empty_if_null(selected_test_def["match_groupby_names"]) if mode == "edit" else "" + match_having_condition = empty_if_null(selected_test_def["match_having_condition"]) if mode == "edit" else "" + window_days = selected_test_def["window_days"] if mode == "edit" and selected_test_def["window_days"] else 0 + test_mode = empty_if_null(selected_test_def["test_mode"]) if mode == "edit" else "" + + # export_to_observability + test_suite_export_to_observability = 
test_suite["export_to_observability"] + inherited_export_to_observability = "Yes" if test_suite_export_to_observability == "Y" else "No" + + inherited_legend = f"Inherited ({inherited_export_to_observability})" + export_to_observability_options = [inherited_legend, "Yes", "No"] + if mode == "edit": + match selected_test_def["export_to_observability_raw"]: + case "N": + export_to_observability = "No" + case "Y": + export_to_observability = "Yes" + case _: + export_to_observability = inherited_legend + else: + export_to_observability = inherited_legend + export_to_observability_index = export_to_observability_options.index(export_to_observability) + + # watch_level + watch_level = selected_test_def["watch_level"] if mode == "edit" else "WARN" + + # dynamic attributes + dynamic_attributes_raw = selected_test_type_row["default_parm_columns"] + dynamic_attributes = dynamic_attributes_raw.split(",") + + dynamic_attributes_labels_raw = selected_test_type_row["default_parm_prompts"] + dynamic_attributes_labels = dynamic_attributes_labels_raw.split(",") + + dynamic_attributes_help_raw = selected_test_type_row["default_parm_help"] + if not dynamic_attributes_help_raw: + dynamic_attributes_help_raw = "No help is available" + # Split on pipe -- could contain commas + dynamic_attributes_help = dynamic_attributes_help_raw.split("|") + + if mode == "edit": + st.text_input(label="Test Type", value=test_type_display, disabled=True), + + # Using the test_type, display the default description and usage_notes + if selected_test_type_row["test_description"]: + st.markdown( + f""" +
+ {selected_test_type_row['test_description']} +

+ """, + unsafe_allow_html=True, + ) - left_column, right_column = st.columns([0.5, 0.5]) + if selected_test_type_row["usage_notes"]: + st.info(f"**Usage Notes:**\n\n{selected_test_type_row['usage_notes']}") + + left_column, right_column = st.columns([0.5, 0.5]) + + test_definition = { + "id": entity_id, + "cat_test_id": cat_test_id, + "watch_level": watch_level, + "project_code": project_code, + "table_groups_id": table_groups_id, + "profile_run_id": profile_run_id, + "test_type": test_type, + "test_suite": left_column.text_input( + label="Test Suite Name", max_chars=200, value=test_suite_name, disabled=True + ), + "test_suite_id": test_suite_id, + "test_description": left_column.text_area( + label="Test Description Override", + max_chars=1000, + height=3, + placeholder=test_description_placeholder, + value=test_description, + help=test_description_help, + ), + "test_action": test_action, + "test_mode": test_mode, + "lock_refresh": left_column.toggle( + label="Lock Refresh", + value=lock_refresh, + help="Protects test parameters from being overwritten when tests in this Test Suite are regenerated.", + ), + "schema_name": right_column.text_input( + label="Schema Name", max_chars=100, value=schema_name, disabled=True + ), + "test_active": left_column.toggle(label="Test Active", value=test_active), + "check_result": check_result, + "custom_query": custom_query, + "baseline_ct": baseline_ct, + "baseline_unique_ct": baseline_unique_ct, + "baseline_value": baseline_value, + "baseline_value_ct": baseline_value_ct, + "threshold_value": threshold_value, + "baseline_sum": baseline_sum, + "baseline_avg": baseline_avg, + "baseline_sd": baseline_sd, + "subset_condition": subset_condition, + "groupby_names": groupby_names, + "having_condition": having_condition, + "window_date_column": window_date_column, + "match_schema_name": match_schema_name, + "match_table_name": match_table_name, + "column_name": column_name, + "match_column_names": match_column_names, + 
"match_subset_condition": match_subset_condition, + "match_groupby_names": match_groupby_names, + "match_having_condition": match_having_condition, + "window_days": window_days, + } + + # test_definition_status + test_definition["test_definition_status"] = test_definition_status + if mode == "edit": + test_definition_status_display = test_definition_status if test_definition_status else "OK" + left_column.text_input( + label="Validation Status", max_chars=200, value=test_definition_status_display, disabled=True + ) - test_definition = { - "id": entity_id, - "cat_test_id": cat_test_id, - "watch_level": watch_level, - "project_code": project_code, - "table_groups_id": table_groups_id, - "profile_run_id": profile_run_id, - "test_type": test_type, - "test_suite": left_column.text_input( - label="Test Suite Name", max_chars=200, value=test_suite_name, disabled=True - ), - "test_suite_id": test_suite_id, - "test_description": left_column.text_area( - label="Test Description Override", - max_chars=1000, - height=3, - placeholder=test_description_placeholder, - value=test_description, - help=test_description_help, - ), - "test_action": test_action, - "test_mode": test_mode, - "lock_refresh": left_column.toggle( - label="Lock Refresh", - value=lock_refresh, - help="Protects test parameters from being overwritten when tests in this Test Suite are regenerated.", - ), - "schema_name": right_column.text_input( - label="Schema Name", max_chars=100, value=schema_name, disabled=True - ), - "test_active": left_column.toggle(label="Test Active", value=test_active), - "check_result": check_result, - "custom_query": custom_query, - "baseline_ct": baseline_ct, - "baseline_unique_ct": baseline_unique_ct, - "baseline_value": baseline_value, - "baseline_value_ct": baseline_value_ct, - "threshold_value": threshold_value, - "baseline_sum": baseline_sum, - "baseline_avg": baseline_avg, - "baseline_sd": baseline_sd, - "subset_condition": subset_condition, - "groupby_names": groupby_names, - 
"having_condition": having_condition, - "window_date_column": window_date_column, - "match_schema_name": match_schema_name, - "match_table_name": match_table_name, - "column_name": column_name, - "match_column_names": match_column_names, - "match_subset_condition": match_subset_condition, - "match_groupby_names": match_groupby_names, - "match_having_condition": match_having_condition, - "window_days": window_days, - } - - # test_definition_status - test_definition["test_definition_status"] = test_definition_status - if mode == "edit": - test_definition_status_display = test_definition_status if test_definition_status else "OK" - left_column.text_input( - label="Validation Status", max_chars=200, value=test_definition_status_display, disabled=True - ) + # export_to_observability + export_to_observability_help = "Send results to DataKitchen Observability - overrides Test Suite toggle" + test_definition["export_to_observability_raw"] = right_column.selectbox( + label="Send to Observability - Override", + options=export_to_observability_options, + index=export_to_observability_index, + help=export_to_observability_help, + ) - # export_to_observability - export_to_observability_help = "Send results to DataKitchen Observability - overrides Test Suite toggle" - test_definition["export_to_observability_raw"] = right_column.selectbox( - label="Send to Observability - Override", - options=export_to_observability_options, - index=export_to_observability_index, - help=export_to_observability_help, - ) + # severity + severity_help = "Urgency is defined by default for the Test Type, but can be overridden for all tests in the Test Suite, and ultimately here for each individual test." 
+ test_definition["severity"] = right_column.selectbox( + label="Urgency Override", + options=severity_options, + index=severity_index, + help=severity_help, + ) - # severity - severity_help = "Urgency is defined by default for the Test Type, but can be overridden for all tests in the Test Suite, and ultimately here for each individual test." - test_definition["severity"] = right_column.selectbox( - label="Urgency Override", - options=severity_options, - index=severity_index, - help=severity_help, - ) + st.divider() - st.divider() + # table_name + test_definition["table_name"] = st.text_input( + label="Table Name", max_chars=100, value=table_name, disabled=False + ) - # table_name - test_definition["table_name"] = st.text_input( - label="Table Name", max_chars=100, value=table_name, disabled=False + # column_name + if selected_test_type_row["column_name_prompt"]: + column_name_label = selected_test_type_row["column_name_prompt"] + else: + column_name_label = "Test Focus" + if selected_test_type_row["column_name_help"]: + column_name_help = selected_test_type_row["column_name_help"] + else: + column_name_help = "Help is not available" + + if test_scope == "table": + test_definition["column_name"] = None + column_name_label = None + elif test_scope == "referential": + column_name_disabled = False + test_definition["column_name"] = st.text_input( + label=column_name_label, + value=column_name, + max_chars=500, + help=column_name_help, + disabled=column_name_disabled, ) - - # column_name - if selected_test_type_row["column_name_prompt"]: - column_name_label = selected_test_type_row["column_name_prompt"] - else: - column_name_label = "Test Focus" - if selected_test_type_row["column_name_help"]: - column_name_help = selected_test_type_row["column_name_help"] + elif test_scope == "custom": + if str_column_name: + if mode == "add": # query add present + column_name_disabled = False + column_name = str_column_name + else: # query edit present + column_name_disabled = False 
+ column_name = str_column_name else: - column_name_help = "Help is not available" + if mode == "add": # query add not-present + column_name_disabled = False + else: # query edit not-present + column_name_disabled = False - if test_scope == "table": - test_definition["column_name"] = None - column_name_label = None - elif test_scope == "referential": - column_name_disabled = False - test_definition["column_name"] = st.text_input( - label=column_name_label, - value=column_name, - max_chars=500, - help=column_name_help, - disabled=column_name_disabled, - ) - elif test_scope == "custom": - if str_column_name: - if mode == "add": # query add present - column_name_disabled = False - column_name = str_column_name - else: # query edit present - column_name_disabled = False - column_name = str_column_name + test_definition["column_name"] = st.text_input( + label=column_name_label, + value=column_name, + max_chars=100, + help=column_name_help, + disabled=column_name_disabled, + ) + elif test_scope == "column": # CAT column test + if str_column_name: + column_name_disabled = True + if mode == "add": + column_name = str_column_name # CAT add present else: - if mode == "add": # query add not-present - column_name_disabled = False - else: # query edit not-present - column_name_disabled = False - - test_definition["column_name"] = st.text_input( - label=column_name_label, - value=column_name, - max_chars=100, - help=column_name_help, - disabled=column_name_disabled, - ) - elif test_scope == "column": # CAT column test - if str_column_name: - column_name_disabled = True - if mode == "add": - column_name = str_column_name # CAT add present - else: - pass # CAT edit present + pass # CAT edit present + else: + column_name_disabled = False + if mode == "add": + pass # CAT add not-present else: - column_name_disabled = False - if mode == "add": - pass # CAT add not-present - else: - pass # CAT edit not-present - - column_name_label = "Column Name" - column_name_options = 
get_column_names(table_groups_id, test_definition["table_name"]) - column_name_help = "Select the column to test" - column_name_index = column_name_options.index(column_name) if column_name else 0 - test_definition["column_name"] = st.selectbox( - label=column_name_label, - options=column_name_options, - index=column_name_index, - help=column_name_help, - key="column-name-form", - disabled=column_name_disabled, - ) + pass # CAT edit not-present + + column_name_label = "Column Name" + column_name_options = get_column_names(table_groups_id, test_definition["table_name"]) + column_name_help = "Select the column to test" + column_name_index = column_name_options.index(column_name) if column_name else 0 + test_definition["column_name"] = st.selectbox( + label=column_name_label, + options=column_name_options, + index=column_name_index, + help=column_name_help, + key="column-name-form", + disabled=column_name_disabled, + ) - st.divider() + st.divider() - # dynamic attributes - mid_left_column, mid_right_column = st.columns([0.5, 0.5]) + # dynamic attributes + mid_left_column, mid_right_column = st.columns([0.5, 0.5]) - current_column = mid_left_column - show_custom_query = False - dynamic_attributes_length = len(dynamic_attributes) - dynamic_attributes_half_length = max(round((dynamic_attributes_length + 0.5) / 2), 1) - for i, dynamic_attribute in enumerate(dynamic_attributes): - if i >= dynamic_attributes_half_length: - current_column = mid_right_column + current_column = mid_left_column + show_custom_query = False + dynamic_attributes_length = len(dynamic_attributes) + dynamic_attributes_half_length = max(round((dynamic_attributes_length + 0.5) / 2), 1) + for i, dynamic_attribute in enumerate(dynamic_attributes): + if i >= dynamic_attributes_half_length: + current_column = mid_right_column - value = empty_if_null(selected_test_def[dynamic_attribute]) if mode == "edit" else "" + value = empty_if_null(selected_test_def[dynamic_attribute]) if mode == "edit" else "" - 
actual_dynamic_attributes_labels = ( - dynamic_attributes_labels[i] - if dynamic_attributes_labels and len(dynamic_attributes_labels) > i - else "Help text is not available." - ) - - actual_dynamic_attributes_help = ( - dynamic_attributes_help[i] - if dynamic_attributes_help and len(dynamic_attributes_help) > i - else snake_case_to_title_case(dynamic_attribute) - ) + actual_dynamic_attributes_labels = ( + dynamic_attributes_labels[i] + if dynamic_attributes_labels and len(dynamic_attributes_labels) > i + else "Help text is not available." + ) - if dynamic_attribute in ["custom_query"]: - show_custom_query = True - else: - test_definition[dynamic_attribute] = current_column.text_input( - label=actual_dynamic_attributes_labels, - max_chars=4000 if dynamic_attribute in ["match_column_names", "match_groupby_names", "groupby_names"] else 1000, - value=value, - help=actual_dynamic_attributes_help, - ) + actual_dynamic_attributes_help = ( + dynamic_attributes_help[i] + if dynamic_attributes_help and len(dynamic_attributes_help) > i + else snake_case_to_title_case(dynamic_attribute) + ) - # Custom Query - if show_custom_query: - if test_type == "Condition_Flag": - custom_query_default = "EXAMPLE: status = 'SHIPPED' and qty_shipped = 0" - custom_query_height = 75 - elif test_type == "CUSTOM": - custom_query_default = "EXAMPLE: SELECT product, SUM(qty_sold) as sum_sold, SUM(qty_shipped) as qty_shipped \n FROM {DATA_SCHEMA}.sales_history \n GROUP BY product \n HAVING SUM(qty_shipped) > SUM(qty_sold)" - custom_query_height = 150 - else: - custom_query_default = None - custom_query_height = 75 - test_definition["custom_query"] = st.text_area( + if dynamic_attribute in ["custom_query"]: + show_custom_query = True + else: + test_definition[dynamic_attribute] = current_column.text_input( label=actual_dynamic_attributes_labels, - value=custom_query, - placeholder=custom_query_default, - height=custom_query_height, + max_chars=4000 if dynamic_attribute in ["match_column_names", 
"match_groupby_names", "groupby_names"] else 1000, + value=value, help=actual_dynamic_attributes_help, ) - # skip_errors - if run_type == "QUERY": - test_definition["skip_errors"] = left_column.number_input(label="Threshold Error Count", value=skip_errors) + # Custom Query + if show_custom_query: + if test_type == "Condition_Flag": + custom_query_default = "EXAMPLE: status = 'SHIPPED' and qty_shipped = 0" + custom_query_height = 75 + elif test_type == "CUSTOM": + custom_query_default = "EXAMPLE: SELECT product, SUM(qty_sold) as sum_sold, SUM(qty_shipped) as qty_shipped \n FROM {DATA_SCHEMA}.sales_history \n GROUP BY product \n HAVING SUM(qty_shipped) > SUM(qty_sold)" + custom_query_height = 150 else: - test_definition["skip_errors"] = skip_errors + custom_query_default = None + custom_query_height = 75 + test_definition["custom_query"] = st.text_area( + label=actual_dynamic_attributes_labels, + value=custom_query, + placeholder=custom_query_default, + height=custom_query_height, + help=actual_dynamic_attributes_help, + ) - # submit logic - bottom_left_column, bottom_right_column = st.columns([0.5, 0.5]) + # skip_errors + if run_type == "QUERY": + test_definition["skip_errors"] = left_column.number_input(label="Threshold Error Count", value=skip_errors) + else: + test_definition["skip_errors"] = skip_errors - # Add Validate button - if test_type in ("Condition_Flag", "CUSTOM"): - validate = bottom_left_column.button( - "Validate", disabled=authentication_service.current_user_has_read_role() - ) - if validate: - try: - test_definition_service.validate_test(test_definition) - bottom_right_column.success("Validation is successful.") - except Exception as e: - bottom_right_column.error(f"Test validation failed with error: {e}") + # submit logic + bottom_left_column, bottom_right_column = st.columns([0.5, 0.5]) + + # Add Validate button + if test_type in ("Condition_Flag", "CUSTOM"): + validate = bottom_left_column.button( + "Validate", 
disabled=authentication_service.current_user_has_read_role() + ) + if validate: + try: + test_definition_service.validate_test(test_definition) + bottom_right_column.success("Validation is successful.") + except Exception as e: + bottom_right_column.error(f"Test validation failed with error: {e}") + + submit = bottom_left_column.button("Save", disabled=authentication_service.current_user_has_read_role()) + + if submit: + if validate_form(test_scope, test_type, test_definition, column_name_label): + if mode == "edit": + test_definition_service.update(test_definition) + st.rerun() + else: + test_definition_service.add(test_definition) + st.rerun() + + +@st.dialog(title="Add Test") +def add_test_dialog(project_code, table_group, test_suite, str_table_name, str_column_name): + show_test_form("add", project_code, table_group, test_suite, str_table_name, str_column_name) - submit = bottom_left_column.button("Save", disabled=authentication_service.current_user_has_read_role()) - if submit: - if validate_form(test_scope, test_type, test_definition, column_name_label): - if mode == "edit": - test_definition_service.update(test_definition) - test_definition_modal.close() - else: - test_definition_service.add(test_definition) - test_definition_modal.close() +@st.dialog(title="Edit Test") +def edit_test_dialog(project_code, table_group, test_suite, str_table_name, str_column_name, selected_test_def): + show_test_form("edit", project_code, table_group, test_suite, str_table_name, str_column_name, selected_test_def) def validate_form(test_scope, test_type, test_definition, column_name_label): @@ -838,7 +816,7 @@ def show_test_defs_grid( _, col_profile_button = right_column.columns([0.7, 0.3]) if selected_row["test_scope"] == "column": - view_profiling_modal( + view_profiling_button( col_profile_button, selected_row["table_name"], selected_row["column_name"], str_table_groups_id=str_table_groups_id ) diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py 
index 3bfe916..8d13487 100644 --- a/testgen/ui/views/test_results.py +++ b/testgen/ui/views/test_results.py @@ -11,12 +11,11 @@ import testgen.ui.services.query_service as dq import testgen.ui.services.toolbar_service as tb from testgen.common import ConcatColumnList, date_service -from testgen.ui.components import widgets as testgen from testgen.ui.navigation.page import Page from testgen.ui.services.string_service import empty_if_null from testgen.ui.session import session -from testgen.ui.views.profiling_modal import view_profiling_modal -from testgen.ui.views.test_definitions import show_add_edit_modal_by_test_definition +from testgen.ui.views.profiling_modal import view_profiling_button +from testgen.ui.views.test_definitions import show_test_form_by_id ALWAYS_SPIN = False @@ -673,7 +672,7 @@ def show_result_detail(str_run_id, str_sel_test_status, do_multi_select, export_ v_col1, v_col2, v_col3 = st.columns([0.33, 0.33, 0.33]) view_edit_test(v_col1, selected_row["test_definition_id_current"]) if selected_row["test_scope"] == "column": - view_profiling_modal( + view_profiling_button( v_col2, selected_row["table_name"], selected_row["column_names"], str_table_groups_id=selected_row["table_groups_id"] ) @@ -857,52 +856,46 @@ def do_disposition_update(selected, str_new_status): def view_bad_data(button_container, selected_row): - str_header = f"Column: {selected_row['column_names']}, Table: {selected_row['table_name']}" - bad_data_modal = testgen.Modal(title=None, key="dk-test-data-modal", max_width=1100) - with button_container: if st.button( ":green[Source Data →]", help="Review current source data for highlighted result", use_container_width=True ): - bad_data_modal.open() + source_data_dialog(selected_row) - if bad_data_modal.is_open(): - with bad_data_modal.container(): - fm.render_modal_header(selected_row["test_name_short"], None) - st.caption(selected_row["test_description"]) - fm.show_prompt(str_header) - # Show detail - fm.render_html_list( - 
selected_row, ["input_parameters", "result_message"], None, 700, ["Test Parameters", "Result Detail"] - ) +@st.dialog(title="Source Data") +def source_data_dialog(selected_row): + st.markdown(f"#### {selected_row['test_name_short']}") + st.caption(selected_row["test_description"]) + fm.show_prompt(f"Column: {selected_row['column_names']}, Table: {selected_row['table_name']}") - with st.spinner("Retrieving source data..."): - if selected_row["test_type"] == "CUSTOM": - bad_data_status, bad_data_msg, df_bad = do_source_data_lookup_custom(selected_row) - else: - bad_data_status, bad_data_msg, df_bad = do_source_data_lookup(selected_row) - if bad_data_status in {"ND", "NA"}: - st.info(bad_data_msg) - elif bad_data_status == "ERR": - st.error(bad_data_msg) - elif df_bad is None: - st.error("An unknown error was encountered.") - else: - if bad_data_msg: - st.info(bad_data_msg) - # Pretify the dataframe - df_bad.columns = [col.replace("_", " ").title() for col in df_bad.columns] - df_bad.fillna("[NULL]", inplace=True) - # Display the dataframe - st.dataframe(df_bad, height=500, width=1050, hide_index=True) + # Show detail + fm.render_html_list( + selected_row, ["input_parameters", "result_message"], None, 700, ["Test Parameters", "Result Detail"] + ) + + with st.spinner("Retrieving source data..."): + if selected_row["test_type"] == "CUSTOM": + bad_data_status, bad_data_msg, df_bad = do_source_data_lookup_custom(selected_row) + else: + bad_data_status, bad_data_msg, df_bad = do_source_data_lookup(selected_row) + if bad_data_status in {"ND", "NA"}: + st.info(bad_data_msg) + elif bad_data_status == "ERR": + st.error(bad_data_msg) + elif df_bad is None: + st.error("An unknown error was encountered.") + else: + if bad_data_msg: + st.info(bad_data_msg) + # Pretify the dataframe + df_bad.columns = [col.replace("_", " ").title() for col in df_bad.columns] + df_bad.fillna("[NULL]", inplace=True) + # Display the dataframe + st.dataframe(df_bad, height=500, width=1050, 
hide_index=True) def view_edit_test(button_container, test_definition_id): - edit_test_definition_modal = testgen.Modal(title=None, key="dk-test-definition-edit-modal", max_width=1100) with button_container: if st.button("🖊️ Edit Test", help="Edit the Test Definition", use_container_width=True): - edit_test_definition_modal.open() - - if edit_test_definition_modal.is_open(): - show_add_edit_modal_by_test_definition(edit_test_definition_modal, test_definition_id) + show_test_form_by_id(test_definition_id) diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py index 2d129e8..c341788 100644 --- a/testgen/ui/views/test_suites.py +++ b/testgen/ui/views/test_suites.py @@ -10,7 +10,6 @@ from testgen.commands.run_execute_tests import run_execution_steps_in_background from testgen.commands.run_generate_tests import run_test_gen_queries from testgen.commands.run_observability_exporter import export_test_results -from testgen.ui.components import widgets as testgen from testgen.ui.navigation.page import Page from testgen.ui.services import connection_service, table_group_service from testgen.ui.services.string_service import empty_if_null @@ -70,36 +69,19 @@ def render(self, connection_id: str | None = None, table_group_id: str | None = selected = fm.render_grid_select(df, show_columns) - add_modal = testgen.Modal(title=None, key="dk-add-test_suite-modal", max_width=1100) - edit_modal = testgen.Modal(title=None, key="dk-edit-test_suite-modal", max_width=1100) - delete_modal = testgen.Modal(title=None, key="dk-delete-test_suite-modal", max_width=1100) - run_tests_command_modal = testgen.Modal(title=None, key="dk-run-tests-command-modal", max_width=1100) - - show_test_run_command_modal = testgen.Modal( - title=None, key="dk-show-test-run-command-modal", max_width=1100 - ) - run_test_generation_modal = testgen.Modal(title=None, key="dk-run-test-generation-modal", max_width=1100) - show_run_test_generation_modal = testgen.Modal( - title=None, 
key="dk-show-test-generation-modal", max_width=1100 - ) - - run_export_command_modal = testgen.Modal(title=None, key="dk-run-export-modal", max_width=1100) - show_export_command_modal = testgen.Modal( - title=None, key="dk-show-export-modal", max_width=1100 - ) - if tool_bar.short_slots[1].button("➕ Add", help="Add a new Test Run", use_container_width=True): # NOQA RUF001 - add_modal.open() + add_test_suite_dialog(project_code, connection, table_group) disable_buttons = selected is None if tool_bar.short_slots[2].button( "🖊️ Edit", help="Edit the selected Test Run", disabled=disable_buttons, use_container_width=True ): - edit_modal.open() + edit_test_suite_dialog(project_code, connection, table_group, selected) + if tool_bar.short_slots[3].button( "❌ Delete", help="Delete the selected Test Run", disabled=disable_buttons, use_container_width=True ): - delete_modal.open() + delete_test_suite_dialog(selected) if tool_bar.short_slots[4].button( f":{'gray' if disable_buttons else 'green'}[Tests →]", @@ -118,56 +100,13 @@ def render(self, connection_id: str | None = None, table_group_id: str | None = }, ) - if add_modal.is_open(): - show_add_or_edit_modal(add_modal, "add", project_code, connection, table_group) - - if edit_modal.is_open(): - show_add_or_edit_modal(edit_modal, "edit", project_code, connection, table_group, selected) - - if delete_modal.is_open(): - show_delete_modal(delete_modal, selected) - - if run_tests_command_modal.is_open(): - run_tests(run_tests_command_modal, project_code, selected) - - if show_test_run_command_modal.is_open(): - show_test_run_command(show_test_run_command_modal, project_code, selected) - - if run_test_generation_modal.is_open(): - show_run_test_generation(run_test_generation_modal, selected) - - if show_run_test_generation_modal.is_open(): - show_test_generation_command(show_run_test_generation_modal, selected) - - if show_export_command_modal.is_open(): - show_export_command(show_export_command_modal, selected) - - if 
run_export_command_modal.is_open(): - run_export_command(run_export_command_modal, selected) - if not selected: st.markdown(":orange[Select a row to see Test Suite details.]") else: - show_record_detail( - selected[0], - show_test_run_command_modal, - run_test_generation_modal, - show_run_test_generation_modal, - run_tests_command_modal, - show_export_command_modal, - run_export_command_modal, - ) + show_record_detail(project_code, selected[0]) -def show_record_detail( - selected, - show_test_run_command_modal, - run_test_generation_modal, - show_run_test_generation_modal, - run_tests_command_modal, - show_export_command_modal, - run_export_command_modal, -): +def show_record_detail(project_code, selected): left_column, right_column = st.columns([0.5, 0.5]) with left_column: @@ -202,345 +141,333 @@ def show_record_detail( help="Shows the run-test-generation CLI command", use_container_width=True, ): - show_run_test_generation_modal.open() + generate_tests_cli_dialog(selected) if st.button( "Test Execution Command", help="Shows the run-tests CLI command", use_container_width=True, ): - show_test_run_command_modal.open() + run_tests_cli_dialog(project_code, selected) if st.button( "Observability Export Command", help="Shows the export-observability CLI command", use_container_width=True, ): - show_export_command_modal.open() + observability_export_cli_dialog(selected) with run_now_commands_tab: if st.button("Run Test Generation", help="Run Test Generation", use_container_width=True): - run_test_generation_modal.open() + generate_tests_dialog(selected) if st.button("Run Test Execution", help="Run the tests", use_container_width=True): - run_tests_command_modal.open() + run_tests_dialog(project_code, selected) if st.button( "Run Observability Export", help="Exports test results to Observability for the current Test Suite", use_container_width=True, ): - run_export_command_modal.open() - - -def show_run_test_generation(modal, selected): - selected_test_suite = 
selected[0] - - with modal.container(): - fm.render_modal_header("Run Test Generation", None) - container = st.empty() - with container: - st.markdown(":green[**Execute Test Generation for the Test Suite**]") - - warning_container = st.container() - options_container = st.container() - button_container = st.empty() - status_container = st.empty() - - test_ct, unlocked_test_ct, unlocked_edits_ct = test_suite_service.get_test_suite_refresh_warning( - selected_test_suite["table_groups_id"], selected_test_suite["test_suite"] - ) - if test_ct: - warning_msg = "" - counts_msg = f"\n\nAuto-Generated Tests: {test_ct}, Unlocked: {unlocked_test_ct}, Edited Unlocked: {unlocked_edits_ct}" - if unlocked_edits_ct > 0: - if unlocked_edits_ct > 1: - - warning_msg = "Manual changes have been made to auto-generated tests in this Test Suite that have not been locked. " - else: - warning_msg = "A manual change has been made to an auto-generated test in this Test Suite that has not been locked. " - elif unlocked_test_ct > 0: - warning_msg = "Auto-generated tests are present in this Test Suite that have not been locked. 
" - warning_msg = f"{warning_msg}Generating tests now will overwrite unlocked tests subject to auto-generation based on the latest profiling.{counts_msg}" - with warning_container: - st.warning(warning_msg) - if unlocked_edits_ct > 0: - lock_edits_button = st.button("Lock Edited Tests") - if lock_edits_button: - edits_locked = test_suite_service.lock_edited_tests(selected_test_suite["test_suite"]) - if edits_locked: - st.info("Edited tests have been successfully locked.") - - with options_container: - lst_generation_sets = test_suite_service.get_generation_set_choices() - if lst_generation_sets: - lst_generation_sets.insert(0, "(All Test Types)") - str_generation_set = st.selectbox("Generation Set", lst_generation_sets) - if str_generation_set == "(All Test Types)": - str_generation_set = "" + observability_export_dialog(selected) + + +@st.dialog(title="Generate Tests") +def generate_tests_dialog(selected_test_suite): + container = st.empty() + with container: + st.markdown(":green[**Execute Test Generation for the Test Suite**]") + + warning_container = st.container() + options_container = st.container() + button_container = st.empty() + status_container = st.empty() + + test_ct, unlocked_test_ct, unlocked_edits_ct = test_suite_service.get_test_suite_refresh_warning( + selected_test_suite["table_groups_id"], selected_test_suite["test_suite"] + ) + if test_ct: + warning_msg = "" + counts_msg = f"\n\nAuto-Generated Tests: {test_ct}, Unlocked: {unlocked_test_ct}, Edited Unlocked: {unlocked_edits_ct}" + if unlocked_edits_ct > 0: + if unlocked_edits_ct > 1: + + warning_msg = "Manual changes have been made to auto-generated tests in this Test Suite that have not been locked. " else: + warning_msg = "A manual change has been made to an auto-generated test in this Test Suite that has not been locked. " + elif unlocked_test_ct > 0: + warning_msg = "Auto-generated tests are present in this Test Suite that have not been locked. 
" + warning_msg = f"{warning_msg}Generating tests now will overwrite unlocked tests subject to auto-generation based on the latest profiling.{counts_msg}" + with warning_container: + st.warning(warning_msg) + if unlocked_edits_ct > 0: + lock_edits_button = st.button("Lock Edited Tests") + if lock_edits_button: + edits_locked = test_suite_service.lock_edited_tests(selected_test_suite["test_suite"]) + if edits_locked: + st.info("Edited tests have been successfully locked.") + + with options_container: + lst_generation_sets = test_suite_service.get_generation_set_choices() + if lst_generation_sets: + lst_generation_sets.insert(0, "(All Test Types)") + str_generation_set = st.selectbox("Generation Set", lst_generation_sets) + if str_generation_set == "(All Test Types)": str_generation_set = "" + else: + str_generation_set = "" - with button_container: - start_process_button_message = "Start" - test_generation_button = st.button(start_process_button_message) - - if test_generation_button: - button_container.empty() + with button_container: + start_process_button_message = "Start" + test_generation_button = st.button(start_process_button_message) - table_group_id = selected_test_suite["table_groups_id"] - test_suite_key = selected_test_suite["test_suite"] - status_container.info("Executing Test Generation...") + if test_generation_button: + button_container.empty() - try: - run_test_gen_queries(table_group_id, test_suite_key, str_generation_set) - except Exception as e: - status_container.empty() - status_container.error(f"Process had errors: {e!s}.") + table_group_id = selected_test_suite["table_groups_id"] + test_suite_key = selected_test_suite["test_suite"] + status_container.info("Executing Test Generation...") + try: + run_test_gen_queries(table_group_id, test_suite_key, str_generation_set) + except Exception as e: status_container.empty() - status_container.success("Process has successfully finished.") + status_container.error(f"Process had errors: {e!s}.") + 
status_container.empty() + status_container.success("Process has successfully finished.") -def show_delete_modal(modal, selected=None): - selected_test_suite = selected[0] - with modal.container(): - fm.render_modal_header("Delete Test Suite", None) - test_suite_name = selected_test_suite["test_suite"] - - can_be_deleted = test_suite_service.cascade_delete([test_suite_name], dry_run=True) +@st.dialog(title="Delete Test Suite") +def delete_test_suite_dialog(selected): + selected_test_suite = selected[0] + test_suite_name = selected_test_suite["test_suite"] + can_be_deleted = test_suite_service.cascade_delete([test_suite_name], dry_run=True) - fm.render_html_list( - selected_test_suite, - [ - "id", - "test_suite", - "test_suite_description", - ], - "Test Suite Information", - int_data_width=700, + fm.render_html_list( + selected_test_suite, + [ + "id", + "test_suite", + "test_suite_description", + ], + "Test Suite Information", + int_data_width=700, + ) + + if not can_be_deleted: + st.markdown( + ":orange[This Test Suite has related data, which includes test definitions and may include test results. If you proceed, all related data will be permanently deleted.
Are you sure you want to proceed?]", + unsafe_allow_html=True, ) + accept_cascade_delete = st.toggle("I accept deletion of this Test Suite and all related TestGen data.") - if not can_be_deleted: - st.markdown( - ":orange[This Test Suite has related data, which includes test definitions and may include test results. If you proceed, all related data will be permanently deleted.
Are you sure you want to proceed?]", - unsafe_allow_html=True, - ) - accept_cascade_delete = st.toggle("I accept deletion of this Test Suite and all related TestGen data.") - - with st.form("Delete Test Suite", clear_on_submit=True): - disable_delete_button = authentication_service.current_user_has_read_role() or ( - not can_be_deleted and not accept_cascade_delete - ) - delete = st.form_submit_button("Delete", disabled=disable_delete_button) + with st.form("Delete Test Suite", clear_on_submit=True): + disable_delete_button = authentication_service.current_user_has_read_role() or ( + not can_be_deleted and not accept_cascade_delete + ) + delete = st.form_submit_button("Delete", disabled=disable_delete_button) - if delete: - if test_suite_service.are_test_suites_in_use([test_suite_name]): - st.error("This Test Suite is in use by a running process and cannot be deleted.") - else: - test_suite_service.cascade_delete([test_suite_name]) - success_message = f"Test Suite {test_suite_name} has been deleted. " - st.success(success_message) - time.sleep(1) - modal.close() - st.rerun() + if delete: + if test_suite_service.are_test_suites_in_use([test_suite_name]): + st.error("This Test Suite is in use by a running process and cannot be deleted.") + else: + test_suite_service.cascade_delete([test_suite_name]) + success_message = f"Test Suite {test_suite_name} has been deleted. 
" + st.success(success_message) + time.sleep(1) + st.rerun() -def show_add_or_edit_modal(modal, mode, project_code, connection, table_group, selected=None): +def show_test_suite(mode, project_code, connection, table_group, selected=None): connection_id = connection["connection_id"] table_group_id = table_group["id"] - with modal.container(): - fm.render_modal_header("Edit Test Suite" if mode == "edit" else "Add Test Suite", None) - severity_options = ["Inherit", "Failed", "Warning"] - - selected_test_suite = selected[0] if mode == "edit" else None - - if mode == "edit" and not selected_test_suite["severity"]: - selected_test_suite["severity"] = severity_options[0] - - # establish default values - test_suite_id = selected_test_suite["id"] if mode == "edit" else None - test_suite = empty_if_null(selected_test_suite["test_suite"]) if mode == "edit" else "" - connection_id = selected_test_suite["connection_id"] if mode == "edit" else connection_id - table_groups_id = selected_test_suite["table_groups_id"] if mode == "edit" else table_group_id - test_suite_description = empty_if_null(selected_test_suite["test_suite_description"]) if mode == "edit" else "" - test_action = empty_if_null(selected_test_suite["test_action"]) if mode == "edit" else "" - severity_index = severity_options.index(selected_test_suite["severity"]) if mode == "edit" else 0 - export_to_observability = selected_test_suite["export_to_observability"] == "Y" if mode == "edit" else False - test_suite_schema = empty_if_null(selected_test_suite["test_suite_schema"]) if mode == "edit" else "" - component_key = empty_if_null(selected_test_suite["component_key"]) if mode == "edit" else "" - component_type = empty_if_null(selected_test_suite["component_type"]) if mode == "edit" else "dataset" - component_name = empty_if_null(selected_test_suite["component_name"]) if mode == "edit" else "" - - left_column, right_column = st.columns([0.50, 0.50]) - expander = st.expander("", expanded=True) - with expander: - 
expander_left_column, expander_right_column = st.columns([0.50, 0.50]) - - with st.form("Test Suite Add / Edit", clear_on_submit=True): - entity = { - "id": test_suite_id, - "project_code": project_code, - "test_suite": left_column.text_input( - label="Test Suite Name", max_chars=40, value=test_suite, disabled=(mode != "add") - ), - "connection_id": connection_id, - "table_groups_id": table_groups_id, - "test_suite_description": left_column.text_input( - label="Test Suite Description", max_chars=40, value=test_suite_description - ), - "test_action": test_action, - "severity": right_column.selectbox( - label="Severity", - options=severity_options, - index=severity_index, - help="Overrides the default severity in 'Test Definition' and/or 'Test Run'.", - ), - "test_suite_schema": test_suite_schema, - "export_to_observability": left_column.toggle( - "Export to Observability", - value=export_to_observability, - help="Fields below are only required when overriding the Table Group defaults.", - ), - "component_key": expander_left_column.text_input( - label="Component Key", - max_chars=40, - value=component_key, - placeholder="Optional Field", - help="Overrides the default component key mapping, which is set at Table Group level.", - ), - "component_type": expander_right_column.text_input( - label="Component Type", max_chars=40, value=component_type, disabled=True - ), - "component_name": expander_left_column.text_input( - label="Component Name", - max_chars=40, - value=component_name, - placeholder="Optional Field", - help="Overrides the default component name mapping, which is set at the Table Group level.", - ), - } - - submit_button_text = "Save" if mode == "edit" else "Add" - submit = st.form_submit_button( - submit_button_text, disabled=authentication_service.current_user_has_read_role() - ) + severity_options = ["Inherit", "Failed", "Warning"] + + selected_test_suite = selected[0] if mode == "edit" else None + + if mode == "edit" and not 
selected_test_suite["severity"]: + selected_test_suite["severity"] = severity_options[0] + + # establish default values + test_suite_id = selected_test_suite["id"] if mode == "edit" else None + test_suite = empty_if_null(selected_test_suite["test_suite"]) if mode == "edit" else "" + connection_id = selected_test_suite["connection_id"] if mode == "edit" else connection_id + table_groups_id = selected_test_suite["table_groups_id"] if mode == "edit" else table_group_id + test_suite_description = empty_if_null(selected_test_suite["test_suite_description"]) if mode == "edit" else "" + test_action = empty_if_null(selected_test_suite["test_action"]) if mode == "edit" else "" + severity_index = severity_options.index(selected_test_suite["severity"]) if mode == "edit" else 0 + export_to_observability = selected_test_suite["export_to_observability"] == "Y" if mode == "edit" else False + test_suite_schema = empty_if_null(selected_test_suite["test_suite_schema"]) if mode == "edit" else "" + component_key = empty_if_null(selected_test_suite["component_key"]) if mode == "edit" else "" + component_type = empty_if_null(selected_test_suite["component_type"]) if mode == "edit" else "dataset" + component_name = empty_if_null(selected_test_suite["component_name"]) if mode == "edit" else "" + + left_column, right_column = st.columns([0.50, 0.50]) + expander = st.expander("", expanded=True) + with expander: + expander_left_column, expander_right_column = st.columns([0.50, 0.50]) + + with st.form("Test Suite Add / Edit", clear_on_submit=True): + entity = { + "id": test_suite_id, + "project_code": project_code, + "test_suite": left_column.text_input( + label="Test Suite Name", max_chars=40, value=test_suite, disabled=(mode != "add") + ), + "connection_id": connection_id, + "table_groups_id": table_groups_id, + "test_suite_description": left_column.text_input( + label="Test Suite Description", max_chars=40, value=test_suite_description + ), + "test_action": test_action, + "severity": 
right_column.selectbox( + label="Severity", + options=severity_options, + index=severity_index, + help="Overrides the default severity in 'Test Definition' and/or 'Test Run'.", + ), + "test_suite_schema": test_suite_schema, + "export_to_observability": left_column.toggle( + "Export to Observability", + value=export_to_observability, + help="Fields below are only required when overriding the Table Group defaults.", + ), + "component_key": expander_left_column.text_input( + label="Component Key", + max_chars=40, + value=component_key, + placeholder="Optional Field", + help="Overrides the default component key mapping, which is set at Table Group level.", + ), + "component_type": expander_right_column.text_input( + label="Component Type", max_chars=40, value=component_type, disabled=True + ), + "component_name": expander_left_column.text_input( + label="Component Name", + max_chars=40, + value=component_name, + placeholder="Optional Field", + help="Overrides the default component name mapping, which is set at the Table Group level.", + ), + } + + submit_button_text = "Save" if mode == "edit" else "Add" + submit = st.form_submit_button( + submit_button_text, disabled=authentication_service.current_user_has_read_role() + ) - if submit: - if " " in entity["test_suite"]: - proposed_test_suite = entity["test_suite"].replace(" ", "-") - st.error( - f"Blank spaces not allowed in field 'Test Suite Name'. Use dash or underscore instead. i.e.: {proposed_test_suite}" - ) + if submit: + if " " in entity["test_suite"]: + proposed_test_suite = entity["test_suite"].replace(" ", "-") + st.error( + f"Blank spaces not allowed in field 'Test Suite Name'. Use dash or underscore instead. i.e.: {proposed_test_suite}" + ) + else: + if mode == "edit": + test_suite_service.edit(entity) else: - if mode == "edit": - test_suite_service.edit(entity) - else: - test_suite_service.add(entity) - success_message = ( - "Changes have been saved successfully. 
" - if mode == "edit" - else "New TestSuite added successfully. " - ) - st.success(success_message) - time.sleep(1) - modal.close() - st.rerun() - - -def run_tests(modal, project_code, selected): - selected_test_suite = selected[0] + test_suite_service.add(entity) + success_message = ( + "Changes have been saved successfully. " + if mode == "edit" + else "New TestSuite added successfully. " + ) + st.success(success_message) + time.sleep(1) + st.rerun() + - with modal.container(): - fm.render_modal_header("Run Test Execution", None) - container = st.empty() - with container: - st.markdown(":green[**Run Tests for the Test Suite**]") +@st.dialog(title="Add Test Suite") +def add_test_suite_dialog(project_code, connection, table_group): + show_test_suite("add", project_code, connection, table_group) - button_container = st.empty() - status_container = st.empty() - with button_container: - start_process_button_message = "Start" - run_test_button = st.button(start_process_button_message) +@st.dialog(title="Edit Test Suite") +def edit_test_suite_dialog(project_code, connection, table_group, selected): + show_test_suite("edit", project_code, connection, table_group, selected) - if run_test_button: - button_container.empty() - test_suite_key = selected_test_suite["test_suite"] - status_container.info(f"Running tests for test suite {test_suite_key}") +@st.dialog(title="Run Tests") +def run_tests_dialog(project_code, selected_test_suite): + container = st.empty() + with container: + st.markdown(":green[**Run Tests for the Test Suite**]") - try: - run_execution_steps_in_background(project_code, test_suite_key) - except Exception as e: - status_container.empty() - status_container.error(f"Process started with errors: {e!s}.") + button_container = st.empty() + status_container = st.empty() + with button_container: + start_process_button_message = "Start" + run_test_button = st.button(start_process_button_message) + + if run_test_button: + button_container.empty() + + 
test_suite_key = selected_test_suite["test_suite"] + status_container.info(f"Running tests for test suite {test_suite_key}") + + try: + run_execution_steps_in_background(project_code, test_suite_key) + except Exception as e: status_container.empty() - status_container.success( - "Process has successfully started. Check details in menu item 'Data Quality Testing'." - ) + status_container.error(f"Process started with errors: {e!s}.") + status_container.empty() + status_container.success( + "Process has successfully started. Check details in menu item 'Data Quality Testing'." + ) -def show_test_run_command(modal, project_code, selected): - with modal.container(): - fm.render_modal_header("Test Execution Command for CLI", None) - selected_test_suite = selected[0] - test_suite_name = selected_test_suite["test_suite"] - command = f"testgen run-tests --project-key {project_code} --test-suite-key {test_suite_name}" - st.code(command, language="shellSession") +@st.dialog(title="Run Tests CLI Command") +def run_tests_cli_dialog(project_code, selected_test_suite): + test_suite_name = selected_test_suite["test_suite"] + command = f"testgen run-tests --project-key {project_code} --test-suite-key {test_suite_name}" + st.code(command, language="shellSession") -def show_test_generation_command(modal, selected): - with modal.container(): - fm.render_modal_header("Test Generation Command for CLI", None) - selected_test_suite = selected[0] - test_suite_key = selected_test_suite["test_suite"] - table_group_id = selected_test_suite["table_groups_id"] - command = f"testgen run-test-generation --table-group-id {table_group_id} --test-suite-key {test_suite_key}" - st.code(command, language="shellSession") +@st.dialog(title="Generate Tests CLI Command") +def generate_tests_cli_dialog(selected_test_suite): + test_suite_key = selected_test_suite["test_suite"] + table_group_id = selected_test_suite["table_groups_id"] + command = f"testgen run-test-generation --table-group-id {table_group_id} 
--test-suite-key {test_suite_key}" + st.code(command, language="shellSession") -def show_export_command(modal, selected): - with modal.container(): - fm.render_modal_header("Observability Export Command for CLI", None) - selected_test_suite = selected[0] - test_suite_key = selected_test_suite["test_suite"] - project_key = selected_test_suite["project_code"] - command = f"testgen export-observability --project-key {project_key} --test-suite-key {test_suite_key}" - st.code(command, language="shellSession") +@st.dialog(title="Observability Export CLI Command") +def observability_export_cli_dialog(selected_test_suite): + test_suite_key = selected_test_suite["test_suite"] + project_key = selected_test_suite["project_code"] + command = f"testgen export-observability --project-key {project_key} --test-suite-key {test_suite_key}" + st.code(command, language="shellSession") -def run_export_command(modal, selected): - selected_test_suite = selected[0] - with modal.container(): - fm.render_modal_header("Run Observability Export", None) - container = st.empty() - with container: - st.markdown(":green[**Execute the test export for the current Test Suite**]") +@st.dialog(title="Export to Observability") +def observability_export_dialog(selected_test_suite): + container = st.empty() + with container: + st.markdown(":green[**Execute the test export for the current Test Suite**]") - button_container = st.empty() - status_container = st.empty() + button_container = st.empty() + status_container = st.empty() - with button_container: - start_process_button_message = "Start" - test_generation_button = st.button(start_process_button_message) + with button_container: + start_process_button_message = "Start" + test_generation_button = st.button(start_process_button_message) - if test_generation_button: - button_container.empty() + if test_generation_button: + button_container.empty() - test_suite_key = selected_test_suite["test_suite"] - project_key = selected_test_suite["project_code"] - 
status_container.info("Executing Export ...") + test_suite_key = selected_test_suite["test_suite"] + project_key = selected_test_suite["project_code"] + status_container.info("Executing Export ...") - try: - qty_of_exported_events = export_test_results(project_key, test_suite_key) - status_container.empty() - status_container.success( - f"Process has successfully finished, {qty_of_exported_events} events have been exported." - ) - except Exception as e: - status_container.empty() - status_container.error(f"Process has finished with errors: {e!s}.") + try: + qty_of_exported_events = export_test_results(project_key, test_suite_key) + status_container.empty() + status_container.success( + f"Process has successfully finished, {qty_of_exported_events} events have been exported." + ) + except Exception as e: + status_container.empty() + status_container.error(f"Process has finished with errors: {e!s}.") From 156336bb9e0175422f77b14743e1d8eac32f9009 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Mon, 19 Aug 2024 22:03:21 -0400 Subject: [PATCH 11/78] fix(router): fix bug in navigating with empty query params --- testgen/ui/navigation/router.py | 4 ++-- testgen/ui/views/test_suites.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/testgen/ui/navigation/router.py b/testgen/ui/navigation/router.py index de31db6..dc375c9 100644 --- a/testgen/ui/navigation/router.py +++ b/testgen/ui/navigation/router.py @@ -39,7 +39,7 @@ def run(self, hide_sidebar=False) -> None: current_page = session.page_pending_cookies or current_page session.page_pending_cookies = None - if session.page_args_pending_router: + if session.page_args_pending_router is not None: session.current_page_args = session.page_args_pending_router st.query_params.from_dict(session.page_args_pending_router) session.page_args_pending_router = None @@ -48,7 +48,7 @@ def run(self, hide_sidebar=False) -> None: current_page.run() - def navigate(self, /, to: str, with_args: dict | None = None) -> 
None: + def navigate(self, /, to: str, with_args: dict = {}) -> None: # noqa: B006 try: if to != session.current_page: route = self._routes[to] diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py index c341788..9903f88 100644 --- a/testgen/ui/views/test_suites.py +++ b/testgen/ui/views/test_suites.py @@ -94,7 +94,7 @@ def render(self, connection_id: str | None = None, table_group_id: str | None = self.router.navigate( "connections:test-definitions", { - "connection_id": connection, + "connection_id": connection["connection_id"], "table_group_id": table_group_id, "test_suite_id": selected[0]["id"], }, From 42eff31a99c3d2c23f74c344e6d7a170b6b33eae Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Mon, 19 Aug 2024 22:34:41 -0400 Subject: [PATCH 12/78] refactor(components): add custom expander toggle --- .../frontend/js/components/expander_toggle.js | 57 +++++++++++++++++++ testgen/ui/components/frontend/js/main.js | 2 + testgen/ui/components/widgets/__init__.py | 1 + .../ui/components/widgets/expander_toggle.py | 30 ++++++++++ 4 files changed, 90 insertions(+) create mode 100644 testgen/ui/components/frontend/js/components/expander_toggle.js create mode 100644 testgen/ui/components/widgets/expander_toggle.py diff --git a/testgen/ui/components/frontend/js/components/expander_toggle.js b/testgen/ui/components/frontend/js/components/expander_toggle.js new file mode 100644 index 0000000..1a31d9b --- /dev/null +++ b/testgen/ui/components/frontend/js/components/expander_toggle.js @@ -0,0 +1,57 @@ +/** + * @typedef Properties + * @type {object} + * @property {boolean} default + * @property {string} expandLabel + * @property {string} collapseLabel + */ +import van from '../van.min.js'; +import { Streamlit } from '../streamlit.js'; + +const { div, span, i } = van.tags; + +const ExpanderToggle = (/** @type Properties */ props) => { + Streamlit.setFrameHeight(24); + + if (!window.testgen.loadedStylesheets.expanderToggle) { + 
document.adoptedStyleSheets.push(stylesheet); + window.testgen.loadedStylesheets.expanderToggle = true; + } + + console.log(props) + const expandedState = van.state(!!props.default.val); + const expandLabel = props.expandLabel.val || 'Expand'; + const collapseLabel = props.collapseLabel.val || 'Collapse'; + + return div( + { + class: 'expander-toggle', + onclick: () => { + expandedState.val = !expandedState.val; + Streamlit.sendData(expandedState.val); + } + }, + span( + { class: 'expander-toggle--label' }, + () => expandedState.val ? collapseLabel : expandLabel, + ), + i( + { class: 'material-symbols-rounded' }, + () => expandedState.val ? 'keyboard_arrow_up' : 'keyboard_arrow_down', + ), + ); +}; + +const stylesheet = new CSSStyleSheet(); +stylesheet.replace(` +.expander-toggle { + display: flex; + flex-flow: row nowrap; + justify-content: flex-end; + align-items: center; + cursor: pointer; + color: #1976d2; +} +`); + +export { ExpanderToggle }; diff --git a/testgen/ui/components/frontend/js/main.js b/testgen/ui/components/frontend/js/main.js index 3b91dd0..e11b1ba 100644 --- a/testgen/ui/components/frontend/js/main.js +++ b/testgen/ui/components/frontend/js/main.js @@ -10,6 +10,7 @@ import { Streamlit } from './streamlit.js'; import { Button } from './components/button.js' import { Select } from './components/select.js' import { Breadcrumbs } from './components/breadcrumbs.js' +import { ExpanderToggle } from './components/expander_toggle.js'; let currentWindowVan = van; let topWindowVan = window.top.van; @@ -19,6 +20,7 @@ const TestGenComponent = (/** @type {string} */ id, /** @type {object} */ props) select: Button, button: Select, breadcrumbs: Breadcrumbs, + expander_toggle: ExpanderToggle, sidebar: window.top.testgen.components.Sidebar, }; diff --git a/testgen/ui/components/widgets/__init__.py b/testgen/ui/components/widgets/__init__.py index 1294f8d..4273495 100644 --- a/testgen/ui/components/widgets/__init__.py +++ 
b/testgen/ui/components/widgets/__init__.py @@ -2,3 +2,4 @@ from testgen.ui.components.widgets.breadcrumbs import breadcrumbs from testgen.ui.components.widgets.sidebar import sidebar +from testgen.ui.components.widgets.expander_toggle import expander_toggle diff --git a/testgen/ui/components/widgets/expander_toggle.py b/testgen/ui/components/widgets/expander_toggle.py new file mode 100644 index 0000000..0b776c7 --- /dev/null +++ b/testgen/ui/components/widgets/expander_toggle.py @@ -0,0 +1,30 @@ +import logging + +from testgen.ui.components.utils.component import component + +LOG = logging.getLogger("testgen") + + +def expander_toggle( + default: bool = False, + expand_label: str | None = None, + collapse_label: str | None = None, + key: str = "testgen:expander_toggle", +) -> None: + """ + Testgen component to display a toggle for an expandable container. + + # Parameters + :param default: default state for the component, default=False (collapsed) + :param expand_label: label for collapsed state, default="Expand" + :param collapse_label: label for expanded state, default="Collapse" + :param key: unique key to give the component a persisting state + """ + LOG.debug(key) + + return component( + id_="expander_toggle", + key=key, + default=default, + props={"default": default, "expandLabel": expand_label, "collapseLabel": collapse_label}, + ) From c2bed61eb4b5e13f2bfb36d6308f239851263db6 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Mon, 19 Aug 2024 23:53:19 -0400 Subject: [PATCH 13/78] refactor(components): add custom summary bar --- testgen/ui/assets/style.css | 2 + testgen/ui/components/frontend/css/shared.css | 2 + .../frontend/js/components/summary_bar.js | 86 +++++++++++++++++ testgen/ui/components/frontend/js/main.js | 2 + testgen/ui/components/widgets/__init__.py | 1 + testgen/ui/components/widgets/summary_bar.py | 36 +++++++ testgen/ui/views/profiling_anomalies.py | 80 +++------------- testgen/ui/views/test_results.py | 93 +++---------------- 8 files 
changed, 154 insertions(+), 148 deletions(-) create mode 100644 testgen/ui/components/frontend/js/components/summary_bar.js create mode 100644 testgen/ui/components/widgets/summary_bar.py diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css index 7be5a9d..b56cccc 100644 --- a/testgen/ui/assets/style.css +++ b/testgen/ui/assets/style.css @@ -4,6 +4,7 @@ body { --primary-text-color: #000000de; --secondary-text-color: #0000008a; --disabled-text-color: #00000042; + --caption-text-color: rgba(49, 51, 63, 0.6); /* Match Streamlit's caption color */ --sidebar-background-color: white; --sidebar-item-hover-color: #f5f5f5; @@ -79,6 +80,7 @@ button[title="Show password text"] { --primary-text-color: rgba(255, 255, 255); --secondary-text-color: rgba(255, 255, 255, .7); --disabled-text-color: rgba(255, 255, 255, .5); + --caption-text-color: rgba(250, 250, 250, .6); /* Match Streamlit's caption color */ --sidebar-background-color: #14181f; --sidebar-item-hover-color: #10141b; diff --git a/testgen/ui/components/frontend/css/shared.css b/testgen/ui/components/frontend/css/shared.css index 6d0c1f9..ef2f7eb 100644 --- a/testgen/ui/components/frontend/css/shared.css +++ b/testgen/ui/components/frontend/css/shared.css @@ -13,6 +13,7 @@ body { --primary-text-color: #000000de; --secondary-text-color: #0000008a; --disabled-text-color: #00000042; + --caption-text-color: rgba(49, 51, 63, 0.6); /* Match Streamlit's caption color */ --sidebar-background-color: white; --sidebar-item-hover-color: #f5f5f5; @@ -27,6 +28,7 @@ body { --primary-text-color: rgba(255, 255, 255); --secondary-text-color: rgba(255, 255, 255, .7); --disabled-text-color: rgba(255, 255, 255, .5); + --caption-text-color: rgba(250, 250, 250, .6); /* Match Streamlit's caption color */ --sidebar-background-color: #14181f; --sidebar-item-hover-color: #10141b; diff --git a/testgen/ui/components/frontend/js/components/summary_bar.js b/testgen/ui/components/frontend/js/components/summary_bar.js new file mode 
100644 index 0000000..6049a2b --- /dev/null +++ b/testgen/ui/components/frontend/js/components/summary_bar.js @@ -0,0 +1,86 @@ +/** + * @typedef SummaryItem + * @type {object} + * @property {string} value + * @property {string} color + * @property {string} label + * + * @typedef Properties + * @type {object} + * @property {Array.} items + * @property {number} height + * @property {number} width + */ +import van from '../van.min.js'; +import { Streamlit } from '../streamlit.js'; + +const { div, span } = van.tags; +const colorMap = { + red: '#EF5350', + orange: '#FF9800', + yellow: '#FDD835', + green: '#9CCC65', + purple: '#AB47BC', + blue: '#42A5F5', + brown: '#8D6E63', + grey: '#BDBDBD', +} + +const SummaryBar = (/** @type Properties */ props) => { + const height = props.height.val || 24; + const width = props.width.val; + const summaryItems = props.items.val; + const total = summaryItems.reduce((sum, item) => sum + item.value, 0); + + Streamlit.setFrameHeight(height + 24); + + if (!window.testgen.loadedStylesheets.summaryBar) { + document.adoptedStyleSheets.push(stylesheet); + window.testgen.loadedStylesheets.summaryBar = true; + } + + return div( + { class: 'tg-summary-bar-wrapper' }, + div( + { + class: 'tg-summary-bar', + style: `height: ${height}px; max-width: ${width ? width + 'px' : '100%'}` + }, + summaryItems.map(item => span({ + class: `tg-summary-bar--item`, + style: `width: ${item.value * 100 / total}%; background-color: ${colorMap[item.color] || item.color};`, + })), + ), + () => { + return total ? 
div( + { class: `tg-summary-bar--caption` }, + summaryItems.map(item => `${item.label}: ${item.value}`).join(', '), + ) : null; + }, + ); +}; + +const stylesheet = new CSSStyleSheet(); +stylesheet.replace(` +.tg-summary-bar { + height: 100%; + display: flex; + flex-flow: row nowrap; + align-items: flex-start; + justify-content: flex-start; + border-radius: 4px; + overflow: hidden; +} + +.tg-summary-bar--item { + height: 100%; +} + +.tg-summary-bar--caption { + margin-top: 4px; + color: var(--caption-text-color); + font-style: italic; +} +`); + +export { SummaryBar }; diff --git a/testgen/ui/components/frontend/js/main.js b/testgen/ui/components/frontend/js/main.js index e11b1ba..99dee47 100644 --- a/testgen/ui/components/frontend/js/main.js +++ b/testgen/ui/components/frontend/js/main.js @@ -10,6 +10,7 @@ import { Streamlit } from './streamlit.js'; import { Button } from './components/button.js' import { Select } from './components/select.js' import { Breadcrumbs } from './components/breadcrumbs.js' +import { SummaryBar } from './components/summary_bar.js'; import { ExpanderToggle } from './components/expander_toggle.js'; let currentWindowVan = van; @@ -20,6 +21,7 @@ const TestGenComponent = (/** @type {string} */ id, /** @type {object} */ props) select: Button, button: Select, breadcrumbs: Breadcrumbs, + summary_bar: SummaryBar, expander_toggle: ExpanderToggle, sidebar: window.top.testgen.components.Sidebar, }; diff --git a/testgen/ui/components/widgets/__init__.py b/testgen/ui/components/widgets/__init__.py index 4273495..f52e262 100644 --- a/testgen/ui/components/widgets/__init__.py +++ b/testgen/ui/components/widgets/__init__.py @@ -2,4 +2,5 @@ from testgen.ui.components.widgets.breadcrumbs import breadcrumbs from testgen.ui.components.widgets.sidebar import sidebar +from testgen.ui.components.widgets.summary_bar import summary_bar from testgen.ui.components.widgets.expander_toggle import expander_toggle diff --git a/testgen/ui/components/widgets/summary_bar.py 
b/testgen/ui/components/widgets/summary_bar.py new file mode 100644 index 0000000..ccc80f3 --- /dev/null +++ b/testgen/ui/components/widgets/summary_bar.py @@ -0,0 +1,36 @@ +import logging +import typing + +from testgen.ui.components.utils.component import component + +LOG = logging.getLogger("testgen") + + +def summary_bar( + items: list["SummaryItem"], + height: int | None = None, + width: int | None = None, + key: str = "testgen:summary_bar", +) -> None: + """ + Testgen component to display a summary status bar. + + # Parameters + :param items: list of dicts with value, label, and color + :param height: height of bar in pixels, default=24 + :param width: width of bar in pixels, default is 100% of parent + :param key: unique key to give the component a persisting state + """ + + component( + id_="summary_bar", + key=key, + default={}, + props={"items": items, "height": height, "width": width}, + ) + + +class SummaryItem(typing.TypedDict): + value: int + label: str + color: str diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py index 05c73b9..a89981a 100644 --- a/testgen/ui/views/profiling_anomalies.py +++ b/testgen/ui/views/profiling_anomalies.py @@ -7,6 +7,7 @@ import testgen.ui.services.form_service as fm import testgen.ui.services.query_service as dq import testgen.ui.services.toolbar_service as tb +from testgen.ui.components import widgets as testgen from testgen.ui.navigation.page import Page from testgen.ui.session import session from testgen.ui.views.profiling_modal import view_profiling_button @@ -64,9 +65,6 @@ def render(self) -> None: do_multi_select = st.toggle("Multi-Select", help=str_help) if str_table_groups_id: - # Get summary counts - df_sum = get_profiling_anomaly_summary(str_profile_run_id) - # Get hygiene issue list df_pa = get_profiling_anomalies(str_profile_run_id, str_likelihood) @@ -77,8 +75,11 @@ def render(self) -> None: df_pa["action"] = df_pa["id"].map(action_map).fillna(df_pa["action"]) if 
not df_pa.empty: + # Display summary bar + anomalies_summary = get_profiling_anomaly_summary(str_profile_run_id) + testgen.summary_bar(items=anomalies_summary, key="test_results", height=40, width=800) # write_frequency_graph(df_pa) - write_summary_graph(df_sum) + lst_show_columns = [ "table_name", "column_name", @@ -284,8 +285,14 @@ def get_profiling_anomaly_summary(str_profile_run_id): WHERE s.profile_run_id = '{str_profile_run_id}' GROUP BY schema_name; """ - # Retrieve and return data as df - return db.retrieve_data(str_sql) + df = db.retrieve_data(str_sql) + + return [ + { "label": "Definite", "value": int(df.at[0, "definite_ct"]), "color": "red" }, + { "label": "Likely", "value": int(df.at[0, "likely_ct"]), "color": "orange" }, + { "label": "Possible", "value": int(df.at[0, "possible_ct"]), "color": "yellow" }, + { "label": "Dismissed", "value": int(df.at[0, "dismissed_ct"]), "color": "grey" }, + ] @st.cache_data(show_spinner=False) @@ -373,67 +380,6 @@ def replace_parms(str_query): return "ERR", f"Source data lookup query caused an error:\n\n{e.args[0]}", None -def write_summary_graph(df_sum): - df_graph = df_sum[["definite_ct", "likely_ct", "possible_ct", "dismissed_ct"]] - - str_graph_caption = f"Definite: {df_sum.at[0, 'definite_ct']}, Likely: {df_sum.at[0, 'likely_ct']}, Possible: {df_sum.at[0, 'possible_ct']}, Dismissed: {df_sum.at[0, 'dismissed_ct']}" - - fig = px.bar( - df_graph, - orientation="h", - title=None, - color_discrete_sequence=["red", "orange", "yellow", "green"], - barmode="stack", - ) - - fig.update_traces(hovertemplate="%{x}") - - fig.update_layout( - showlegend=False, - legend_orientation="h", - legend_y=-0.2, # This value might need to be adjusted based on other chart elements - legend_x=0.5, - legend_xanchor="right", - legend_title_text="", - yaxis={ - "showticklabels": False, # hides y-axis labels - "showgrid": False, # removes grid lines - "zeroline": False, # removes the zero line - "showline": False, # hides the axis line - 
"title_text": "", - }, - xaxis={ - "showticklabels": False, # hides y-axis labels - "showgrid": False, # removes grid lines - "zeroline": False, # removes the zero line - "showline": False, # hides the axis line - "title_text": "", - }, - hovermode="closest", - height=100, - width=800, - margin={"l": 0, "r": 10, "b": 10, "t": 10}, # adjust margins around the plot - paper_bgcolor="rgba(0,0,0,0)", - plot_bgcolor="rgba(0,0,0,0)", - ) - - fig.add_annotation( - text=str_graph_caption, - xref="paper", - yref="paper", - # 'paper' coordinates are relative to the layout, with (0,0) at the bottom left and (1,1) at the top right - x=0, - y=0, - xanchor="left", - yanchor="top", - showarrow=False, - font={"size": 15, "color": "black"}, - ) - - config = {"displayModeBar": False} - st.plotly_chart(fig, config=config) - - def write_frequency_graph(df_tests): # Count the frequency of each test_name df_count = df_tests["anomaly_name"].value_counts().reset_index() diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py index 8d13487..cffaa24 100644 --- a/testgen/ui/views/test_results.py +++ b/testgen/ui/views/test_results.py @@ -11,6 +11,7 @@ import testgen.ui.services.query_service as dq import testgen.ui.services.toolbar_service as tb from testgen.common import ConcatColumnList, date_service +from testgen.ui.components import widgets as testgen from testgen.ui.navigation.page import Page from testgen.ui.services.string_service import empty_if_null from testgen.ui.session import session @@ -266,20 +267,19 @@ def get_test_disposition(str_run_id): def get_test_result_summary(str_run_id): str_schema = st.session_state["dbschema"] str_sql = f""" - SELECT test_ct as result_ct, - COALESCE(error_ct, 0) as error_ct, - failed_ct + warning_ct as exception_ct, warning_ct, - ROUND({str_schema}.fn_pct(warning_ct, test_ct), 1) as warning_pct, - failed_ct, - ROUND({str_schema}.fn_pct(failed_ct, test_ct), 1) as failed_pct, - passed_ct, - ROUND({str_schema}.fn_pct(passed_ct, 
test_ct), 1) as passed_pct + SELECT passed_ct, warning_ct, failed_ct, + COALESCE(error_ct, 0) as error_ct FROM {str_schema}.test_runs WHERE id = '{str_run_id}'::UUID; """ df = db.retrieve_data(str_sql) - return df + return [ + { "label": "Passed", "value": int(df.at[0, "passed_ct"]), "color": "green" }, + { "label": "Warnings", "value": int(df.at[0, "warning_ct"]), "color": "yellow" }, + { "label": "Failed", "value": int(df.at[0, "failed_ct"]), "color": "red" }, + { "label": "Errors", "value": int(df.at[0, "error_ct"]), "color": "grey" }, + ] @st.cache_data(show_spinner=ALWAYS_SPIN) @@ -572,11 +572,9 @@ def show_test_def_detail(str_test_def_id): def show_result_detail(str_run_id, str_sel_test_status, do_multi_select, export_container): - # Retrieve summary counts - df_sum = get_test_result_summary(str_run_id) - if not df_sum.empty: - if (df_sum.at[0, "result_ct"] or 0) > 0: - write_summary_graph(df_sum) + # Display summary bar + tests_summary = get_test_result_summary(str_run_id) + testgen.summary_bar(items=tests_summary, key="test_results", height=40, width=800) # Retrieve test results (always cached, action as null) df = get_test_results(str_run_id, str_sel_test_status) @@ -695,73 +693,6 @@ def show_result_detail(str_run_id, str_sel_test_status, do_multi_select, export_ return selected_rows -def write_summary_graph(df_sum): - df_graph = df_sum[["passed_ct", "error_ct", "warning_ct", "failed_ct"]] - - str_error_caption = f"Errors: {df_sum.at[0, 'error_ct']}, " if df_sum.at[0, "error_ct"] > 0 else "" - str_graph_caption = f"Passed: {df_sum.at[0, 'passed_ct']} ({df_sum.at[0, 'passed_pct']}%), {str_error_caption}Warnings: {df_sum.at[0, 'warning_ct']} ({df_sum.at[0, 'warning_pct']}%), Failed: {df_sum.at[0, 'failed_ct']} ({df_sum.at[0, 'failed_pct']}%)" - - fig = px.bar( - df_graph, - orientation="h", - title=None, - # labels={'value': 'Tests', 'variable': 'Result Status'}, - color_discrete_sequence=["green", "gray", "yellow", "red"], - barmode="stack", - ) - - 
fig.update_traces( - # hoverinfo='y+name', # Display the y value and the trace name - # hovertemplate='Count: %{y}
Type: %{name}', # Custom template for hover text - hovertemplate="%{x}" - # hovertemplate=None - ) - - fig.update_layout( - showlegend=False, - legend_orientation="h", - legend_y=-0.2, # This value might need to be adjusted based on other chart elements - legend_x=0.5, - legend_xanchor="right", - legend_title_text="", - yaxis={ - "showticklabels": False, # hides y-axis labels - "showgrid": False, # removes grid lines - "zeroline": False, # removes the zero line - "showline": False, # hides the axis line - "title_text": "", - }, - xaxis={ - "showticklabels": False, # hides y-axis labels - "showgrid": False, # removes grid lines - "zeroline": False, # removes the zero line - "showline": False, # hides the axis line - "title_text": "", - }, - hovermode="closest", - height=100, - width=800, - margin={"l": 0, "r": 10, "b": 10, "t": 10}, # adjust margins around the plot - paper_bgcolor="rgba(0,0,0,0)", - plot_bgcolor="rgba(0,0,0,0)", - ) - - fig.add_annotation( - text=str_graph_caption, - xref="paper", - yref="paper", # 'paper' coordinates are relative to the layout, with (0,0) at the bottom left and (1,1) at the top right - x=0, - y=0, - xanchor="left", - yanchor="top", - showarrow=False, - font={"size": 15, "color": "black"}, - ) - - config = {"displayModeBar": False} - st.plotly_chart(fig, config=config) - - def write_history_graph(dfh): y_min = min(dfh["result_measure"].min(), dfh["threshold_value"].min()) y_max = max(dfh["result_measure"].max(), dfh["threshold_value"].max()) From ec8bb486862059c9e37fb5377ecdf8aab12167a7 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Tue, 20 Aug 2024 00:54:34 -0400 Subject: [PATCH 14/78] refactor(theme): apply custom colors to buttons and inputs --- testgen/ui/assets/style.css | 27 +++++++++++++++++++++++ testgen/ui/components/widgets/__init__.py | 2 +- testgen/ui/views/table_groups.py | 2 +- testgen/ui/views/test_definitions.py | 2 +- testgen/ui/views/test_suites.py | 2 +- 5 files changed, 31 insertions(+), 4 deletions(-) diff 
--git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css index b56cccc..a96e650 100644 --- a/testgen/ui/assets/style.css +++ b/testgen/ui/assets/style.css @@ -64,6 +64,33 @@ div[data-testid="stModal"] div[role="dialog"] { } /* */ +/* Theming for buttons and form inputs */ +button[data-testid="baseButton-secondary"]:hover, +button[data-testid="baseButton-secondary"]:focus:not(:active), +button[data-testid="baseButton-secondaryFormSubmit"]:hover, +button[data-testid="baseButton-secondaryFormSubmit"]:focus:not(:active) { + border-color: var(--primary-color); + color: var(--primary-color); +} + +button[data-testid="baseButton-secondary"]:active, +button[data-testid="baseButton-secondaryFormSubmit"]:active, +label[data-baseweb="checkbox"]:has(input[aria-checked="true"]) > span { + border-color: var(--primary-color); + background-color: var(--primary-color); +} + +div[data-testid="stTextInput-RootElement"]:focus-within, +div[data-baseweb="select"] > div:has(input[aria-expanded="true"]) { + border-color: var(--primary-color); +} + +label[data-baseweb="radio"]:has(input[tabindex="0"]) > div:first-child, +label[data-baseweb="checkbox"]:has(input[aria-checked="true"]) > div:first-child { + background-color: var(--primary-color); +} +/* */ + button[title="Show password text"] { display: none; } diff --git a/testgen/ui/components/widgets/__init__.py b/testgen/ui/components/widgets/__init__.py index f52e262..a627555 100644 --- a/testgen/ui/components/widgets/__init__.py +++ b/testgen/ui/components/widgets/__init__.py @@ -1,6 +1,6 @@ # ruff: noqa: F401 from testgen.ui.components.widgets.breadcrumbs import breadcrumbs +from testgen.ui.components.widgets.expander_toggle import expander_toggle from testgen.ui.components.widgets.sidebar import sidebar from testgen.ui.components.widgets.summary_bar import summary_bar -from testgen.ui.components.widgets.expander_toggle import expander_toggle diff --git a/testgen/ui/views/table_groups.py b/testgen/ui/views/table_groups.py 
index 21b1791..43bbc0c 100644 --- a/testgen/ui/views/table_groups.py +++ b/testgen/ui/views/table_groups.py @@ -244,7 +244,7 @@ def delete_table_group_dialog(selected): disable_delete_button = authentication_service.current_user_has_read_role() or ( not can_be_deleted and not accept_cascade_delete ) - delete = st.form_submit_button("Delete", disabled=disable_delete_button) + delete = st.form_submit_button("Delete", disabled=disable_delete_button, type="primary") if delete: if table_group_service.are_table_groups_in_use([table_group_name]): diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py index 76f1f0c..e0f58cd 100644 --- a/testgen/ui/views/test_definitions.py +++ b/testgen/ui/views/test_definitions.py @@ -183,7 +183,7 @@ def delete_test_dialog(selected_test_definition): with st.form("Delete Test Definition", clear_on_submit=True): disable_delete_button = authentication_service.current_user_has_read_role() or not can_be_deleted - delete = st.form_submit_button("Delete", disabled=disable_delete_button) + delete = st.form_submit_button("Delete", disabled=disable_delete_button, type="primary") if delete: test_definition_service.delete([test_definition_id]) diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py index 9903f88..300f3c5 100644 --- a/testgen/ui/views/test_suites.py +++ b/testgen/ui/views/test_suites.py @@ -266,7 +266,7 @@ def delete_test_suite_dialog(selected): disable_delete_button = authentication_service.current_user_has_read_role() or ( not can_be_deleted and not accept_cascade_delete ) - delete = st.form_submit_button("Delete", disabled=disable_delete_button) + delete = st.form_submit_button("Delete", disabled=disable_delete_button, type="primary") if delete: if test_suite_service.are_test_suites_in_use([test_suite_name]): From f4d03a3c06d0ff73a4aaf6dc2178b7176a0b49c7 Mon Sep 17 00:00:00 2001 From: Astor Date: Thu, 22 Aug 2024 15:36:33 -0300 Subject: [PATCH 15/78] Changed dependencies 
versions to run in python 3.12 --- pyproject.toml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index ddf3085..fe9e439 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,15 +36,15 @@ dependencies = [ "click==8.1.3", "sqlalchemy==1.4.46", "snowflake-sqlalchemy==1.4.7", - "pyodbc==4.0.39", - "psycopg2-binary==2.9.6", + "pyodbc==5.0.0", + "psycopg2-binary==2.9.9", "pycryptodome==3.17", "prettytable==3.7.0", "requests_extensions==1.1.3", "bz2file==0.98", "trogon==0.4.0", - "numpy==1.25.2", - "pandas==2.1.0", + "numpy==1.26.4", + "pandas==2.1.4", "streamlit==1.37.1", "streamlit-extras==0.3.0", "streamlit-aggrid==0.3.4.post3", From 2afb55720dedb8f9ec7b88f515db85ca1b3ac7dd Mon Sep 17 00:00:00 2001 From: Astor Date: Thu, 22 Aug 2024 16:20:57 -0300 Subject: [PATCH 16/78] Updated README --- README.md | 102 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 99 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index badf1a5..57dca5b 100644 --- a/README.md +++ b/README.md @@ -20,9 +20,9 @@ A single place to manage Data Quality across data sets, locations, and te DataKitchen Open Source Data Quality TestGen Features - Single Place

-## Installation +## Installation with dk-installer (recommended) -The [dk-installer](https://github.com/DataKitchen/data-observability-installer/?tab=readme-ov-file#install-the-testgen-application) program installs DataOps Data Quality TestGen. +The [dk-installer](https://github.com/DataKitchen/data-observability-installer/?tab=readme-ov-file#install-the-testgen-application) program installs DataOps Data Quality TestGen as a [Docker Compose](https://docs.docker.com/compose/) application. This is the recommended mode of installation as Docker encapsulates and isolates the application from other software on your machine and does not require you to manage Python dependencies. ### Install the prerequisite software @@ -67,13 +67,109 @@ python3 dk-installer.py tg run-demo In the TestGen UI, you will see that new data profiling and test results have been generated. +## Installation with pip + +As an alternative to the Docker Compose [installation with dk-installer (recommended)](#installation-with-dk-installer-recommended), DataOps Data Quality TestGen can also be installed as a Python package via [pip](https://pip.pypa.io/en/stable/). This mode of installation uses the [dataops-testgen](https://pypi.org/project/dataops-testgen/) package published to PyPI, and it requires a PostgreSQL instance to be provisioned for the application database. + +### Install the prerequisite software + +| Software | Tested Versions | Command to check version | +|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------|------------------------------| +| [Python](https://www.python.org/downloads/)
- Most Linux and macOS systems have Python pre-installed.
- On Windows machines, you will need to download and install it. | 3.10, 3.11, 3.12 | `python3 --version` | +| [PostgreSQL](https://www.postgresql.org/download/) | 14.1, 15.8, 16.4 | `psql --version`| + +### Install the TestGen package + +We recommend using a Python virtual environment to avoid any dependency conflicts with other applications installed on your machine. The [venv](https://docs.python.org/3/library/venv.html#creating-virtual-environments) module, which is part of the Python standard library, or other third-party tools, like [virtualenv](https://virtualenv.pypa.io/en/latest/) or [conda](https://docs.conda.io/en/latest/), can be used. + +Create and activate a virtual environment with a TestGen-compatible version of Python (`>=3.10`). The steps may vary based on your operating system and Python installation - the [Python packaging user guide](https://packaging.python.org/en/latest/tutorials/installing-packages/) is a useful reference. + +_On Linux/Mac_ +```shell +python3.10 -m venv venv +source venv/bin/activate +``` + +_On Windows_ +```powershell +py -3.10 -m venv venv +venv\Scripts\activate +``` + +Within the virtual environment, install the TestGen package with pip. +```shell +pip install dataops-testgen +``` + +Verify that the [_testgen_ command line](https://docs.datakitchen.io/articles/#!dataops-testgen-help/testgen-commands-and-details) works. +```shell +testgen --help +``` + +### Set up the application database in PostgresSQL + +Set appropriate values for the following environment variables (use `export variable=value` for Linux/Mac and `set variable=value` for Windows). Refer to the [TestGen Configuration](configuration.md) document for more details, defaults, and other supported configuration. 
+ +```shell +# Connection parameters for the PostgreSQL server +TG_METADATA_DB_HOST +TG_METADATA_DB_PORT + +# PostgreSQL admin role with privileges to create roles, users, database and schema +# This role will be used by the next step to initialize the application database +DATABASE_ADMIN_USER +DATABASE_ADMIN_PASSWORD + +# Credentials to be used for encrypting secrets in application database +TG_DECRYPT_SALT +TG_DECRYPT_PASSWORD + +# Default admin user to be created for TestGen +TESTGEN_USERNAME +TESTGEN_PASSWORD + +# Accessible path for storing application logs +TESTGEN_LOG_FILE_PATH +``` + +Make sure the PostgreSQL database server is up and running. Initialize the application database for TestGen. +```shell +testgen setup-system-db --yes +``` + +### Run the TestGen UI + +Run the following commands to start the TestGen UI. It will open the browser at [http://localhost:8501](http://localhost:8501). + +```shell +testgen ui patch-streamlit -f +testgen ui run +``` + +Verify that you can login to the UI with the `TESTGEN_USERNAME` and `TESTGEN_PASSWORD` values that you configured in the environment variables. + +### Optional: Run the TestGen demo setup + +The [Data Observability quickstart](https://docs.datakitchen.io/articles/open-source-data-observability/data-observability-overview) walks you through DataOps Data Quality TestGen capabilities to demonstrate how it covers critical use cases for data and analytic teams. + +```shell +testgen quick-start --delete-target-db +testgen run-profile --table-group-id 0ea85e17-acbe-47fe-8394-9970725ad37d +testgen run-test-generation --table-group-id 0ea85e17-acbe-47fe-8394-9970725ad37d +testgen run-tests --project-key DEFAULT --test-suite-key default-suite-1 +testgen quick-start --simulate-fast-forward +``` + +In the TestGen UI, you will see that new data profiling and test results have been generated. 
+ + ## Product Documentation [DataOps Data Quality TestGen](https://docs.datakitchen.io/articles/dataops-testgen-help/dataops-testgen-help) ## Useful Commands -The [dk-installer](https://github.com/DataKitchen/data-observability-installer/?tab=readme-ov-file#install-the-testgen-application) and [docker compose CLI](https://docs.docker.com/compose/reference/) can be used to operate the installed TestGen application. All commands must be run in the same folder that contains the `dk-installer.py` and `docker-compose.yml` files used by the installation. +The [dk-installer](https://github.com/DataKitchen/data-observability-installer/?tab=readme-ov-file#install-the-testgen-application) and [docker compose CLI](https://docs.docker.com/compose/reference/) can be used to operate the TestGen application installed using dk-installer. All commands must be run in the same folder that contains the `dk-installer.py` and `docker-compose.yml` files used by the installation. ### Remove demo data From b22b1b86138ef79cc4c292b3e93f4f8d8b7e4c4f Mon Sep 17 00:00:00 2001 From: Luis Trinidad Date: Thu, 22 Aug 2024 17:21:29 -0400 Subject: [PATCH 17/78] misc(cli): merge run and patch-streamlit commands Users can just start testgen UI and streamlit statics will be patched on every start. --- testgen/__main__.py | 13 +++---------- testgen/ui/scripts/patch_streamlit.py | 2 +- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/testgen/__main__.py b/testgen/__main__.py index 1e2ac2d..234428a 100644 --- a/testgen/__main__.py +++ b/testgen/__main__.py @@ -671,6 +671,7 @@ def ui(): ... 
@ui.command("run", help="Run the browser application with default settings") @click.option("-d", "--debug", is_flag=True, default=False) def run(debug: bool): + from testgen.ui.scripts import patch_streamlit configure_logging( level=logging.INFO, log_format="%(message)s", @@ -683,6 +684,8 @@ def run(debug: bool): use_ssl = os.path.isfile(settings.SSL_CERT_FILE) and os.path.isfile(settings.SSL_KEY_FILE) + patch_streamlit.patch(force=True) + try: app_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "ui/app.py") status_code = subprocess.check_call( @@ -718,15 +721,5 @@ def list_ui_plugins(): click.echo(click.style(" + ", fg="bright_green") + f"{plugin.package: <30}" + f"\tversion: {plugin.version}") -@ui.command("patch-streamlit", help="Modifies Streamlit's internals with custom static files") -@click.option("-f", "--force", is_flag=True, default=False) -def patch_streamlit(force: bool) -> None: - from testgen.ui.scripts import patch_streamlit - - patched_files = patch_streamlit.patch(force=force) - click.echo(click.style("Patched ", bold=True) + click.style(patch_streamlit.STREAMLIT_INDEX, fg="bright_magenta")) - click.echo(click.style(" + ", fg="bright_green") + click.style(f"patched {len(patched_files)} files", italic=True)) - - if __name__ == "__main__": cli() diff --git a/testgen/ui/scripts/patch_streamlit.py b/testgen/ui/scripts/patch_streamlit.py index af83db3..bba728a 100644 --- a/testgen/ui/scripts/patch_streamlit.py +++ b/testgen/ui/scripts/patch_streamlit.py @@ -35,7 +35,7 @@ def patch(force: bool = False) -> list[str]: return [op.split(":")[0] for op in operations] -def _patch_streamlit_index(*operations: list[str], force: bool = False) -> None: +def _patch_streamlit_index(*operations: str, force: bool = False) -> None: """ Patches the index.html inside streamlit package to inject Testgen's own styles and scripts before rendering time. 
From 81201622232bc277f197b0c8c5123d8ccf125b19 Mon Sep 17 00:00:00 2001 From: Luis Trinidad Date: Fri, 23 Aug 2024 10:44:14 -0400 Subject: [PATCH 18/78] misc: update readme and docker files to remove patch command --- Dockerfile | 2 -- docs/local_development.md | 7 +------ 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/Dockerfile b/Dockerfile index dc7faea..9c6f0ef 100644 --- a/Dockerfile +++ b/Dockerfile @@ -28,8 +28,6 @@ RUN python3 -m pip install --no-deps /tmp/dk --prefix=/dk ENV PYTHONPATH ${PYTHONPATH}:/dk/lib/python3.10/site-packages ENV PATH="$PATH:/dk/bin:/opt/mssql-tools/bin/" -RUN TG_METADATA_DB_USER=- TG_METADATA_DB_PASSWORD=- TG_METADATA_DB_HOST=- TG_METADATA_DB_PORT=- testgen ui patch-streamlit - ARG TESTGEN_VERSION ENV TESTGEN_VERSION=v$TESTGEN_VERSION diff --git a/docs/local_development.md b/docs/local_development.md index 0687aa0..62ce185 100644 --- a/docs/local_development.md +++ b/docs/local_development.md @@ -87,12 +87,7 @@ testgen run-tests --project-key DEFAULT --test-suite-key default-suite-1 testgen quick-start --simulate-fast-forward ``` -### Patch and run Streamlit -Patch the Streamlit package with our custom files. -```shell -testgen ui patch-streamlit -f -``` - +### Run Streamlit Run the local Streamlit-based TestGen application. It will open the browser at [http://localhost:8501](http://localhost:8501). ```shell testgen ui run From 6a96e5c1abe0e62620097c3a98bc4aa2304fa1a0 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Fri, 23 Aug 2024 21:25:53 -0400 Subject: [PATCH 19/78] docs: update readme and local dev docs --- README.md | 3 +-- docs/local_development.md | 25 ++++++++++++++++++------- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 57dca5b..467b954 100644 --- a/README.md +++ b/README.md @@ -139,10 +139,9 @@ testgen setup-system-db --yes ### Run the TestGen UI -Run the following commands to start the TestGen UI. 
It will open the browser at [http://localhost:8501](http://localhost:8501). +Run the following command to start the TestGen UI. It will open the browser at [http://localhost:8501](http://localhost:8501). ```shell -testgen ui patch-streamlit -f testgen ui run ``` diff --git a/docs/local_development.md b/docs/local_development.md index 62ce185..db68ed3 100644 --- a/docs/local_development.md +++ b/docs/local_development.md @@ -21,25 +21,34 @@ git clone https://github.com/YOUR-USERNAME/dataops-testgen ### Set up virtual environment -From the root of your local repository, create a Python virtual environment. +We recommend using a Python virtual environment to avoid any dependency conflicts with other applications installed on your machine. The [venv](https://docs.python.org/3/library/venv.html#creating-virtual-environments) module, which is part of the Python standard library, or other third-party tools, like [virtualenv](https://virtualenv.pypa.io/en/latest/) or [conda](https://docs.conda.io/en/latest/), can be used. + +From the root of your local repository, create and activate a virtual environment with a TestGen-compatible version of Python (`>=3.10`). The steps may vary based on your operating system and Python installation - the [Python packaging user guide](https://packaging.python.org/en/latest/tutorials/installing-packages/) is a useful reference. + +_On Linux/Mac_ ```shell python3.10 -m venv venv +source venv/bin/activate ``` -Activate the environment. -```shell -source venv/bin/activate +_On Windows_ +```powershell +py -3.10 -m venv venv +venv\Scripts\activate ``` ### Install dependencies Install the Python dependencies in editable mode. 
+ +_On Linux_ ```shell -# On Linux pip install -e .[dev] +``` -# On Mac -pip install -e .'[dev]' +_On Mac/Windows_ +```shell +pip install -e ".[dev]" ``` On Mac, you can optionally install [watchdog](https://github.com/gorakhargosh/watchdog) for better performance of the [file watcher](https://docs.streamlit.io/develop/api-reference/configuration/config.toml) used for local development. @@ -65,6 +74,8 @@ Source the file to apply the environment variables. source local.env ``` +For the Windows equivalent, refer to [this guide](https://bennett4.medium.com/windows-alternative-to-source-env-for-setting-environment-variables-606be2a6d3e1). + ### Set up Postgres instance Run a PostgreSQL instance as a Docker container. From 855d3ab0f9f9025e1eff7c25905d0769a1509fa3 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Sun, 25 Aug 2024 16:09:34 +0530 Subject: [PATCH 20/78] build(pypi): update pyproject.toml info and release dependencies --- pyproject.toml | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index fe9e439..5be21d0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,28 +7,26 @@ requires = [ build-backend = "setuptools.build_meta" [project] -name = "data-ops-testgen" -version = "2.2.0" -description = "DataKitchen Inc. 
Data Quality Engine" -urls = { "homepage" = "https://datakitchen.io" } +name = "dataops-testgen" +version = "2.8.1" +description = "DataKitchen's Data Quality DataOps TestGen" authors = [ - { "name" = "Charles Bloche", "email" = "chip@datakitchen.io" }, - { "name" = "Tyler Stubenvoll", "email" = "tstubenvoll@datakitchen.io" }, - { "name" = "Alejandro Fernandez", "email" = "alex@datakitchen.io" }, - { "name" = "Anuja Waikar", "email" = "awaikar@datakitchen.io" }, - { "name" = "Shruthy Vakkil", "email" = "svakkil@datakitchen.io" }, - { "name" = "Arnob Bordoloi", "email" = "abordoloi@datakitchen.io" }, - { "name" = "Saurabh Vashist", "email" = "svashist@datakitchen.io" }, - { "name" = "Saurabh Vaidya", "email" = "saurabh@datakitchen.io" } + { "name" = "DataKitchen, Inc.", "email" = "info@datakitchen.io" }, +] +maintainers = [ + { "name" = "DataKitchen, Inc.", "email" = "info@datakitchen.io" }, ] -license = { "text" = "CLOSED" } readme = "README.md" classifiers = [ - "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Development Status :: 5 - Production/Stable", "Operating System :: OS Independent", - "Programming Language :: Python", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Topic :: System :: Monitoring", ] +keywords = [ "dataops", "data", "quality", "testing", "database", "profiling" ] requires-python = ">=3.10" dependencies = [ @@ -79,12 +77,22 @@ dev = [ ] release = [ - "bumpver==2023.1129" + "build==1.2.1", + "bumpver==2023.1129", + "twine==5.1.1", ] [project.entry-points.console_scripts] testgen = "testgen.__main__:cli" +[project.urls] +"Source Code" = "https://github.com/DataKitchen/dataops-testgen" +"Bug Tracker" = "https://github.com/DataKitchen/dataops-testgen/issues" +"Documentation" = "https://docs.datakitchen.io/articles/#!dataops-testgen-help/dataops-testgen-help" +"Release Notes" = 
"https://docs.datakitchen.io/articles/#!dataops-testgen-help/testgen-release-notes" +"Slack" = "https://data-observability-slack.datakitchen.io/join" +"Homepage" = "https://example.com" + [tool.setuptools] include-package-data = true From b48e29a593a812ffd119f6b078e9b88fe4df0aa2 Mon Sep 17 00:00:00 2001 From: Luis Trinidad Date: Fri, 23 Aug 2024 18:34:59 -0400 Subject: [PATCH 21/78] feat(profiling): display a second summary bar for new pii issues --- testgen/ui/views/profiling_anomalies.py | 49 +++++++++++++++---------- 1 file changed, 30 insertions(+), 19 deletions(-) diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py index a89981a..9060448 100644 --- a/testgen/ui/views/profiling_anomalies.py +++ b/testgen/ui/views/profiling_anomalies.py @@ -76,8 +76,12 @@ def render(self) -> None: if not df_pa.empty: # Display summary bar - anomalies_summary = get_profiling_anomaly_summary(str_profile_run_id) - testgen.summary_bar(items=anomalies_summary, key="test_results", height=40, width=800) + summaries = get_profiling_anomaly_summary(str_profile_run_id) + anomalies_pii_summary = [summary for summary in summaries if summary.get("type") == "PII"] + others_summary = [summary for summary in summaries if summary.get("type") != "PII"] + testgen.summary_bar(items=others_summary, key="test_results_summary:others", height=40, width=800) + if anomalies_pii_summary: + testgen.summary_bar(items=anomalies_pii_summary, key="test_results_summary:pii", height=40, width=800) # write_frequency_graph(df_pa) lst_show_columns = [ @@ -267,23 +271,27 @@ def get_profiling_anomaly_summary(str_profile_run_id): str_schema = st.session_state["dbschema"] # Define the query str_sql = f""" - SELECT schema_name, - COUNT(DISTINCT s.table_name) as table_ct, - COUNT(DISTINCT s.column_name) as column_ct, - COUNT(*) as issue_ct, - SUM(CASE WHEN COALESCE(s.disposition, 'Confirmed') = 'Confirmed' - AND t.issue_likelihood = 'Definite' THEN 1 ELSE 0 END) as definite_ct, 
- SUM(CASE WHEN COALESCE(s.disposition, 'Confirmed') = 'Confirmed' - AND t.issue_likelihood = 'Likely' THEN 1 ELSE 0 END) as likely_ct, - SUM(CASE WHEN COALESCE(s.disposition, 'Confirmed') = 'Confirmed' - AND t.issue_likelihood = 'Possible' THEN 1 ELSE 0 END) as possible_ct, - SUM(CASE WHEN COALESCE(s.disposition, 'Confirmed') - IN ('Dismissed', 'Inactive') THEN 1 ELSE 0 END) as dismissed_ct - FROM {str_schema}.profile_anomaly_results s - LEFT JOIN {str_schema}.profile_anomaly_types t - ON (s.anomaly_id = t.id) - WHERE s.profile_run_id = '{str_profile_run_id}' - GROUP BY schema_name; + SELECT + schema_name, + COUNT(DISTINCT s.table_name) as table_ct, + COUNT(DISTINCT s.column_name) as column_ct, + COUNT(*) as issue_ct, + SUM(CASE WHEN COALESCE(s.disposition, 'Confirmed') = 'Confirmed' + AND t.issue_likelihood = 'Definite' THEN 1 ELSE 0 END) as definite_ct, + SUM(CASE WHEN COALESCE(s.disposition, 'Confirmed') = 'Confirmed' + AND t.issue_likelihood = 'Likely' THEN 1 ELSE 0 END) as likely_ct, + SUM(CASE WHEN COALESCE(s.disposition, 'Confirmed') = 'Confirmed' + AND t.issue_likelihood = 'Possible' THEN 1 ELSE 0 END) as possible_ct, + SUM(CASE WHEN COALESCE(s.disposition, 'Confirmed') + IN ('Dismissed', 'Inactive') + AND t.issue_likelihood <> 'Potential PII' THEN 1 ELSE 0 END) as dismissed_ct, + SUM(CASE WHEN COALESCE(s.disposition, 'Confirmed') = 'Confirmed' AND t.issue_likelihood = 'Potential PII' AND s.detail LIKE 'Risk: HIGH%%' THEN 1 ELSE 0 END) as pii_high_ct, + SUM(CASE WHEN COALESCE(s.disposition, 'Confirmed') = 'Confirmed' AND t.issue_likelihood = 'Potential PII' AND s.detail LIKE 'Risk: MODERATE%%' THEN 1 ELSE 0 END) as pii_moderate_ct, + SUM(CASE WHEN COALESCE(s.disposition, 'Confirmed') IN ('Dismissed', 'Inactive') AND t.issue_likelihood = 'Potential PII' THEN 1 ELSE 0 END) as pii_dismissed_ct + FROM {str_schema}.profile_anomaly_results s + LEFT JOIN {str_schema}.profile_anomaly_types t ON (s.anomaly_id = t.id) + WHERE s.profile_run_id = 
'{str_profile_run_id}' + GROUP BY schema_name; """ df = db.retrieve_data(str_sql) @@ -292,6 +300,9 @@ def get_profiling_anomaly_summary(str_profile_run_id): { "label": "Likely", "value": int(df.at[0, "likely_ct"]), "color": "orange" }, { "label": "Possible", "value": int(df.at[0, "possible_ct"]), "color": "yellow" }, { "label": "Dismissed", "value": int(df.at[0, "dismissed_ct"]), "color": "grey" }, + { "label": "Potential PII Definite", "value": int(df.at[0, "pii_high_ct"]), "color": "red", "type": "PII" }, + { "label": "Potential PII Likely", "value": int(df.at[0, "pii_moderate_ct"]), "color": "orange", "type": "PII" }, + { "label": "Potential PII Dismissed", "value": int(df.at[0, "pii_dismissed_ct"]), "color": "grey", "type": "PII" }, ] From 2ed5136b9aa70441168698a49bf18c19e926ef6c Mon Sep 17 00:00:00 2001 From: Luis Trinidad Date: Mon, 26 Aug 2024 10:37:44 -0400 Subject: [PATCH 22/78] misc(profiling): render bars side-by-side and update captions --- testgen/ui/views/profiling_anomalies.py | 29 +++++++++++++++++++------ 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py index 9060448..52250b6 100644 --- a/testgen/ui/views/profiling_anomalies.py +++ b/testgen/ui/views/profiling_anomalies.py @@ -75,13 +75,28 @@ def render(self) -> None: df_pa["action"] = df_pa["id"].map(action_map).fillna(df_pa["action"]) if not df_pa.empty: - # Display summary bar + others_summary_column, pii_summary_column, _ = st.columns([.3, .3, .4]) summaries = get_profiling_anomaly_summary(str_profile_run_id) - anomalies_pii_summary = [summary for summary in summaries if summary.get("type") == "PII"] others_summary = [summary for summary in summaries if summary.get("type") != "PII"] - testgen.summary_bar(items=others_summary, key="test_results_summary:others", height=40, width=800) + with others_summary_column: + st.html("Hygiene Issues") + testgen.summary_bar( + items=others_summary, + 
key="test_results_summary:others", + height=40, + width=400, + ) + + anomalies_pii_summary = [summary for summary in summaries if summary.get("type") == "PII"] if anomalies_pii_summary: - testgen.summary_bar(items=anomalies_pii_summary, key="test_results_summary:pii", height=40, width=800) + with pii_summary_column: + st.html("Potential PII") + testgen.summary_bar( + items=anomalies_pii_summary, + key="test_results_summary:pii", + height=40, + width=400, + ) # write_frequency_graph(df_pa) lst_show_columns = [ @@ -300,9 +315,9 @@ def get_profiling_anomaly_summary(str_profile_run_id): { "label": "Likely", "value": int(df.at[0, "likely_ct"]), "color": "orange" }, { "label": "Possible", "value": int(df.at[0, "possible_ct"]), "color": "yellow" }, { "label": "Dismissed", "value": int(df.at[0, "dismissed_ct"]), "color": "grey" }, - { "label": "Potential PII Definite", "value": int(df.at[0, "pii_high_ct"]), "color": "red", "type": "PII" }, - { "label": "Potential PII Likely", "value": int(df.at[0, "pii_moderate_ct"]), "color": "orange", "type": "PII" }, - { "label": "Potential PII Dismissed", "value": int(df.at[0, "pii_dismissed_ct"]), "color": "grey", "type": "PII" }, + { "label": "High Risk", "value": int(df.at[0, "pii_high_ct"]), "color": "red", "type": "PII" }, + { "label": "Moderate Risk", "value": int(df.at[0, "pii_moderate_ct"]), "color": "orange", "type": "PII" }, + { "label": "Dismissed", "value": int(df.at[0, "pii_dismissed_ct"]), "color": "grey", "type": "PII" }, ] From 7e3a25eb22249afb4aa097f27ecdd7db1cc62b6c Mon Sep 17 00:00:00 2001 From: Ricardo Boni Date: Fri, 16 Aug 2024 10:26:09 -0400 Subject: [PATCH 23/78] refactor(test_definitions): changing the FK to test_suites to be the id --- .../queries/execute_cat_tests_query.py | 4 +- .../commands/queries/execute_tests_query.py | 5 +- .../test_parameter_validation_query.py | 6 +- testgen/commands/run_execute_cat_tests.py | 2 +- testgen/commands/run_execute_tests.py | 2 +- testgen/commands/run_generate_tests.py | 
11 +- .../commands/run_test_parameter_validation.py | 2 +- testgen/common/database/database_service.py | 10 ++ .../030_initialize_new_schema_structure.sql | 37 ++---- .../dbupgrade/0108_incremental_upgrade.sql | 107 ++++++++++++++++++ .../ex_cat_build_agg_table_tests.sql | 3 +- .../ex_cat_get_distinct_tables.sql | 21 ++-- .../execution/ex_get_tests_non_cat.sql | 21 ++-- .../gen_funny_cat_tests/gen_test_constant.sql | 23 ++-- .../gen_test_distinct_value_ct.sql | 21 ++-- .../gen_funny_cat_tests/gen_test_row_ct.sql | 21 ++-- .../gen_test_row_ct_pct.sql | 22 ++-- .../generation/gen_delete_old_tests.sql | 2 +- .../generation/gen_insert_test_suite.sql | 3 +- .../generation/gen_standard_tests.sql | 16 ++- .../get_entities/get_test_generation_list.sql | 29 ++--- .../template/get_entities/get_test_info.sql | 75 ++++++------ .../template/parms/parms_test_execution.sql | 27 +++-- testgen/template/parms/parms_test_gen.sql | 44 ++++--- .../ex_disable_tests_test_definitions.sql | 9 +- .../ex_flag_tests_test_definitions.sql | 15 +-- .../ex_get_test_column_list_tg.sql | 25 +--- .../ex_write_test_val_errors.sql | 53 +++++---- testgen/ui/queries/test_definition_queries.py | 24 ++-- testgen/ui/queries/test_run_queries.py | 11 +- testgen/ui/queries/test_suite_queries.py | 15 ++- testgen/ui/services/table_group_service.py | 14 ++- testgen/ui/services/test_suite_service.py | 10 +- testgen/ui/views/test_suites.py | 5 +- 34 files changed, 393 insertions(+), 302 deletions(-) create mode 100644 testgen/template/dbupgrade/0108_incremental_upgrade.sql diff --git a/testgen/commands/queries/execute_cat_tests_query.py b/testgen/commands/queries/execute_cat_tests_query.py index a6f3d01..fc91e2b 100644 --- a/testgen/commands/queries/execute_cat_tests_query.py +++ b/testgen/commands/queries/execute_cat_tests_query.py @@ -20,8 +20,9 @@ class CCATExecutionSQL: replace_qc_schema = "" dctTestParms: typing.ClassVar = {} - def __init__(self, strProjectCode, strTestSuite, strSQLFlavor, max_query_chars, 
minutes_offset=0): + def __init__(self, strProjectCode, strTestSuiteId, strTestSuite, strSQLFlavor, max_query_chars, minutes_offset=0): # Defaults + self.test_suite_id = strTestSuiteId self.test_suite = strTestSuite self.project_code = strProjectCode flavor_service = database_service.get_flavor_service(strSQLFlavor) @@ -36,6 +37,7 @@ def _ReplaceParms(self, strInputString): strInputString = strInputString.replace("{TEST_RUN_ID}", self.test_run_id) strInputString = strInputString.replace("{PROJECT_CODE}", self.project_code) strInputString = strInputString.replace("{TEST_SUITE}", self.test_suite) + strInputString = strInputString.replace("{TEST_SUITE_ID}", self.test_suite_id) # NOTE: REPLACE_QC_SCHEMA is parm replaced to run build query: sets the actual value to replace. # DATA_QC_SCHEMA is parm in cat_test_conditions that build query replaces via SQL. strInputString = strInputString.replace("{REPLACE_QC_SCHEMA}", self.replace_qc_schema) diff --git a/testgen/commands/queries/execute_tests_query.py b/testgen/commands/queries/execute_tests_query.py index d8fac7a..23204a1 100644 --- a/testgen/commands/queries/execute_tests_query.py +++ b/testgen/commands/queries/execute_tests_query.py @@ -7,6 +7,7 @@ class CTestExecutionSQL: flavor = "" run_date = "" project_code = "" + test_suite_id = "" test_suite = "" test_run_id = "" exception_message = "" @@ -18,9 +19,10 @@ class CTestExecutionSQL: match_sum_columns = "" multi_column_error_condition = "" - def __init__(self, strProjectCode, strFlavor, strTestSuite, minutes_offset=0): + def __init__(self, strProjectCode, strFlavor, strTestSuiteId, strTestSuite, minutes_offset=0): self.project_code = strProjectCode self.flavor = strFlavor + self.test_suite_id = strTestSuiteId self.test_suite = strTestSuite self.today = date_service.get_now_as_string_with_offset(minutes_offset) self.minutes_offset = minutes_offset @@ -41,6 +43,7 @@ def _AssembleDisplayParameters(self): def _ReplaceParms(self, strInputString: str): strInputString = 
strInputString.replace("{PROJECT_CODE}", self.project_code) + strInputString = strInputString.replace("{TEST_SUITE_ID}", self.test_suite_id) strInputString = strInputString.replace("{TEST_SUITE}", self.test_suite) strInputString = strInputString.replace("{SQL_FLAVOR}", self.flavor) strInputString = strInputString.replace("{TEST_RUN_ID}", self.test_run_id) diff --git a/testgen/commands/queries/test_parameter_validation_query.py b/testgen/commands/queries/test_parameter_validation_query.py index b812047..6566bb9 100644 --- a/testgen/commands/queries/test_parameter_validation_query.py +++ b/testgen/commands/queries/test_parameter_validation_query.py @@ -18,15 +18,15 @@ class CTestParamValidationSQL: # Test Set Parameters dctTestParms: typing.ClassVar = {} - def __init__(self, strProjectCode, strFlavor, strTestSuite): + def __init__(self, strProjectCode, strFlavor, strTestSuiteId): self.project_code = strProjectCode self.flavor = strFlavor - self.test_suite = strTestSuite + self.test_suite_id = strTestSuiteId self.today = date_service.get_now_as_string() def _ReplaceParms(self, strInputString): strInputString = strInputString.replace("{PROJECT_CODE}", self.project_code) - strInputString = strInputString.replace("{TEST_SUITE}", self.test_suite) + strInputString = strInputString.replace("{TEST_SUITE_ID}", self.test_suite_id) strInputString = strInputString.replace("{RUN_DATE}", self.run_date) strInputString = strInputString.replace("{TEST_RUN_ID}", self.test_run_id) strInputString = strInputString.replace("{FLAG}", self.flag_val) diff --git a/testgen/commands/run_execute_cat_tests.py b/testgen/commands/run_execute_cat_tests.py index 6dcd755..9ca8de5 100644 --- a/testgen/commands/run_execute_cat_tests.py +++ b/testgen/commands/run_execute_cat_tests.py @@ -76,7 +76,7 @@ def run_cat_test_queries( LOG.info("CurrentStep: Initializing CAT Query Generator") clsCATExecute = CCATExecutionSQL( - strProjectCode, strTestSuite, dctParms["sql_flavor"], dctParms["max_query_chars"], 
minutes_offset + strProjectCode, dctParms["test_suite_id"], strTestSuite, dctParms["sql_flavor"], dctParms["max_query_chars"], minutes_offset ) clsCATExecute.test_run_id = strTestRunID clsCATExecute.run_date = strTestTime diff --git a/testgen/commands/run_execute_tests.py b/testgen/commands/run_execute_tests.py index e25a697..3f56348 100644 --- a/testgen/commands/run_execute_tests.py +++ b/testgen/commands/run_execute_tests.py @@ -52,7 +52,7 @@ def run_test_queries(strTestRunID, strTestTime, strProjectCode, strTestSuite, mi LOG.info("CurrentStep: Initializing Query Generator") - clsExecute = CTestExecutionSQL(strProjectCode, dctParms["sql_flavor"], strTestSuite, minutes_offset) + clsExecute = CTestExecutionSQL(strProjectCode, dctParms["sql_flavor"], dctParms["test_suite_id"], strTestSuite, minutes_offset) clsExecute.run_date = strTestTime clsExecute.test_run_id = strTestRunID clsExecute.process_id = process_service.get_current_process_id() diff --git a/testgen/commands/run_generate_tests.py b/testgen/commands/run_generate_tests.py index fadc6ce..f1ca058 100644 --- a/testgen/commands/run_generate_tests.py +++ b/testgen/commands/run_generate_tests.py @@ -15,7 +15,7 @@ def run_test_gen_queries(strTableGroupsID, strTestSuite, strGenerationSet=None): # Set General Parms booClean = False - LOG.info("CurrentStep: Retrieving General Parameters for Test Suite " + strTestSuite) + LOG.info("CurrentStep: Retrieving General Parameters for Test Suite %s", strTestSuite) dctParms = RetrieveTestGenParms(strTableGroupsID, strTestSuite) # Set Project Connection Parms from retrieved parms @@ -40,21 +40,22 @@ def run_test_gen_queries(strTableGroupsID, strTestSuite, strGenerationSet=None): # Set static parms clsTests.project_code = dctParms["project_code"] clsTests.test_suite = strTestSuite - clsTests.test_suite_id = dctParms["test_suite_id"] clsTests.generation_set = strGenerationSet if strGenerationSet is not None else "" + clsTests.test_suite_id = dctParms["test_suite_id"] if 
dctParms["test_suite_id"] else "" clsTests.connection_id = str(dctParms["connection_id"]) clsTests.table_groups_id = strTableGroupsID clsTests.sql_flavor = dctParms["sql_flavor"] - clsTests.data_schema = dctParms["table_group_schema"] if dctParms["profiling_as_of_date"] is not None: clsTests.as_of_date = dctParms["profiling_as_of_date"].strftime("%Y-%m-%d %H:%M:%S") - if dctParms["test_suite"] is None: + if dctParms["test_suite_id"]: + clsTests.test_suite_id = dctParms["test_suite_id"] + else: LOG.info("CurrentStep: Creating new Test Suite") strQuery = clsTests.GetInsertTestSuiteSQL(booClean) if strQuery: - RunActionQueryList("DKTG", [strQuery]) + clsTests.test_suite_id, = RunActionQueryList("DKTG", [strQuery]) else: raise ValueError("Test Suite not found and could not be created") diff --git a/testgen/commands/run_test_parameter_validation.py b/testgen/commands/run_test_parameter_validation.py index 357ee22..d5c5eb3 100644 --- a/testgen/commands/run_test_parameter_validation.py +++ b/testgen/commands/run_test_parameter_validation.py @@ -35,7 +35,7 @@ def run_parameter_validation_queries( LOG.debug("Validating parameters for Test Suite %s") LOG.info("CurrentStep: Initializing Test Parameter Validation") - clsExecute = CTestParamValidationSQL(strProjectCode, dctParms["sql_flavor"], strTestSuite) + clsExecute = CTestParamValidationSQL(strProjectCode, dctParms["sql_flavor"], dctParms["test_suite_id"]) clsExecute.run_date = test_time clsExecute.test_run_id = test_run_id LOG.info("CurrentStep: Validation Class successfully initialized") diff --git a/testgen/common/database/database_service.py b/testgen/common/database/database_service.py index d7949bc..97e8b63 100644 --- a/testgen/common/database/database_service.py +++ b/testgen/common/database/database_service.py @@ -389,6 +389,7 @@ def RunActionQueryList(strCredentialSet, lstQueries, strAdminNDS="N", user_overr ) as con: i = 0 n = len(lstQueries) + lstInsertedIds = [] if n == 0: LOG.info("No queries to process") for 
q in lstQueries: @@ -401,9 +402,18 @@ def RunActionQueryList(strCredentialSet, lstQueries, strAdminNDS="N", user_overr strMsg = "Action query processed no records." else: strMsg = str(exQ.rowcount) + " records processed." + + try: + lstInsertedIds.append(exQ.fetchone()[0]) + except Exception: + lstInsertedIds.append(None) + tx.commit() LOG.info(strMsg) + return lstInsertedIds + + def RunRetrievalQueryList(strCredentialSet, lstQueries): LOG.info("CurrentDB Operation: RunRetrievalQueryList. Creds: %s", strCredentialSet) diff --git a/testgen/template/dbsetup/030_initialize_new_schema_structure.sql b/testgen/template/dbsetup/030_initialize_new_schema_structure.sql index ac31aa3..29ffa2f 100644 --- a/testgen/template/dbsetup/030_initialize_new_schema_structure.sql +++ b/testgen/template/dbsetup/030_initialize_new_schema_structure.sql @@ -138,8 +138,8 @@ CREATE TABLE test_suites ( component_key VARCHAR(100), component_type VARCHAR(100), component_name VARCHAR(100), - CONSTRAINT test_suites_project_code_test_suite_pk - PRIMARY KEY (project_code, test_suite) + CONSTRAINT test_suites_id_pk + PRIMARY KEY (id) ); CREATE TABLE test_definitions ( @@ -147,12 +147,10 @@ CREATE TABLE test_definitions ( cat_test_id BIGINT GENERATED BY DEFAULT AS IDENTITY CONSTRAINT test_definitions_cat_test_id_pk PRIMARY KEY, - project_code VARCHAR(30), table_groups_id UUID, profile_run_id UUID, test_type VARCHAR(200), - test_suite_id UUID, - test_suite VARCHAR(200), + test_suite_id UUID NOT NULL, test_description VARCHAR(1000), test_action VARCHAR(100), schema_name VARCHAR(100), @@ -190,8 +188,8 @@ CREATE TABLE test_definitions ( profiling_as_of_date TIMESTAMP, last_manual_update TIMESTAMP DEFAULT NULL, export_to_observability VARCHAR(5), - CONSTRAINT test_definitions_test_suites_project_code_test_suite_fk - FOREIGN KEY (project_code, test_suite) REFERENCES test_suites + CONSTRAINT test_definitions_test_suites_test_suite_id_fk + FOREIGN KEY (test_suite_id) REFERENCES test_suites ); ALTER SEQUENCE 
test_definitions_cat_test_id_seq OWNED BY test_definitions.cat_test_id; @@ -322,26 +320,6 @@ CREATE TABLE profile_pair_rules ( ); -CREATE TABLE execution_queue ( - execution_id BIGINT GENERATED ALWAYS AS IDENTITY - CONSTRAINT execution_queue_execution_id_pk - PRIMARY KEY, - project_code VARCHAR(30) - CONSTRAINT execution_queue_projects_project_code_fk - REFERENCES projects, - pipeline VARCHAR(20), - test_suite VARCHAR(200), - connection_id BIGINT - CONSTRAINT execution_queue_connections_connection_id_fk - REFERENCES connections, - schedule_datetime INTEGER, - execution_datetime TIMESTAMP, - execution_status VARCHAR(10), - order_run_number VARCHAR(50), - CONSTRAINT execution_queue_test_suites_project_code_test_suite_fk - FOREIGN KEY (project_code, test_suite) REFERENCES test_suites -); - CREATE TABLE data_structure_log ( project_code VARCHAR(30), connection_id BIGINT, @@ -548,9 +526,8 @@ CREATE TABLE test_results ( test_run_id UUID, table_groups_id UUID, observability_status VARCHAR(10), --- exported_observability BOOLEAN NOT NULL DEFAULT FALSE, CONSTRAINT test_results_test_suites_project_code_test_suite_fk - FOREIGN KEY (project_code, test_suite) REFERENCES test_suites + FOREIGN KEY (test_suite_id) REFERENCES test_suites ); @@ -648,7 +625,7 @@ CREATE INDEX ix_ts_con -- Index test_definitions CREATE INDEX ix_td_pc_stc_tst - ON test_definitions(project_code, schema_name, table_name, column_name, test_type); + ON test_definitions(test_suite_id, schema_name, table_name, column_name, test_type); CREATE UNIQUE INDEX uix_td_id ON test_definitions(id); diff --git a/testgen/template/dbupgrade/0108_incremental_upgrade.sql b/testgen/template/dbupgrade/0108_incremental_upgrade.sql new file mode 100644 index 0000000..39f0ed3 --- /dev/null +++ b/testgen/template/dbupgrade/0108_incremental_upgrade.sql @@ -0,0 +1,107 @@ +SET SEARCH_PATH TO {SCHEMA_NAME}; + +-- Step 1: Drop everything that depends on the current state + +DROP TABLE execution_queue; +DROP VIEW v_test_results; +ALTER 
TABLE test_definitions DROP CONSTRAINT test_definitions_test_suites_project_code_test_suite_fk; +ALTER TABLE test_results DROP CONSTRAINT test_results_test_suites_project_code_test_suite_fk; +ALTER TABLE test_suites DROP CONSTRAINT test_suites_project_code_test_suite_pk; +DROP INDEX ix_td_pc_stc_tst; + + +-- Step 2: Adjust the test definition table + + UPDATE test_definitions + SET test_suite_id = ts.id + FROM test_definitions td +INNER JOIN test_suites AS ts ON td.test_suite = ts.test_suite AND td.project_code = ts.project_code + WHERE td.test_suite_id is NULL; + +ALTER TABLE test_definitions ALTER COLUMN test_suite_id SET NOT NULL; + +-- Step 3: Re-create the constraints + +ALTER TABLE test_suites ADD CONSTRAINT test_suites_id_pk PRIMARY KEY (id); +ALTER TABLE test_definitions ADD CONSTRAINT test_definitions_test_suites_test_suite_id_fk + FOREIGN KEY (test_suite_id) REFERENCES test_suites; +ALTER TABLE test_results ADD CONSTRAINT test_results_test_suites_test_suite_id_fk + FOREIGN KEY (test_suite_id) REFERENCES test_suites; + +-- Step 4: Clean up + +ALTER TABLE test_definitions DROP COLUMN test_suite; +ALTER TABLE test_definitions DROP COLUMN project_code; + +-- Step 5: Re-create views and indexes + +CREATE INDEX ix_td_pc_stc_tst + ON test_definitions(test_suite_id, schema_name, table_name, column_name, test_type); + +CREATE VIEW v_test_results AS + SELECT p.project_name, + ts.test_suite, + tg.table_groups_name, + cn.connection_name, cn.project_host, cn.sql_flavor, + tt.dq_dimension, + r.schema_name, r.table_name, r.column_names, + r.test_time as test_date, + r.test_type, tt.id as test_type_id, tt.test_name_short, tt.test_name_long, + r.test_description, + tt.measure_uom, tt.measure_uom_description, + c.test_operator, + r.threshold_value::NUMERIC(16, 5) as threshold_value, + r.result_measure::NUMERIC(16, 5), + r.result_status, + r.input_parameters, + r.result_message, + CASE WHEN result_code <> 1 THEN r.severity END as severity, + CASE + WHEN result_code <> 1 
THEN r.disposition + ELSE 'Passed' + END AS disposition, + r.result_code as passed_ct, + (1 - r.result_code)::INTEGER as exception_ct, + CASE + WHEN result_status = 'Warning' + AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1 + END::INTEGER as warning_ct, + CASE + WHEN result_status = 'Failed' + AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1 + END::INTEGER as failed_ct, + CASE + WHEN result_message ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1 + END AS execution_error_ct, + r.project_code, + r.table_groups_id, + r.id as test_result_id, c.id as connection_id, + r.test_suite_id, + r.test_definition_id as test_definition_id_runtime, + CASE + WHEN r.auto_gen = TRUE THEN d.id + ELSE r.test_definition_id + END as test_definition_id_current, + r.test_run_id as test_run_id, + r.auto_gen + FROM test_results r + INNER JOIN test_types tt + ON r.test_type = tt.test_type + LEFT JOIN test_definitions d + ON r.test_suite_id = d.test_suite_id + AND r.table_name = d.table_name + AND r.column_names = COALESCE(d.column_name, 'N/A') + AND r.test_type = d.test_type + AND r.auto_gen = TRUE + AND d.last_auto_gen_date IS NOT NULL + INNER JOIN test_suites ts + ON r.test_suite_id = ts.id + INNER JOIN projects p + ON r.project_code = p.project_code + INNER JOIN table_groups tg + ON r.table_groups_id = tg.id + INNER JOIN connections cn + ON tg.connection_id = cn.connection_id + LEFT JOIN cat_test_conditions c + ON cn.sql_flavor = c.sql_flavor + AND r.test_type = c.test_type; diff --git a/testgen/template/exec_cat_tests/ex_cat_build_agg_table_tests.sql b/testgen/template/exec_cat_tests/ex_cat_build_agg_table_tests.sql index 0b0ad47..7e0413f 100644 --- a/testgen/template/exec_cat_tests/ex_cat_build_agg_table_tests.sql +++ b/testgen/template/exec_cat_tests/ex_cat_build_agg_table_tests.sql @@ -67,8 +67,7 @@ WITH test_detail INNER JOIN cat_test_conditions c ON (t.test_type = c.test_type AND '{SQL_FLAVOR}' = c.sql_flavor) - WHERE t.project_code = '{PROJECT_CODE}' 
- AND t.test_suite = '{TEST_SUITE}' + WHERE t.test_suite_id = '{TEST_SUITE_ID}' AND t.schema_name = '{SCHEMA_NAME}' AND t.table_name = '{TABLE_NAME}' AND COALESCE(t.test_active, 'Y') = 'Y' diff --git a/testgen/template/exec_cat_tests/ex_cat_get_distinct_tables.sql b/testgen/template/exec_cat_tests/ex_cat_get_distinct_tables.sql index 5e39061..ff2878b 100644 --- a/testgen/template/exec_cat_tests/ex_cat_get_distinct_tables.sql +++ b/testgen/template/exec_cat_tests/ex_cat_get_distinct_tables.sql @@ -1,11 +1,12 @@ -SELECT DISTINCT schema_name, table_name, +SELECT DISTINCT schema_name, + table_name, project_qc_schema as replace_qc_schema - FROM test_definitions t -INNER JOIN test_types tt - ON t.test_type = tt.test_type -INNER JOIN table_groups tg - ON (t.table_groups_id = tg.id) -INNER JOIN connections c - ON (tg.connection_id = c.connection_id) - WHERE t.test_suite = '{TEST_SUITE}' - AND tt.run_type = 'CAT'; + FROM test_definitions td + INNER JOIN test_types tt + ON td.test_type = tt.test_type + INNER JOIN table_groups tg + ON (td.table_groups_id = tg.id) + INNER JOIN connections c + ON (tg.connection_id = c.connection_id) + WHERE td.test_suite_id = '{TEST_SUITE_ID}' + AND tt.run_type = 'CAT'; diff --git a/testgen/template/execution/ex_get_tests_non_cat.sql b/testgen/template/execution/ex_get_tests_non_cat.sql index 8b4367c..7d69ef4 100644 --- a/testgen/template/execution/ex_get_tests_non_cat.sql +++ b/testgen/template/execution/ex_get_tests_non_cat.sql @@ -1,7 +1,7 @@ SELECT tt.test_type, - s.id::VARCHAR as test_definition_id, - COALESCE(s.test_description, tt.test_description) as test_description, - COALESCE(s.test_action, g.test_action, '') as test_action, + td.id::VARCHAR AS test_definition_id, + COALESCE(td.test_description, tt.test_description) AS test_description, + COALESCE(td.test_action, ts.test_action, '') AS test_action, schema_name, table_name, column_name, @@ -33,15 +33,14 @@ SELECT tt.test_type, coalesce(match_having_condition, '') as 
match_having_condition, coalesce(custom_query, '') as custom_query, coalesce(tm.template_name, '') as template_name -FROM test_definitions s - INNER JOIN test_suites g - ON (s.test_suite = g.test_suite) +FROM test_definitions td + INNER JOIN test_suites ts + ON (td.test_suite_id = ts.id) INNER JOIN test_types tt - ON (s.test_type = tt.test_type) + ON (td.test_type = tt.test_type) LEFT JOIN test_templates tm - ON (s.test_type = tm.test_type + ON (td.test_type = tm.test_type AND '{SQL_FLAVOR}' = tm.sql_flavor) -WHERE s.project_code = '{PROJECT_CODE}' - AND s.test_suite = '{TEST_SUITE}' +WHERE td.test_suite_id = '{TEST_SUITE_ID}' AND tt.run_type = 'QUERY' - AND s.test_active = 'Y'; + AND td.test_active = 'Y'; diff --git a/testgen/template/gen_funny_cat_tests/gen_test_constant.sql b/testgen/template/gen_funny_cat_tests/gen_test_constant.sql index c0e2204..3f28dc5 100644 --- a/testgen/template/gen_funny_cat_tests/gen_test_constant.sql +++ b/testgen/template/gen_funny_cat_tests/gen_test_constant.sql @@ -1,6 +1,7 @@ -- Then insert new tests where a locked test is not already present -INSERT INTO test_definitions (project_code, table_groups_id, profile_run_id, - test_type, test_suite, test_suite_id, + +INSERT INTO test_definitions (table_groups_id, profile_run_id, + test_type, test_suite_id, schema_name, table_name, column_name, skip_errors, last_auto_gen_date, test_active, baseline_value, threshold_value, profiling_as_of_date) @@ -8,12 +9,12 @@ WITH last_run AS (SELECT r.table_groups_id, MAX(run_date) AS last_run_date FROM profile_results p INNER JOIN profiling_runs r ON (p.profile_run_id = r.id) - INNER JOIN test_suites tg - ON p.project_code = tg.project_code - AND p.connection_id = tg.connection_id + INNER JOIN test_suites ts + ON p.project_code = ts.project_code + AND p.connection_id = ts.connection_id WHERE p.project_code = '{PROJECT_CODE}' AND r.table_groups_id = '{TABLE_GROUPS_ID}'::UUID - AND tg.test_suite = '{TEST_SUITE}' + AND ts.id = '{TEST_SUITE_ID}' AND 
p.run_date::DATE <= '{AS_OF_DATE}' GROUP BY r.table_groups_id), curprof AS (SELECT p.* @@ -24,7 +25,7 @@ WITH last_run AS (SELECT r.table_groups_id, MAX(run_date) AS last_run_date locked AS (SELECT schema_name, table_name, column_name, test_type FROM test_definitions WHERE table_groups_id = '{TABLE_GROUPS_ID}'::UUID - AND test_suite = '{TEST_SUITE}' + AND test_suite_id = '{TEST_SUITE_ID}' AND lock_refresh = 'Y'), all_runs AS ( SELECT DISTINCT p.table_groups_id, p.schema_name, p.run_date, DENSE_RANK() OVER (PARTITION BY p.table_groups_id ORDER BY p.run_date DESC) as run_rank @@ -33,7 +34,7 @@ WITH last_run AS (SELECT r.table_groups_id, MAX(run_date) AS last_run_date ON p.connection_id = ts.connection_id AND p.project_code = ts.project_code WHERE p.table_groups_id = '{TABLE_GROUPS_ID}'::UUID - AND ts.test_suite = '{TEST_SUITE}' + AND ts.id = '{TEST_SUITE_ID}' AND p.run_date::DATE <= '{AS_OF_DATE}'), recent_runs AS (SELECT table_groups_id, schema_name, run_date, run_rank FROM all_runs @@ -68,10 +69,8 @@ WITH last_run AS (SELECT r.table_groups_id, MAX(run_date) AS last_run_date AND p.distinct_value_ct = 1 THEN 'FALSE' END ) = 1 ), newtests AS ( SELECT 'Constant'::VARCHAR AS test_type, - '{TEST_SUITE}'::VARCHAR AS test_suite, '{TEST_SUITE_ID}'::UUID AS test_suite_id, c.profile_run_id, - c.project_code, c.schema_name, c.table_name, c.column_name, c.run_date AS last_run_date, case when general_type='A' then fn_quote_literal_escape(min_text, '{SQL_FLAVOR}')::VARCHAR @@ -90,8 +89,8 @@ newtests AS ( SELECT 'Constant'::VARCHAR AS test_type, AND '{GENERATION_SET}' = s.generation_set) WHERE (s.generation_set IS NOT NULL OR '{GENERATION_SET}' = '') ) -SELECT n.project_code, '{TABLE_GROUPS_ID}'::UUID as table_groups_id, n.profile_run_id, - n.test_type, n.test_suite, n.test_suite_id, n.schema_name, n.table_name, n.column_name, +SELECT '{TABLE_GROUPS_ID}'::UUID as table_groups_id, n.profile_run_id, + n.test_type, n.test_suite_id, n.schema_name, n.table_name, n.column_name, 0 as 
skip_errors, '{RUN_DATE}'::TIMESTAMP as auto_gen_date, 'Y' as test_active, COALESCE(baseline_value, '') as baseline_value, '0' as threshold_value, '{AS_OF_DATE}'::TIMESTAMP diff --git a/testgen/template/gen_funny_cat_tests/gen_test_distinct_value_ct.sql b/testgen/template/gen_funny_cat_tests/gen_test_distinct_value_ct.sql index 3b00304..75e63ce 100644 --- a/testgen/template/gen_funny_cat_tests/gen_test_distinct_value_ct.sql +++ b/testgen/template/gen_funny_cat_tests/gen_test_distinct_value_ct.sql @@ -1,6 +1,6 @@ -- FIRST TYPE OF CONSTANT IS HANDLED IN SEPARATE SQL FILE gen_standard_tests.sql using generic parameters -- Second type: constants with changing values (1 distinct value) -INSERT INTO test_definitions (project_code, table_groups_id, profile_run_id, test_type, test_suite, test_suite_id, +INSERT INTO test_definitions (table_groups_id, profile_run_id, test_type, test_suite_id, schema_name, table_name, column_name, skip_errors, last_auto_gen_date, test_active, baseline_value_ct, threshold_value, profiling_as_of_date) @@ -8,12 +8,12 @@ WITH last_run AS (SELECT r.table_groups_id, MAX(run_date) AS last_run_date FROM profile_results p INNER JOIN profiling_runs r ON (p.profile_run_id = r.id) - INNER JOIN test_suites tg - ON p.project_code = tg.project_code - AND p.connection_id = tg.connection_id + INNER JOIN test_suites ts + ON p.project_code = ts.project_code + AND p.connection_id = ts.connection_id WHERE p.project_code = '{PROJECT_CODE}' AND r.table_groups_id = '{TABLE_GROUPS_ID}'::UUID - AND tg.test_suite = '{TEST_SUITE}' + AND ts.id = '{TEST_SUITE_ID}' AND p.run_date::DATE <= '{AS_OF_DATE}' GROUP BY r.table_groups_id), curprof AS (SELECT p.* @@ -24,7 +24,7 @@ WITH last_run AS (SELECT r.table_groups_id, MAX(run_date) AS last_run_date locked AS (SELECT schema_name, table_name, column_name, test_type FROM test_definitions WHERE table_groups_id = '{TABLE_GROUPS_ID}'::UUID - AND test_suite = '{TEST_SUITE}' + AND test_suite_id = '{TEST_SUITE_ID}' AND lock_refresh = 
'Y'), all_runs AS ( SELECT DISTINCT p.table_groups_id, p.schema_name, p.run_date, DENSE_RANK() OVER (PARTITION BY p.table_groups_id ORDER BY p.run_date DESC) as run_rank @@ -33,7 +33,7 @@ WITH last_run AS (SELECT r.table_groups_id, MAX(run_date) AS last_run_date ON p.connection_id = ts.connection_id AND p.project_code = ts.project_code WHERE p.table_groups_id = '{TABLE_GROUPS_ID}'::UUID - AND ts.test_suite = '{TEST_SUITE}' + AND ts.id = '{TEST_SUITE_ID}' AND p.run_date::DATE <= '{AS_OF_DATE}'), recent_runs AS (SELECT table_groups_id, schema_name, run_date, run_rank FROM all_runs @@ -67,9 +67,8 @@ WITH last_run AS (SELECT r.table_groups_id, MAX(run_date) AS last_run_date AND p.distinct_value_ct = 1 THEN 'FALSE' END ) > 1 ), newtests AS ( SELECT 'Distinct_Value_Ct'::VARCHAR AS test_type, - '{TEST_SUITE}'::VARCHAR AS test_suite, '{TEST_SUITE_ID}'::UUID AS test_suite_id, - c.project_code, c.table_groups_id, c.profile_run_id, + c.table_groups_id, c.profile_run_id, c.schema_name, c.table_name, c.column_name, c.run_date AS last_run_date, c.distinct_value_ct @@ -83,8 +82,8 @@ newtests AS ( SELECT 'Distinct_Value_Ct'::VARCHAR AS test_type, AND '{GENERATION_SET}' = s.generation_set) WHERE (s.generation_set IS NOT NULL OR '{GENERATION_SET}' = '') ) -SELECT n.project_code, n.table_groups_id, n.profile_run_id, - n.test_type, n.test_suite, n.test_suite_id, +SELECT n.table_groups_id, n.profile_run_id, + n.test_type, n.test_suite_id, n.schema_name, n.table_name, n.column_name, 0 as skip_errors, '{RUN_DATE}'::TIMESTAMP as last_auto_gen_date, 'Y' as test_active, distinct_value_ct as baseline_value_ct, distinct_value_ct as threshold_value, diff --git a/testgen/template/gen_funny_cat_tests/gen_test_row_ct.sql b/testgen/template/gen_funny_cat_tests/gen_test_row_ct.sql index dacf48b..55b626e 100644 --- a/testgen/template/gen_funny_cat_tests/gen_test_row_ct.sql +++ b/testgen/template/gen_funny_cat_tests/gen_test_row_ct.sql @@ -1,5 +1,5 @@ -- Insert new tests where a locked test is not 
already present -INSERT INTO test_definitions (project_code, table_groups_id, profile_run_id, test_type, test_suite, test_suite_id, +INSERT INTO test_definitions (table_groups_id, profile_run_id, test_type, test_suite_id, schema_name, table_name, skip_errors, threshold_value, last_auto_gen_date, test_active, baseline_ct, profiling_as_of_date) @@ -7,12 +7,12 @@ WITH last_run AS (SELECT r.table_groups_id, MAX(run_date) AS last_run_date FROM profile_results p INNER JOIN profiling_runs r ON (p.profile_run_id = r.id) - INNER JOIN test_suites tg - ON p.project_code = tg.project_code - AND p.connection_id = tg.connection_id + INNER JOIN test_suites ts + ON p.project_code = ts.project_code + AND p.connection_id = ts.connection_id WHERE p.project_code = '{PROJECT_CODE}' AND r.table_groups_id = '{TABLE_GROUPS_ID}'::UUID - AND tg.test_suite = '{TEST_SUITE}' + AND ts.id = '{TEST_SUITE_ID}' AND p.run_date::DATE <= '{AS_OF_DATE}' GROUP BY r.table_groups_id), curprof AS (SELECT p.* @@ -23,11 +23,10 @@ WITH last_run AS (SELECT r.table_groups_id, MAX(run_date) AS last_run_date locked AS (SELECT schema_name, table_name, column_name, test_type FROM test_definitions WHERE table_groups_id = '{TABLE_GROUPS_ID}'::UUID - AND test_suite = '{TEST_SUITE}' + AND test_suite_id = '{TEST_SUITE_ID}' AND lock_refresh = 'Y'), - newtests AS (SELECT project_code, table_groups_id, profile_run_id, + newtests AS (SELECT table_groups_id, profile_run_id, 'Row_Ct' AS test_type, - '{TEST_SUITE}' AS test_suite, '{TEST_SUITE_ID}'::UUID AS test_suite_id, schema_name, table_name, @@ -41,9 +40,9 @@ WITH last_run AS (SELECT r.table_groups_id, MAX(run_date) AS last_run_date AND (s.generation_set IS NOT NULL OR '{GENERATION_SET}' = '') GROUP BY project_code, table_groups_id, profile_run_id, - test_type, test_suite, schema_name, table_name ) -SELECT n.project_code, n.table_groups_id, n.profile_run_id, - n.test_type, n.test_suite, n.test_suite_id, + test_type, test_suite_id, schema_name, table_name ) +SELECT 
n.table_groups_id, n.profile_run_id, + n.test_type, n.test_suite_id, n.schema_name, n.table_name, 0 as skip_errors, record_ct AS threshold_value, '{RUN_DATE}'::TIMESTAMP as last_auto_gen_date, diff --git a/testgen/template/gen_funny_cat_tests/gen_test_row_ct_pct.sql b/testgen/template/gen_funny_cat_tests/gen_test_row_ct_pct.sql index 0113d01..d68a432 100644 --- a/testgen/template/gen_funny_cat_tests/gen_test_row_ct_pct.sql +++ b/testgen/template/gen_funny_cat_tests/gen_test_row_ct_pct.sql @@ -1,5 +1,5 @@ -- Insert new tests where a locked test is not already present -INSERT INTO test_definitions (project_code, table_groups_id, profile_run_id, test_type, test_suite, test_suite_id, +INSERT INTO test_definitions (table_groups_id, profile_run_id, test_type, test_suite_id, schema_name, table_name, skip_errors, last_auto_gen_date, profiling_as_of_date, test_active, baseline_ct, threshold_value) @@ -7,12 +7,12 @@ WITH last_run AS (SELECT r.table_groups_id, MAX(run_date) AS last_run_date FROM profile_results p INNER JOIN profiling_runs r ON (p.profile_run_id = r.id) - INNER JOIN test_suites tg - ON p.project_code = tg.project_code - AND p.connection_id = tg.connection_id + INNER JOIN test_suites ts + ON p.project_code = ts.project_code + AND p.connection_id = ts.connection_id WHERE p.project_code = '{PROJECT_CODE}' AND r.table_groups_id = '{TABLE_GROUPS_ID}'::UUID - AND tg.test_suite = '{TEST_SUITE}' + AND ts.id = '{TEST_SUITE_ID}' AND p.run_date::DATE <= '{AS_OF_DATE}' GROUP BY r.table_groups_id), curprof AS (SELECT p.* @@ -23,12 +23,12 @@ WITH last_run AS (SELECT r.table_groups_id, MAX(run_date) AS last_run_date locked AS (SELECT schema_name, table_name, column_name, test_type FROM test_definitions WHERE table_groups_id = '{TABLE_GROUPS_ID}'::UUID - AND test_suite = '{TEST_SUITE}' + AND test_suite_id = '{TEST_SUITE_ID}' AND lock_refresh = 'Y'), newtests AS ( - SELECT project_code, table_groups_id, profile_run_id, + SELECT table_groups_id, + profile_run_id, 'Row_Ct_Pct' 
AS test_type, - '{TEST_SUITE}' AS test_suite, '{TEST_SUITE_ID}'::UUID AS test_suite_id, schema_name, table_name, @@ -42,10 +42,10 @@ WITH last_run AS (SELECT r.table_groups_id, MAX(run_date) AS last_run_date AND (s.generation_set IS NOT NULL OR '{GENERATION_SET}' = '') GROUP BY project_code, table_groups_id, profile_run_id, - test_type, test_suite, schema_name, table_name + test_type, test_suite_id, schema_name, table_name HAVING MAX(record_ct) >= 500) -SELECT n.project_code, n.table_groups_id, n.profile_run_id, - n.test_type, n.test_suite, n.test_suite_id, +SELECT n.table_groups_id, n.profile_run_id, + n.test_type, n.test_suite_id, n.schema_name, n.table_name, 0 as skip_errors, '{RUN_DATE}'::TIMESTAMP as last_auto_gen_date, '{AS_OF_DATE}'::TIMESTAMP as profiling_as_of_date, diff --git a/testgen/template/generation/gen_delete_old_tests.sql b/testgen/template/generation/gen_delete_old_tests.sql index 67b38b4..9446304 100644 --- a/testgen/template/generation/gen_delete_old_tests.sql +++ b/testgen/template/generation/gen_delete_old_tests.sql @@ -1,5 +1,5 @@ DELETE FROM test_definitions WHERE table_groups_id = '{TABLE_GROUPS_ID}'::UUID - AND test_suite = '{TEST_SUITE}' + AND test_suite_id = '{TEST_SUITE_ID}' AND last_auto_gen_date IS NOT NULL AND COALESCE(lock_refresh, 'N') <> 'Y'; diff --git a/testgen/template/generation/gen_insert_test_suite.sql b/testgen/template/generation/gen_insert_test_suite.sql index c5a5542..d78becb 100644 --- a/testgen/template/generation/gen_insert_test_suite.sql +++ b/testgen/template/generation/gen_insert_test_suite.sql @@ -2,4 +2,5 @@ INSERT INTO test_suites (project_code, test_suite, connection_id, table_groups_id, test_suite_description, component_type, component_key) VALUES ('{PROJECT_CODE}', '{TEST_SUITE}', {CONNECTION_ID}, '{TABLE_GROUPS_ID}', '{TEST_SUITE} Test Suite', - 'dataset', '{TEST_SUITE}'); + 'dataset', '{TEST_SUITE}') +RETURNING id::VARCHAR; diff --git a/testgen/template/generation/gen_standard_tests.sql 
b/testgen/template/generation/gen_standard_tests.sql index b253f94..c8b9a61 100644 --- a/testgen/template/generation/gen_standard_tests.sql +++ b/testgen/template/generation/gen_standard_tests.sql @@ -1,5 +1,5 @@ -- Insert new tests where a locked test is not already present -INSERT INTO test_definitions (project_code, table_groups_id, profile_run_id, test_type, test_suite, test_suite_id, +INSERT INTO test_definitions (table_groups_id, profile_run_id, test_type, test_suite_id, schema_name, table_name, column_name, skip_errors, test_active, last_auto_gen_date, profiling_as_of_date, {DEFAULT_PARM_COLUMNS} ) @@ -7,12 +7,12 @@ WITH last_run AS (SELECT r.table_groups_id, MAX(run_date) AS last_run_date FROM profile_results p INNER JOIN profiling_runs r ON (p.profile_run_id = r.id) - INNER JOIN test_suites tg - ON p.project_code = tg.project_code - AND p.connection_id = tg.connection_id + INNER JOIN test_suites ts + ON p.project_code = ts.project_code + AND p.connection_id = ts.connection_id WHERE p.project_code = '{PROJECT_CODE}' AND r.table_groups_id = '{TABLE_GROUPS_ID}'::UUID - AND tg.test_suite = '{TEST_SUITE}' + AND ts.id = '{TEST_SUITE_ID}' AND p.run_date::DATE <= '{AS_OF_DATE}' GROUP BY r.table_groups_id), curprof AS (SELECT p.*, datediff('MM', p.min_date, p.max_date) as min_max_months, datediff('week', '1800-01-05'::DATE, p.max_date) - datediff('week', '1800-01-05'::DATE, p.min_date) as min_max_weeks @@ -23,18 +23,16 @@ WITH last_run AS (SELECT r.table_groups_id, MAX(run_date) AS last_run_date locked AS (SELECT schema_name, table_name, column_name FROM test_definitions WHERE table_groups_id = '{TABLE_GROUPS_ID}'::UUID - AND test_suite = '{TEST_SUITE}' + AND test_suite_id = '{TEST_SUITE_ID}' AND test_type = '{TEST_TYPE}' AND lock_refresh = 'Y'), newtests AS (SELECT * FROM curprof WHERE schema_name = '{DATA_SCHEMA}' AND {SELECTION_CRITERIA} ) -SELECT '{PROJECT_CODE}' as project_code, - '{TABLE_GROUPS_ID}'::UUID as table_groups_id, +SELECT '{TABLE_GROUPS_ID}'::UUID 
as table_groups_id, n.profile_run_id, '{TEST_TYPE}' AS test_type, - '{TEST_SUITE}' AS test_suite, '{TEST_SUITE_ID}' AS test_suite_id, n.schema_name, n.table_name, n.column_name, 0 as skip_errors, 'Y' as test_active, '{RUN_DATE}'::TIMESTAMP as last_auto_gen_date, diff --git a/testgen/template/get_entities/get_test_generation_list.sql b/testgen/template/get_entities/get_test_generation_list.sql index 0cf6c60..2efa558 100644 --- a/testgen/template/get_entities/get_test_generation_list.sql +++ b/testgen/template/get_entities/get_test_generation_list.sql @@ -2,17 +2,18 @@ Output: list all test generation runs based on last_auto_run_date Optional: n/a*/ -Select test_suite as test_suite_key, - table_groups_id, - last_auto_gen_date, - d.profiling_as_of_date, - lock_refresh, - COUNT(DISTINCT schema_name || '.' || table_name) as tables, - COUNT(DISTINCT schema_name || '.' || table_name || '.' || column_name) as columns, - COUNT(*) as tests -from test_definitions d -where d.project_code = '{PROJECT_CODE}' - and test_suite = '{TEST_SUITE}' - and last_auto_gen_date IS NOT NULL -GROUP BY table_groups_id, project_code, test_suite, last_auto_gen_date, d.profiling_as_of_date, lock_refresh -order by last_auto_gen_date desc; + SELECT ts.test_suite AS test_suite_key, + ts.table_groups_id, + td.last_auto_gen_date, + td.profiling_as_of_date, + td.lock_refresh, + COUNT(DISTINCT td.schema_name || '.' || td.table_name) as tables, + COUNT(DISTINCT td.schema_name || '.' || td.table_name || '.' 
|| td.column_name) as columns, + COUNT(*) as tests + FROM test_definitions td + JOIN test_suites ts ON td.test_suite_id = ts.id + WHERE ts.project_code = '{PROJECT_CODE}' + AND ts.test_suite = '{TEST_SUITE}' + AND td.last_auto_gen_date IS NOT NULL +GROUP BY ts.id, td.last_auto_gen_date, td.profiling_as_of_date, td.lock_refresh +ORDER BY td.last_auto_gen_date desc; diff --git a/testgen/template/get_entities/get_test_info.sql b/testgen/template/get_entities/get_test_info.sql index 5a7d1aa..2bd589a 100644 --- a/testgen/template/get_entities/get_test_info.sql +++ b/testgen/template/get_entities/get_test_info.sql @@ -3,39 +3,42 @@ Output: current detail of tests to perform for all columns within the test-suite Alternative: project-code, connection-id Optional: last_auto_run_date (==test-gen-run-id==), schema-name, table-name, column-name*/ -SELECT s.project_code as project_key, - s.cat_test_id, - s.test_suite as test_suite_key, - s.test_type, - COALESCE(s.test_description, tt.test_description) as test_description, - CASE - WHEN COALESCE(s.lock_refresh, 'N') = 'N' THEN 'Allowed' - ELSE 'Locked' - END as test_refresh, - CASE - WHEN COALESCE(s.test_active, 'Y') = 'N' THEN 'Disabled' - ELSE 'Enabled' - END as disabled, - COALESCE(s.watch_level, 'Warn') as watch_level, - s.schema_name, - s.table_name, - s.column_name, - tt.measure_uom, - s.threshold_value, - s.baseline_ct, - s.baseline_unique_ct, - s.baseline_value, - s.baseline_value_ct, - s.baseline_sum, - s.baseline_avg, - s.baseline_sd, - s.subset_condition, - s.check_result, - s.last_auto_gen_date, - s.profiling_as_of_date -FROM test_definitions s - INNER JOIN test_types tt ON s.test_type = tt.test_type -WHERE s.project_code = '{PROJECT_CODE}' - AND s.test_suite = '{TEST_SUITE}' -ORDER BY s.schema_name, s.table_name, - s.column_name, s.test_type; + SELECT ts.project_code as project_key, + td.cat_test_id, + ts.test_suite as test_suite_key, + td.test_type, + COALESCE(td.test_description, tt.test_description) as 
test_description, + CASE + WHEN COALESCE(td.lock_refresh, 'N') = 'N' THEN 'Allowed' + ELSE 'Locked' + END as test_refresh, + CASE + WHEN COALESCE(td.test_active, 'Y') = 'N' THEN 'Disabled' + ELSE 'Enabled' + END as disabled, + COALESCE(td.watch_level, 'Warn') as watch_level, + td.schema_name, + td.table_name, + td.column_name, + tt.measure_uom, + td.threshold_value, + td.baseline_ct, + td.baseline_unique_ct, + td.baseline_value, + td.baseline_value_ct, + td.baseline_sum, + td.baseline_avg, + td.baseline_sd, + td.subset_condition, + td.check_result, + td.last_auto_gen_date, + td.profiling_as_of_date + FROM test_definitions td +INNER JOIN test_types tt ON td.test_type = tt.test_type +INNER JOIN test_suites ts ON td.test_suite_id = ts.id + WHERE ts.project_code = '{PROJECT_CODE}' + AND ts.test_suite = '{TEST_SUITE}' + ORDER BY td.schema_name, + td.table_name, + td.column_name, + td.test_type; diff --git a/testgen/template/parms/parms_test_execution.sql b/testgen/template/parms/parms_test_execution.sql index 3091c32..204b49c 100644 --- a/testgen/template/parms/parms_test_execution.sql +++ b/testgen/template/parms/parms_test_execution.sql @@ -1,13 +1,22 @@ -SELECT g.project_code, g.connection_id::varchar(50), +SELECT ts.project_code, + ts.connection_id::VARCHAR, + ts.id::VARCHAR as test_suite_id, + tg.table_group_schema, cc.sql_flavor, - cc.project_host, cc.project_port, - cc.project_user, cc.project_db, tg.table_group_schema, cc.project_qc_schema, + cc.project_host, + cc.project_port, + cc.project_user, + cc.project_db, + cc.project_qc_schema, cc.connect_by_key, cc.private_key, cc.private_key_passphrase, - cc.max_threads, cc.max_query_chars, cc.url, cc.connect_by_url - FROM test_suites g -INNER JOIN connections cc ON (g.connection_id = cc.connection_id) -INNER join table_groups tg ON (g.table_groups_id = tg.id) - WHERE g.project_code = '{PROJECT_CODE}' - AND g.test_suite = '{TEST_SUITE}'; + cc.max_threads, + cc.max_query_chars, + cc.url, + cc.connect_by_url + FROM 
test_suites ts + JOIN connections cc ON (ts.connection_id = cc.connection_id) + JOIN table_groups tg ON (ts.table_groups_id = tg.id) + WHERE ts.project_code = '{PROJECT_CODE}' + AND ts.test_suite = '{TEST_SUITE}'; diff --git a/testgen/template/parms/parms_test_gen.sql b/testgen/template/parms/parms_test_gen.sql index 5328d6b..13dd4a1 100644 --- a/testgen/template/parms/parms_test_gen.sql +++ b/testgen/template/parms/parms_test_gen.sql @@ -1,23 +1,21 @@ -SELECT tg.project_code, tg.connection_id, - cc.sql_flavor, - cc.project_host, - cc.project_port, - cc.project_user, - cc.connect_by_key, - cc.private_key, - cc.private_key_passphrase, - cc.project_db, - tg.table_group_schema, - s.export_to_observability, - s.test_suite, - s.id::VARCHAR as test_suite_id, - cc.url, - cc.connect_by_url, - CURRENT_TIMESTAMP AT TIME ZONE 'UTC' - CAST(tg.profiling_delay_days AS integer) * INTERVAL '1 day' as profiling_as_of_date - FROM table_groups tg -INNER JOIN connections cc - ON (tg.connection_id = cc.connection_id) -LEFT JOIN test_suites s - ON (tg.connection_id = s.connection_id - AND '{TEST_SUITE}' = s.test_suite) -WHERE tg.id = '{TABLE_GROUPS_ID}'; + SELECT tg.project_code, + tg.connection_id, + cc.sql_flavor, + cc.project_host, + cc.project_port, + cc.project_user, + cc.connect_by_key, + cc.private_key, + cc.private_key_passphrase, + cc.project_db, + tg.table_group_schema, + ts.export_to_observability, + ts.id::VARCHAR as test_suite_id, + cc.url, + cc.connect_by_url, + CURRENT_TIMESTAMP AT TIME ZONE 'UTC' - + CAST(tg.profiling_delay_days AS integer) * INTERVAL '1 day' as profiling_as_of_date + FROM table_groups tg +INNER JOIN connections cc ON tg.connection_id = cc.connection_id + LEFT JOIN test_suites ts ON tg.connection_id = ts.connection_id AND ts.test_suite = '{TEST_SUITE}' + WHERE tg.id = '{TABLE_GROUPS_ID}'; diff --git a/testgen/template/validate_tests/ex_disable_tests_test_definitions.sql b/testgen/template/validate_tests/ex_disable_tests_test_definitions.sql index 
8745c4a..40793fa 100644 --- a/testgen/template/validate_tests/ex_disable_tests_test_definitions.sql +++ b/testgen/template/validate_tests/ex_disable_tests_test_definitions.sql @@ -1,5 +1,4 @@ -update test_definitions -set test_active = 'N' - where project_code = '{PROJECT_CODE}' - and test_suite = '{TEST_SUITE}' - and test_active = 'D'; +UPDATE test_definitions + SET test_active = 'N' + WHERE test_suite_id = '{TEST_SUITE_ID}' + AND test_active = 'D'; diff --git a/testgen/template/validate_tests/ex_flag_tests_test_definitions.sql b/testgen/template/validate_tests/ex_flag_tests_test_definitions.sql index a517cf2..2ef7689 100644 --- a/testgen/template/validate_tests/ex_flag_tests_test_definitions.sql +++ b/testgen/template/validate_tests/ex_flag_tests_test_definitions.sql @@ -4,14 +4,10 @@ Mark Test inactive for Missing columns with update status with test_columns as (SELECT DISTINCT schema_name || '.' || table_name || '.' || column_name AS columns FROM ( SELECT cat_test_id, - project_code, - test_suite, schema_name, table_name, UNNEST(STRING_TO_ARRAY(all_columns, '~|~')) AS column_name FROM ( SELECT cat_test_id, - project_code, - test_suite, schema_name, table_name, CONCAT_WS('~|~', column_name, @@ -20,14 +16,11 @@ with test_columns as FROM test_definitions d INNER JOIN test_types t ON d.test_type = t.test_type - WHERE project_code = '{PROJECT_CODE}' - AND test_suite = '{TEST_SUITE}' + WHERE test_suite_id = '{TEST_SUITE_ID}' AND t.test_scope = 'column' UNION SELECT cat_test_id, - project_code, - test_suite, match_schema_name AS schema_name, match_table_name AS table_name, CONCAT_WS('~|~', @@ -36,8 +29,7 @@ with test_columns as FROM test_definitions d INNER JOIN test_types t ON d.test_type = t.test_type - WHERE project_code = '{PROJECT_CODE}' - AND test_suite = '{TEST_SUITE}' + WHERE test_suite_id = '{TEST_SUITE_ID}' AND t.test_scope = 'column') a ) b) update test_definitions set test_active = '{FLAG}', @@ -54,8 +46,7 @@ Mark Test inactive for Missing table with 
update status with test_columns as (select distinct cat_test_id, schema_name || '.' || table_name || '.' || column_name as columns from test_definitions - where project_code = '{PROJECT_CODE}' - and test_suite = '{TEST_SUITE}' + where test_suite_id = '{TEST_SUITE_ID}' and lower(schema_name || '.' || table_name) in ({MISSING_TABLES})) update test_definitions set test_active = '{FLAG}', diff --git a/testgen/template/validate_tests/ex_get_test_column_list_tg.sql b/testgen/template/validate_tests/ex_get_test_column_list_tg.sql index 4c30ac5..318909c 100644 --- a/testgen/template/validate_tests/ex_get_test_column_list_tg.sql +++ b/testgen/template/validate_tests/ex_get_test_column_list_tg.sql @@ -1,65 +1,50 @@ SELECT DISTINCT schema_name || '.' || table_name || '.' || column_name AS columns FROM ( SELECT cat_test_id, - project_code, - test_suite, schema_name AS schema_name, table_name AS table_name, TRIM(UNNEST(STRING_TO_ARRAY(column_name, ','))) as column_name FROM test_definitions d INNER JOIN test_types t ON d.test_type = t.test_type - WHERE project_code = '{PROJECT_CODE}' - AND test_suite = '{TEST_SUITE}' + WHERE test_suite_id = '{TEST_SUITE_ID}' AND t.test_scope IN ('column', 'referential') UNION SELECT cat_test_id, - project_code, - test_suite, schema_name AS schema_name, table_name AS table_name, TRIM(UNNEST(STRING_TO_ARRAY(groupby_names, ','))) as column_name FROM test_definitions d INNER JOIN test_types t ON d.test_type = t.test_type - WHERE project_code = '{PROJECT_CODE}' - AND test_suite = '{TEST_SUITE}' + WHERE test_suite_id = '{TEST_SUITE_ID}' AND t.test_scope IN ('column', 'referential') UNION SELECT cat_test_id, - project_code, - test_suite, schema_name AS schema_name, table_name AS table_name, TRIM(UNNEST(STRING_TO_ARRAY(window_date_column, ','))) as column_name FROM test_definitions d INNER JOIN test_types t ON d.test_type = t.test_type - WHERE project_code = '{PROJECT_CODE}' - AND test_suite = '{TEST_SUITE}' + WHERE test_suite_id = '{TEST_SUITE_ID}' 
AND t.test_scope IN ('column', 'referential') UNION SELECT cat_test_id, - project_code, - test_suite, match_schema_name AS schema_name, match_table_name AS table_name, TRIM(UNNEST(STRING_TO_ARRAY(match_column_names, ','))) as column_name FROM test_definitions d INNER JOIN test_types t ON d.test_type = t.test_type - WHERE project_code = '{PROJECT_CODE}' - AND test_suite = '{TEST_SUITE}' + WHERE test_suite_id = '{TEST_SUITE_ID}' AND t.test_scope = 'referential' UNION SELECT cat_test_id, - project_code, - test_suite, match_schema_name AS schema_name, match_table_name AS table_name, TRIM(UNNEST(STRING_TO_ARRAY(match_groupby_names, ','))) as column_name FROM test_definitions d INNER JOIN test_types t ON d.test_type = t.test_type - WHERE project_code = '{PROJECT_CODE}' - AND test_suite = '{TEST_SUITE}' + WHERE test_suite_id = '{TEST_SUITE_ID}' AND t.test_scope = 'referential' ) cols; diff --git a/testgen/template/validate_tests/ex_write_test_val_errors.sql b/testgen/template/validate_tests/ex_write_test_val_errors.sql index 8317b7c..ef03a93 100644 --- a/testgen/template/validate_tests/ex_write_test_val_errors.sql +++ b/testgen/template/validate_tests/ex_write_test_val_errors.sql @@ -1,22 +1,33 @@ INSERT INTO test_results - (project_code, test_suite, test_type, test_definition_id, - schema_name, table_name, column_names, test_time, test_run_id, - input_parameters, result_code, result_message, result_measure) -SELECT '{PROJECT_CODE}' as project_code, - '{TEST_SUITE}' as test_suite, - test_type, - id, - schema_name, - table_name, - column_name, - '{RUN_DATE}' as test_time, - '{TEST_RUN_ID}' as test_run_id, - NULL as input_parameters, - 0 as result_code, - -- TODO: show only missing columns referenced in this test - left('ERROR - TEST COLUMN MISSING: {MISSING_COLUMNS_NO_QUOTES}', 470) AS result_message, - NULL as result_measure - FROM test_definitions - WHERE test_active = '-1' - AND project_code = '{PROJECT_CODE}' - AND test_suite = '{TEST_SUITE}'; + ( project_code, + 
test_suite, + test_type, + test_definition_id, + schema_name, + table_name, + column_names, + test_time, + test_run_id, + input_parameters, + result_code, + result_message, + result_measure ) + SELECT '{PROJECT_CODE}' as project_code, + '{TEST_SUITE}' as test_suite, + td.test_type, + td.id, + td.schema_name, + td.table_name, + td.column_name, + '{RUN_DATE}' as test_time, + '{TEST_RUN_ID}' as test_run_id, + NULL as input_parameters, + 0 as result_code, + -- TODO: show only missing columns referenced in this test + left('ERROR - TEST COLUMN MISSING: {MISSING_COLUMNS_NO_QUOTES}', 470) AS result_message, + NULL as result_measure + FROM test_definitions td + INNER JOIN test_suites ts ON td.test_suite_id = ts.id + WHERE td.test_active = '-1' + AND ts.project_code = '{PROJECT_CODE}' + AND ts.test_suite = '{TEST_SUITE}'; diff --git a/testgen/ui/queries/test_definition_queries.py b/testgen/ui/queries/test_definition_queries.py index 3073a90..ba285ab 100644 --- a/testgen/ui/queries/test_definition_queries.py +++ b/testgen/ui/queries/test_definition_queries.py @@ -29,13 +29,13 @@ def get_test_definitions(schema, project_code, test_suite, table_name, column_na SELECT d.schema_name, d.table_name, d.column_name, t.test_name_short, t.test_name_long, d.id::VARCHAR(50), - d.project_code, d.table_groups_id::VARCHAR(50), d.test_suite, d.test_suite_id::VARCHAR, + s.project_code, d.table_groups_id::VARCHAR(50), s.test_suite, d.test_suite_id::VARCHAR, d.test_type, d.cat_test_id::VARCHAR(50), d.test_active, CASE WHEN d.test_active = 'Y' THEN 'Yes' ELSE 'No' END as test_active_display, d.lock_refresh, CASE WHEN d.lock_refresh = 'Y' THEN 'Yes' ELSE 'No' END as lock_refresh_display, - t.test_scope, + t.test_scope, d.test_description, d.profiling_as_of_date, d.last_manual_update, @@ -68,11 +68,11 @@ def get_test_definitions(schema, project_code, test_suite, table_name, column_na """ if project_code: - sql += f""" AND d.project_code = '{project_code}' + sql += f""" AND s.project_code = 
'{project_code}' """ if test_suite: - sql += f""" AND d.test_suite = '{test_suite}' {table_condition} {column_condition} + sql += f""" AND s.test_suite = '{test_suite}' {table_condition} {column_condition} """ if test_definition_ids: sql += f""" AND d.id in ({"'" + "','".join(test_definition_ids) + "'"}) @@ -97,16 +97,15 @@ def update(schema, test_definition): export_to_observability = NULLIF('{test_definition["export_to_observability"]}', ''), column_name = NULLIF($${test_definition["column_name"]}$$, ''), watch_level = NULLIF('{test_definition["watch_level"]}', ''), - project_code = NULLIF('{test_definition["project_code"]}', ''), table_groups_id = '{test_definition["table_groups_id"]}'::UUID, """ if test_definition["profile_run_id"]: - sql += f""" profile_run_id = '{test_definition["profile_run_id"]}'::UUID, - """ + sql += f"profile_run_id = '{test_definition['profile_run_id']}'::UUID,\n" + if test_definition["test_suite_id"]: + sql += f"test_suite_id = '{test_definition['test_suite_id']}'::UUID,\n" sql += f""" test_type = NULLIF('{test_definition["test_type"]}', ''), - test_suite = NULLIF('{test_definition["test_suite"]}', ''), test_description = NULLIF($${test_definition["test_description"]}$$, ''), test_action = NULLIF('{test_definition["test_action"]}', ''), test_mode = NULLIF('{test_definition["test_mode"]}', ''), @@ -156,11 +155,9 @@ def add(schema, test_definition): export_to_observability, column_name, watch_level, - project_code, table_groups_id, profile_run_id, test_type, - test_suite, test_suite_id, test_description, test_action, @@ -202,11 +199,9 @@ def add(schema, test_definition): NULLIF('{test_definition["export_to_observability"]}', '') as export_to_observability, NULLIF('{test_definition["column_name"]}', '') as column_name, NULLIF('{test_definition["watch_level"]}', '') as watch_level, - NULLIF('{test_definition["project_code"]}', '') as project_code, '{test_definition["table_groups_id"]}'::UUID as table_groups_id, NULL AS profile_run_id, 
NULLIF('{test_definition["test_type"]}', '') as test_type, - NULLIF('{test_definition["test_suite"]}', '') as test_suite, '{test_definition["test_suite_id"]}'::UUID as test_suite_id, NULLIF('{test_definition["test_description"]}', '') as test_description, NULLIF('{test_definition["test_action"]}', '') as test_action, @@ -265,6 +260,9 @@ def cascade_delete(schema, test_suite_names): raise ValueError("No Test Suite is specified.") items = [f"'{item}'" for item in test_suite_names] - sql = f"""delete from {schema}.test_definitions where test_suite in ({",".join(items)})""" + sql = f""" + DELETE FROM {schema}.test_definitions + WHERE test_suite_id in (select id from {schema}.test_suites where test_suite in ({",".join(items)})) + """ db.execute_sql(sql) st.cache_data.clear() diff --git a/testgen/ui/queries/test_run_queries.py b/testgen/ui/queries/test_run_queries.py index fbc0e60..ba1e3a9 100644 --- a/testgen/ui/queries/test_run_queries.py +++ b/testgen/ui/queries/test_run_queries.py @@ -9,11 +9,12 @@ def cascade_delete(schema: str, test_suite_names: list[str]) -> None: raise ValueError("No Test Suite is specified.") items = [f"'{item}'" for item in test_suite_names] - sql = f"""delete from {schema}.working_agg_cat_results where test_suite in ({",".join(items)}); -delete from {schema}.working_agg_cat_tests where test_suite in ({",".join(items)}); -delete from {schema}.test_runs where test_suite in ({",".join(items)}); -delete from {schema}.test_results where test_suite in ({",".join(items)}); -delete from {schema}.execution_queue where test_suite in ({",".join(items)});""" + sql = f""" + delete from {schema}.working_agg_cat_results where test_suite in ({",".join(items)}); + delete from {schema}.working_agg_cat_tests where test_suite in ({",".join(items)}); + delete from {schema}.test_runs where test_suite in ({",".join(items)}); + delete from {schema}.test_results where test_suite in ({",".join(items)}); + """ db.execute_sql(sql) st.cache_data.clear() diff --git 
a/testgen/ui/queries/test_suite_queries.py b/testgen/ui/queries/test_suite_queries.py index 8e45764..99ccc76 100644 --- a/testgen/ui/queries/test_suite_queries.py +++ b/testgen/ui/queries/test_suite_queries.py @@ -90,16 +90,15 @@ def cascade_delete(schema: str, test_suite_names: list[str]) -> None: def get_test_suite_dependencies(schema: str, test_suite_names: list[str]) -> pd.DataFrame: test_suite_names_join = [f"'{item}'" for item in test_suite_names] sql = f""" - select distinct test_suite from {schema}.test_definitions where test_suite in ({",".join(test_suite_names_join)}) - union - select distinct test_suite from {schema}.execution_queue where test_suite in ({",".join(test_suite_names_join)}) + select distinct ts.test_suite + from {schema}.test_definitions td join {schema}.test_suites ts on ts.id = td.test_suite_id + where ts.test_suite in ({",".join(test_suite_names_join)}) union select distinct test_suite from {schema}.test_results where test_suite in ({",".join(test_suite_names_join)}); """ return db.retrieve_data(sql) - def get_test_suite_usage(schema: str, test_suite_names: list[str]) -> pd.DataFrame: test_suite_names_join = [f"'{item}'" for item in test_suite_names] sql = f""" @@ -108,7 +107,7 @@ def get_test_suite_usage(schema: str, test_suite_names: list[str]) -> pd.DataFra return db.retrieve_data(sql) -def get_test_suite_refresh_check(schema, table_groups_id, test_suite_name): +def get_test_suite_refresh_check(schema, table_groups_id, test_suite_id): sql = f""" SELECT COUNT(*) as test_ct, SUM(CASE WHEN COALESCE(d.lock_refresh, 'N') = 'N' THEN 1 ELSE 0 END) as unlocked_test_ct, @@ -117,7 +116,7 @@ def get_test_suite_refresh_check(schema, table_groups_id, test_suite_name): INNER JOIN {schema}.test_types t ON (d.test_type = t.test_type) WHERE d.table_groups_id = '{table_groups_id}'::UUID - AND d.test_suite = '{test_suite_name}' + AND d.test_suite_id = '{test_suite_id}' AND t.run_type = 'CAT' AND t.selection_criteria IS NOT NULL; """ @@ -133,11 +132,11 
@@ def get_generation_sets(schema): return db.retrieve_data(sql) -def lock_edited_tests(schema, test_suite_name): +def lock_edited_tests(schema, test_suite_id): sql = f""" UPDATE {schema}.test_definitions SET lock_refresh = 'Y' - WHERE test_suite = '{test_suite_name}' + WHERE test_suite_id = '{test_suite_id}' AND last_manual_update IS NOT NULL AND lock_refresh = 'N'; """ diff --git a/testgen/ui/services/table_group_service.py b/testgen/ui/services/table_group_service.py index 598c163..e9ce018 100644 --- a/testgen/ui/services/table_group_service.py +++ b/testgen/ui/services/table_group_service.py @@ -37,12 +37,14 @@ def cascade_delete(table_group_names, dry_run=False): def table_group_has_dependencies(schema, table_group_names, test_suite_names): - test_suite_usage_result = test_suite_service.has_test_suite_dependencies(schema, test_suite_names) - if not table_group_names: - table_group_usage_result = False - else: - table_group_usage_result = not table_group_queries.get_table_group_dependencies(schema, table_group_names).empty - return test_suite_usage_result or table_group_usage_result + return any( + ( + table_group_names and not table_group_queries.get_table_group_dependencies( + schema, table_group_names + ).empty, + test_suite_service.has_test_suite_dependencies(schema, test_suite_names), + ) + ) def are_table_groups_in_use(table_group_names): diff --git a/testgen/ui/services/test_suite_service.py b/testgen/ui/services/test_suite_service.py index a827657..f23d444 100644 --- a/testgen/ui/services/test_suite_service.py +++ b/testgen/ui/services/test_suite_service.py @@ -44,11 +44,11 @@ def are_test_suites_in_use(test_suite_names): return not usage_result.empty -def get_test_suite_refresh_warning(table_groups_id, test_suite_name): - if not test_suite_name: +def get_test_suite_refresh_warning(table_groups_id, test_suite_id): + if not test_suite_id: return False schema = st.session_state["dbschema"] - row_result = 
test_suite_queries.get_test_suite_refresh_check(schema, table_groups_id, test_suite_name) + row_result = test_suite_queries.get_test_suite_refresh_check(schema, table_groups_id, test_suite_id) test_ct = None unlocked_test_ct = None @@ -70,7 +70,7 @@ def get_generation_set_choices(): return dfSets["generation_set"].to_list() -def lock_edited_tests(test_suite_name): +def lock_edited_tests(test_suite_id): schema = st.session_state["dbschema"] - tests_locked = test_suite_queries.lock_edited_tests(schema, test_suite_name) + tests_locked = test_suite_queries.lock_edited_tests(schema, test_suite_id) return tests_locked diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py index 300f3c5..6ed52b1 100644 --- a/testgen/ui/views/test_suites.py +++ b/testgen/ui/views/test_suites.py @@ -184,14 +184,13 @@ def generate_tests_dialog(selected_test_suite): status_container = st.empty() test_ct, unlocked_test_ct, unlocked_edits_ct = test_suite_service.get_test_suite_refresh_warning( - selected_test_suite["table_groups_id"], selected_test_suite["test_suite"] + selected_test_suite["table_groups_id"], selected_test_suite["id"] ) if test_ct: warning_msg = "" counts_msg = f"\n\nAuto-Generated Tests: {test_ct}, Unlocked: {unlocked_test_ct}, Edited Unlocked: {unlocked_edits_ct}" if unlocked_edits_ct > 0: if unlocked_edits_ct > 1: - warning_msg = "Manual changes have been made to auto-generated tests in this Test Suite that have not been locked. " else: warning_msg = "A manual change has been made to an auto-generated test in this Test Suite that has not been locked. 
" @@ -203,7 +202,7 @@ def generate_tests_dialog(selected_test_suite): if unlocked_edits_ct > 0: lock_edits_button = st.button("Lock Edited Tests") if lock_edits_button: - edits_locked = test_suite_service.lock_edited_tests(selected_test_suite["test_suite"]) + edits_locked = test_suite_service.lock_edited_tests(selected_test_suite["id"]) if edits_locked: st.info("Edited tests have been successfully locked.") From 665f8ef2e3de815b8661ac5bfc368d2307babbd9 Mon Sep 17 00:00:00 2001 From: Ricardo Boni Date: Thu, 22 Aug 2024 09:53:47 -0400 Subject: [PATCH 24/78] refactor(test_results): Changing the FK to test_suite to be the ID --- .../test_parameter_validation_query.py | 4 +- .../commands/run_observability_exporter.py | 41 ++-- .../commands/run_test_parameter_validation.py | 2 +- .../030_initialize_new_schema_structure.sql | 98 ++++---- .../dbsetup/060_create_standard_views.sql | 38 +--- .../dbupgrade/0109_incremental_upgrade.sql | 214 ++++++++++++++++++ .../ex_cat_build_agg_table_tests.sql | 15 +- .../exec_cat_tests/ex_cat_results_parse.sql | 14 +- .../ex_cat_retrieve_agg_test_parms.sql | 10 +- .../exec_cat_tests/ex_cat_test_query.sql | 1 - .../ex_finalize_test_run_results.sql | 10 +- .../ex_write_test_record_to_testrun_table.sql | 5 +- .../ex_aggregate_match_no_drops_generic.sql | 4 +- .../ex_aggregate_match_num_incr_generic.sql | 4 +- ..._aggregate_match_percent_above_generic.sql | 5 +- ...aggregate_match_percent_within_generic.sql | 5 +- .../ex_aggregate_match_same_generic.sql | 5 +- .../ex_custom_query_generic.sql | 5 +- .../ex_data_match_2way_generic.sql | 5 +- .../ex_data_match_generic.sql | 5 +- .../ex_prior_match_generic.sql | 4 +- .../ex_relative_entropy_generic.sql | 5 +- .../ex_window_match_no_drops_generic.sql | 5 +- .../ex_window_match_same_generic.sql | 5 +- .../ex_relative_entropy_mssql.sql | 5 +- .../ex_window_match_no_drops_postgresql.sql | 5 +- .../ex_window_match_same_postgresql.sql | 5 +- .../get_test_results_for_run_cli.sql | 3 +- 
.../get_entities/get_test_run_list.sql | 38 ++-- .../get_entities/get_test_suite_list.sql | 29 +-- .../template/observability/get_event_data.sql | 3 +- .../observability/get_test_results.sql | 3 +- ...test_results_exported_to_observability.sql | 3 +- .../ex_write_test_val_errors.sql | 24 +- testgen/ui/queries/table_group_queries.py | 16 +- testgen/ui/queries/test_definition_queries.py | 13 +- testgen/ui/queries/test_run_queries.py | 28 ++- testgen/ui/queries/test_suite_queries.py | 41 ++-- testgen/ui/services/query_service.py | 64 ++---- testgen/ui/services/table_group_service.py | 37 +-- .../ui/services/test_definition_service.py | 6 +- testgen/ui/services/test_run_service.py | 4 +- testgen/ui/services/test_suite_service.py | 27 +-- testgen/ui/views/test_results.py | 26 ++- testgen/ui/views/test_runs.py | 19 +- testgen/ui/views/test_suites.py | 16 +- 46 files changed, 517 insertions(+), 407 deletions(-) create mode 100644 testgen/template/dbupgrade/0109_incremental_upgrade.sql diff --git a/testgen/commands/queries/test_parameter_validation_query.py b/testgen/commands/queries/test_parameter_validation_query.py index 6566bb9..be0c9bc 100644 --- a/testgen/commands/queries/test_parameter_validation_query.py +++ b/testgen/commands/queries/test_parameter_validation_query.py @@ -18,14 +18,12 @@ class CTestParamValidationSQL: # Test Set Parameters dctTestParms: typing.ClassVar = {} - def __init__(self, strProjectCode, strFlavor, strTestSuiteId): - self.project_code = strProjectCode + def __init__(self, strFlavor, strTestSuiteId): self.flavor = strFlavor self.test_suite_id = strTestSuiteId self.today = date_service.get_now_as_string() def _ReplaceParms(self, strInputString): - strInputString = strInputString.replace("{PROJECT_CODE}", self.project_code) strInputString = strInputString.replace("{TEST_SUITE_ID}", self.test_suite_id) strInputString = strInputString.replace("{RUN_DATE}", self.run_date) strInputString = strInputString.replace("{TEST_RUN_ID}", self.test_run_id) 
diff --git a/testgen/commands/run_observability_exporter.py b/testgen/commands/run_observability_exporter.py index ff6cb88..1efcc1e 100644 --- a/testgen/commands/run_observability_exporter.py +++ b/testgen/commands/run_observability_exporter.py @@ -11,7 +11,11 @@ from testgen import settings from testgen.common import date_service, read_template_sql_file -from testgen.common.database.database_service import ExecuteDBQuery, RetrieveDBResultsToDictList +from testgen.common.database.database_service import ( + ExecuteDBQuery, + RetrieveDBResultsToDictList, + RetrieveDBResultsToList, +) LOG = logging.getLogger("testgen") @@ -71,23 +75,22 @@ def _get_api_endpoint(api_url: str | None, event_type: str) -> str: return f"{parsed_url.scheme!s}://{parsed_url.netloc!s}{parsed_url.path!s}/events/v1/{event_type}" -def collect_event_data(project_code, test_suite): +def collect_event_data(test_suite_id): try: event_data_query = ( read_template_sql_file("get_event_data.sql", "observability") - .replace("{PROJECT_CODE}", project_code) - .replace("{TEST_SUITE}", test_suite) + .replace("{TEST_SUITE_ID}", test_suite_id) ) event_data_query_result = RetrieveDBResultsToDictList("DKTG", event_data_query) if not event_data_query_result: LOG.error( - f"Could not get event data for exporting to Observability. Test suite '{test_suite}' - project_code '{project_code}'. EXITING!" + f"Could not get event data for exporting to Observability. Test suite '{test_suite_id}'. EXITING!" ) sys.exit(1) if len(event_data_query_result) == 0: LOG.error( - f"Event data query is empty. Test suite '{test_suite}' - project_code '{project_code}'. Exiting export to Observability!" + f"Event data query is empty. Test suite '{test_suite_id}'. Exiting export to Observability!" ) sys.exit(1) @@ -97,7 +100,7 @@ def collect_event_data(project_code, test_suite): api_url = event.observability_api_url except Exception: LOG.exception( - f"Error collecting event data for exporting to Observability. 
Test suite '{test_suite}' - project_code '{project_code}'" + f"Error collecting event data for exporting to Observability. Test suite '{test_suite_id}'" ) sys.exit(2) else: @@ -203,12 +206,11 @@ def _get_processed_profiling_table_set(profiling_table_set): return items_remove_blank -def collect_test_results(project_code, test_suite, max_qty_events): +def collect_test_results(test_suite_id, max_qty_events): try: query = ( read_template_sql_file("get_test_results.sql", "observability") - .replace("{PROJECT_CODE}", project_code) - .replace("{TEST_SUITE}", test_suite) + .replace("{TEST_SUITE_ID}", test_suite_id) .replace("{MAX_QTY_EVENTS}", str(max_qty_events)) ) query_results = RetrieveDBResultsToDictList("DKTG", query) @@ -283,15 +285,14 @@ def _get_input_parameters(input_parameters): return ret -def mark_exported_results(project_code, test_suite, ids): +def mark_exported_results(test_suite_id, ids): if len(ids) == 0: return result_ids = ", ".join(ids) query = ( read_template_sql_file("update_test_results_exported_to_observability.sql", "observability") - .replace("{PROJECT_CODE}", project_code) - .replace("{TEST_SUITE}", test_suite) + .replace("{TEST_SUITE_ID}", test_suite_id) .replace("{RESULT_IDS}", result_ids) ) try: @@ -304,23 +305,27 @@ def mark_exported_results(project_code, test_suite, ids): sys.exit(3) -def export_test_results(project_code, test_suite): +def export_test_results(test_suite_id): LOG.info("Observability Export V2 - Privileged UI") - event, api_url, api_key = collect_event_data(project_code, test_suite) + event, api_url, api_key = collect_event_data(test_suite_id) max_qty_events = settings.OBSERVABILITY_EXPORT_LIMIT qty_of_exported_events = 0 while True: click.echo(f"Observability Export Increment - {qty_of_exported_events} exported events so far") - test_outcomes, updated_ids = collect_test_results(project_code, test_suite, max_qty_events) + test_outcomes, updated_ids = collect_test_results(test_suite_id, max_qty_events) if len(test_outcomes) == 
0: return qty_of_exported_events qty_of_exported_events += post_event("test-outcomes", event, api_url, api_key, test_outcomes) - mark_exported_results(project_code, test_suite, updated_ids) + mark_exported_results(test_suite_id, updated_ids) def run_observability_exporter(project_code, test_suite): LOG.info("CurrentStep: Observability Export - Test Results") - qty_of_exported_events = export_test_results(project_code, test_suite) + result = RetrieveDBResultsToList( + "DKTG", + f"SELECT id::VARCHAR FROM test_suites WHERE test_suite = '{test_suite}' AND project_code = '{project_code}'" + ) + qty_of_exported_events = export_test_results(result[0][0][0]) click.echo(f"{qty_of_exported_events} events have been exported.") diff --git a/testgen/commands/run_test_parameter_validation.py b/testgen/commands/run_test_parameter_validation.py index d5c5eb3..21f9102 100644 --- a/testgen/commands/run_test_parameter_validation.py +++ b/testgen/commands/run_test_parameter_validation.py @@ -35,7 +35,7 @@ def run_parameter_validation_queries( LOG.debug("Validating parameters for Test Suite %s") LOG.info("CurrentStep: Initializing Test Parameter Validation") - clsExecute = CTestParamValidationSQL(strProjectCode, dctParms["sql_flavor"], dctParms["test_suite_id"]) + clsExecute = CTestParamValidationSQL(dctParms["sql_flavor"], dctParms["test_suite_id"]) clsExecute.run_date = test_time clsExecute.test_run_id = test_run_id LOG.info("CurrentStep: Validation Class successfully initialized") diff --git a/testgen/template/dbsetup/030_initialize_new_schema_structure.sql b/testgen/template/dbsetup/030_initialize_new_schema_structure.sql index 29ffa2f..db24128 100644 --- a/testgen/template/dbsetup/030_initialize_new_schema_structure.sql +++ b/testgen/template/dbsetup/030_initialize_new_schema_structure.sql @@ -387,42 +387,6 @@ CREATE TABLE data_column_chars ( warnings_30_days_prior INTEGER ); -CREATE TABLE working_agg_cat_tests ( - project_code VARCHAR(30), - test_run_id VARCHAR(100) NOT NULL, - 
test_suite VARCHAR(200), - schema_name VARCHAR(200) NOT NULL, - table_name VARCHAR(200) NOT NULL, - cat_sequence INTEGER NOT NULL, - test_count INTEGER, - test_time TIMESTAMP, - start_time TIMESTAMP, - end_time TIMESTAMP, - column_names TEXT, - test_types TEXT, - test_definition_ids TEXT, - test_actions TEXT, - test_descriptions TEXT, - test_parms TEXT, - test_measures TEXT, - test_conditions TEXT, - CONSTRAINT working_agg_cat_tests_trid_sn_tn_cs - PRIMARY KEY (test_run_id, schema_name, table_name, cat_sequence) -); - -CREATE TABLE working_agg_cat_results ( - project_code VARCHAR(30), - test_run_id VARCHAR(100) NOT NULL, - test_suite VARCHAR(200), - schema_name VARCHAR(200) NOT NULL, - table_name VARCHAR(200) NOT NULL, - cat_sequence INTEGER NOT NULL, - measure_results TEXT, - test_results TEXT, - CONSTRAINT working_agg_cat_results_tri_sn_tn_cs - PRIMARY KEY (test_run_id, schema_name, table_name, cat_sequence) -); - CREATE TABLE test_types ( id VARCHAR, test_type VARCHAR(200) NOT NULL @@ -473,8 +437,7 @@ CREATE TABLE test_runs ( id UUID NOT NULL CONSTRAINT test_runs_id_pk PRIMARY KEY, - project_code VARCHAR(30), - test_suite VARCHAR(200), + test_suite_id UUID NOT NULL, test_starttime TIMESTAMP, test_endtime TIMESTAMP, status VARCHAR(100) DEFAULT 'Running', @@ -489,18 +452,18 @@ CREATE TABLE test_runs ( column_ct INTEGER, column_failed_ct INTEGER, column_warning_ct INTEGER, - process_id INTEGER + process_id INTEGER, + CONSTRAINT test_runs_test_suites_fk + FOREIGN KEY (test_suite_id) REFERENCES test_suites ); CREATE TABLE test_results ( id UUID DEFAULT gen_random_uuid(), result_id BIGINT GENERATED ALWAYS AS IDENTITY, - project_code VARCHAR(30), test_type VARCHAR(50) CONSTRAINT test_results_test_types_test_type_fk REFERENCES test_types, - test_suite_id UUID, - test_suite VARCHAR(200), + test_suite_id UUID NOT NULL, test_definition_id UUID, auto_gen BOOLEAN, test_time TIMESTAMP, @@ -523,13 +486,48 @@ CREATE TABLE test_results ( subset_condition VARCHAR(500), 
result_query VARCHAR(4000), test_description VARCHAR(1000), - test_run_id UUID, + test_run_id UUID NOT NULL, table_groups_id UUID, observability_status VARCHAR(10), CONSTRAINT test_results_test_suites_project_code_test_suite_fk FOREIGN KEY (test_suite_id) REFERENCES test_suites ); +CREATE TABLE working_agg_cat_tests ( + test_run_id UUID NOT NULL, + schema_name VARCHAR(200) NOT NULL, + table_name VARCHAR(200) NOT NULL, + cat_sequence INTEGER NOT NULL, + test_count INTEGER, + test_time TIMESTAMP, + start_time TIMESTAMP, + end_time TIMESTAMP, + column_names TEXT, + test_types TEXT, + test_definition_ids TEXT, + test_actions TEXT, + test_descriptions TEXT, + test_parms TEXT, + test_measures TEXT, + test_conditions TEXT, + CONSTRAINT working_agg_cat_tests_trid_sn_tn_cs + PRIMARY KEY (test_run_id, schema_name, table_name, cat_sequence), + CONSTRAINT working_agg_cat_tests_test_runs_fk + FOREIGN KEY (test_run_id) REFERENCES test_runs +); + +CREATE TABLE working_agg_cat_results ( + test_run_id UUID NOT NULL, + schema_name VARCHAR(200) NOT NULL, + table_name VARCHAR(200) NOT NULL, + cat_sequence INTEGER NOT NULL, + measure_results TEXT, + test_results TEXT, + CONSTRAINT working_agg_cat_results_tri_sn_tn_cs + PRIMARY KEY (test_run_id, schema_name, table_name, cat_sequence), + CONSTRAINT working_agg_cat_results_test_runs_fk + FOREIGN KEY (test_run_id) REFERENCES test_runs +); CREATE TABLE cat_test_conditions ( id VARCHAR, @@ -594,11 +592,6 @@ CREATE TABLE tg_revision ( revision INTEGER ); --- Index working table - ORIGINAL -CREATE INDEX working_agg_cat_tests_test_run_id_index - ON working_agg_cat_tests(test_run_id); - - -- Index Connections CREATE UNIQUE INDEX uix_con_id ON connections(id); @@ -638,7 +631,7 @@ CREATE INDEX ix_td_ts_tc -- Index test_runs CREATE INDEX ix_trun_pc_ts_time - ON test_runs(project_code, test_suite, test_starttime); + ON test_runs(test_suite_id, test_starttime); CREATE INDEX ix_trun_time ON test_runs USING BRIN (test_starttime); @@ -647,9 +640,6 @@ 
CREATE INDEX ix_trun_time CREATE UNIQUE INDEX uix_tr_id ON test_results(id); -CREATE INDEX ix_tr_pc_ts - ON test_results(project_code, test_suite); - CREATE INDEX ix_tr_trun ON test_results(test_run_id); @@ -657,7 +647,7 @@ CREATE INDEX ix_tr_tt ON test_results(test_type); CREATE INDEX ix_tr_pc_sctc_tt - ON test_results(project_code, test_suite, schema_name, table_name, column_names, test_type); + ON test_results(test_suite_id, schema_name, table_name, column_names, test_type); CREATE INDEX ix_tr_ts_tctt ON test_results(test_suite_id, table_name, column_names, test_type); @@ -705,7 +695,7 @@ CREATE INDEX ix_ares_anid -- Conditional index for Observability Export - ORIGINAL CREATE INDEX cix_tr_pc_ts - ON test_results(project_code, test_suite) WHERE observability_status = 'Queued'; + ON test_results(test_suite_id) WHERE observability_status = 'Queued'; INSERT INTO tg_revision (component, revision) diff --git a/testgen/template/dbsetup/060_create_standard_views.sql b/testgen/template/dbsetup/060_create_standard_views.sql index 788e278..f0b451b 100644 --- a/testgen/template/dbsetup/060_create_standard_views.sql +++ b/testgen/template/dbsetup/060_create_standard_views.sql @@ -80,30 +80,6 @@ GROUP BY r.id, r.project_code, cc.connection_name, r.connection_id, r.profiling_starttime, r.profiling_endtime, r.status; -DROP VIEW IF EXISTS v_test_runs; - -CREATE VIEW v_test_runs - AS -SELECT r.id as test_run_id, - r.project_code, p.project_name, - r.test_suite, - r.test_starttime, - TO_CHAR(r.test_endtime - r.test_starttime, 'HH24:MI:SS') as duration, - r.status, r.log_message, - COUNT(*) as test_ct, - SUM(result_code) as passed_ct, - COALESCE(SUM(CASE WHEN tr.result_status = 'Failed' THEN 1 END), 0) as failed_ct, - COALESCE(SUM(CASE WHEN tr.result_status = 'Warning' THEN 1 END), 0) as warning_ct, - r.process_id - FROM test_runs r -INNER JOIN projects p - ON (r.project_code = p.project_code) -INNER JOIN test_results tr - ON (r.id = tr.test_run_id) -GROUP BY r.id, r.project_code, 
r.test_suite, r.test_starttime, r.test_endtime, - r.process_id, r.status, r.log_message, p.project_name; - - DROP VIEW IF EXISTS v_test_results; CREATE VIEW v_test_results @@ -142,7 +118,7 @@ SELECT p.project_name, CASE WHEN result_message ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1 END as execution_error_ct, - r.project_code, + p.project_code, r.table_groups_id, r.id as test_result_id, c.id as connection_id, r.test_suite_id, @@ -166,7 +142,7 @@ LEFT JOIN test_definitions d INNER JOIN test_suites ts ON (r.test_suite_id = ts.id) INNER JOIN projects p - ON (r.project_code = p.project_code) + ON (ts.project_code = p.project_code) INNER JOIN table_groups tg ON (r.table_groups_id = tg.id) INNER JOIN connections cn @@ -192,7 +168,7 @@ SELECT END as sample_min_count, tg.id as group_id, tg.profile_use_sampling = 'Y' as uses_sampling, - r.project_code, + ts.project_code, CASE WHEN tg.profile_use_sampling = 'Y' THEN tg.profile_sample_percent END as sample_percentage, @@ -207,7 +183,8 @@ SELECT r.column_names, r.table_name, - r.test_suite, + ts.test_suite, + ts.id AS test_suite_id, r.input_parameters, r.test_definition_id, tt.test_name_short as type, @@ -234,14 +211,13 @@ INNER JOIN test_types tt INNER JOIN test_definitions d ON (r.test_definition_id = d.id) INNER JOIN test_suites ts - ON (r.project_code = ts.project_code - AND r.test_suite = ts.test_suite) + ON r.test_suite_id = ts.id INNER JOIN table_groups tg ON (d.table_groups_id = tg.id) INNER JOIN connections cn ON (tg.connection_id = cn.connection_id) INNER JOIN projects p - ON (r.project_code = p.project_code) + ON (ts.project_code = p.project_code) INNER JOIN cat_test_conditions c ON (cn.sql_flavor = c.sql_flavor AND d.test_type = c.test_type) diff --git a/testgen/template/dbupgrade/0109_incremental_upgrade.sql b/testgen/template/dbupgrade/0109_incremental_upgrade.sql new file mode 100644 index 0000000..1a74941 --- /dev/null +++ b/testgen/template/dbupgrade/0109_incremental_upgrade.sql @@ -0,0 +1,214 @@ +SET 
SEARCH_PATH TO {SCHEMA_NAME}; + +-- Step 1: Drop everything that depends on the current state + +DROP VIEW v_test_runs; -- Not needed, unused +DROP VIEW v_test_results; +DROP VIEW v_queued_observability_results; +DROP INDEX cix_tr_pc_ts; +DROP INDEX ix_tr_pc_ts; -- Not needed, replaced by a FK +DROP INDEX ix_tr_pc_sctc_tt; +DROP INDEX ix_trun_pc_ts_time; +DROP INDEX working_agg_cat_tests_test_run_id_index; -- Not needed, given the column is a FK + +-- Step 2: Adjust the tables + +ALTER TABLE test_runs ADD COLUMN test_suite_id UUID; + + UPDATE test_runs + SET test_suite_id = ts.id + FROM test_runs tr +INNER JOIN test_suites AS ts ON tr.test_suite = ts.test_suite AND tr.project_code = ts.project_code; + +ALTER TABLE test_runs ALTER COLUMN test_suite_id SET NOT NULL; + + + UPDATE test_results + SET test_suite_id = ts.id + FROM test_results tr +INNER JOIN test_suites AS ts ON tr.test_suite = ts.test_suite AND tr.project_code = ts.project_code + WHERE tr.test_suite_id is NULL; + +ALTER TABLE test_results ALTER COLUMN test_suite_id SET NOT NULL; +ALTER TABLE test_results ALTER COLUMN test_run_id SET NOT NULL; + + +ALTER TABLE working_agg_cat_tests RENAME COLUMN test_run_id TO varchar_test_run_id; +ALTER TABLE working_agg_cat_tests ADD COLUMN test_run_id UUID; +UPDATE working_agg_cat_tests SET test_run_id = varchar_test_run_id::UUID; +ALTER TABLE working_agg_cat_tests ALTER COLUMN test_run_id SET NOT NULL; +ALTER TABLE working_agg_cat_tests DROP COLUMN varchar_test_run_id; + + +ALTER TABLE working_agg_cat_results RENAME COLUMN test_run_id TO varchar_test_run_id; +ALTER TABLE working_agg_cat_results ADD COLUMN test_run_id UUID; +UPDATE working_agg_cat_results SET test_run_id = varchar_test_run_id::UUID; +ALTER TABLE working_agg_cat_results ALTER COLUMN test_run_id SET NOT NULL; +ALTER TABLE working_agg_cat_results DROP COLUMN varchar_test_run_id; + +-- Step 3: Clean up + +ALTER TABLE test_runs +DROP COLUMN test_suite, +DROP COLUMN project_code; + +ALTER TABLE test_results 
+DROP COLUMN test_suite, +DROP COLUMN project_code; + +ALTER TABLE working_agg_cat_tests +DROP COLUMN project_code, +DROP COLUMN test_suite; + +ALTER TABLE working_agg_cat_results +DROP COLUMN project_code, +DROP COLUMN test_suite; + +-- Step 4: Re-create views and indexes + +CREATE INDEX ix_tr_pc_sctc_tt + ON test_results(test_suite_id, schema_name, table_name, column_names, test_type); + +CREATE INDEX cix_tr_pc_ts + ON test_results(test_suite_id) WHERE observability_status = 'Queued'; + +CREATE INDEX ix_trun_pc_ts_time + ON test_runs(test_suite_id, test_starttime); + +CREATE VIEW v_test_results +AS +SELECT p.project_name, + ts.test_suite, + tg.table_groups_name, + cn.connection_name, cn.project_host, cn.sql_flavor, + tt.dq_dimension, + r.schema_name, r.table_name, r.column_names, + r.test_time as test_date, + r.test_type, tt.id as test_type_id, tt.test_name_short, tt.test_name_long, + r.test_description, + tt.measure_uom, tt.measure_uom_description, + c.test_operator, + r.threshold_value::NUMERIC(16, 5) as threshold_value, + r.result_measure::NUMERIC(16, 5), + r.result_status, + r.input_parameters, + r.result_message, + CASE WHEN result_code <> 1 THEN r.severity END as severity, + CASE + WHEN result_code <> 1 THEN r.disposition + ELSE 'Passed' + END as disposition, + r.result_code as passed_ct, + (1 - r.result_code)::INTEGER as exception_ct, + CASE + WHEN result_status = 'Warning' + AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1 + END::INTEGER as warning_ct, + CASE + WHEN result_status = 'Failed' + AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1 + END::INTEGER as failed_ct, + CASE + WHEN result_message ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1 + END as execution_error_ct, + p.project_code, + r.table_groups_id, + r.id as test_result_id, c.id as connection_id, + r.test_suite_id, + r.test_definition_id as test_definition_id_runtime, + CASE + WHEN r.auto_gen = TRUE THEN d.id + ELSE r.test_definition_id + END as 
test_definition_id_current, + r.test_run_id as test_run_id, + r.auto_gen + FROM test_results r +INNER JOIN test_types tt + ON (r.test_type = tt.test_type) +LEFT JOIN test_definitions d + ON (r.test_suite_id = d.test_suite_id + AND r.table_name = d.table_name + AND r.column_names = COALESCE(d.column_name, 'N/A') + AND r.test_type = d.test_type + AND r.auto_gen = TRUE + AND d.last_auto_gen_date IS NOT NULL) +INNER JOIN test_suites ts + ON (r.test_suite_id = ts.id) +INNER JOIN projects p + ON (ts.project_code = p.project_code) +INNER JOIN table_groups tg + ON (r.table_groups_id = tg.id) +INNER JOIN connections cn + ON (tg.connection_id = cn.connection_id) +LEFT JOIN cat_test_conditions c + ON (cn.sql_flavor = c.sql_flavor + AND r.test_type = c.test_type); + +CREATE VIEW v_queued_observability_results + AS +SELECT + p.project_name, + cn.sql_flavor as component_tool, + ts.test_suite_schema as schema, + cn.connection_name, + cn.project_db, + + CASE + WHEN tg.profile_use_sampling = 'Y' THEN tg.profile_sample_min_count + END as sample_min_count, + tg.id as group_id, + tg.profile_use_sampling = 'Y' as uses_sampling, + ts.project_code, + CASE + WHEN tg.profile_use_sampling = 'Y' THEN tg.profile_sample_percent + END as sample_percentage, + + tg.profiling_table_set, + tg.profiling_include_mask, + tg.profiling_exclude_mask, + + COALESCE(ts.component_type, 'dataset') as component_type, + COALESCE(ts.component_key, tg.id::VARCHAR) as component_key, + COALESCE(ts.component_name, tg.table_groups_name) as component_name, + + r.column_names, + r.table_name, + ts.test_suite, + ts.id AS test_suite_id, + r.input_parameters, + r.test_definition_id, + tt.test_name_short as type, + CASE + WHEN c.test_operator IN ('>', '>=') THEN d.threshold_value + END as min_threshold, + CASE + WHEN c.test_operator IN ('<', '<=') THEN d.threshold_value + END as max_threshold, + tt.test_name_long as name, + tt.test_description as description, + r.test_time as start_time, + r.test_time as end_time, + 
r.result_message as result_message, + tt.dq_dimension, + r.result_status, + r.result_id, + r.result_measure as metric_value, + tt.measure_uom, + tt.measure_uom_description + FROM test_results r +INNER JOIN test_types tt + ON (r.test_type = tt.test_type) +INNER JOIN test_definitions d + ON (r.test_definition_id = d.id) +INNER JOIN test_suites ts + ON r.test_suite_id = ts.id +INNER JOIN table_groups tg + ON (d.table_groups_id = tg.id) +INNER JOIN connections cn + ON (tg.connection_id = cn.connection_id) +INNER JOIN projects p + ON (ts.project_code = p.project_code) +INNER JOIN cat_test_conditions c + ON (cn.sql_flavor = c.sql_flavor + AND d.test_type = c.test_type) +WHERE r.observability_status = 'Queued'; diff --git a/testgen/template/exec_cat_tests/ex_cat_build_agg_table_tests.sql b/testgen/template/exec_cat_tests/ex_cat_build_agg_table_tests.sql index 7e0413f..bee3588 100644 --- a/testgen/template/exec_cat_tests/ex_cat_build_agg_table_tests.sql +++ b/testgen/template/exec_cat_tests/ex_cat_build_agg_table_tests.sql @@ -1,14 +1,14 @@ -- Create one record per CAT query: all test sets against one table, split over max chars INSERT INTO working_agg_cat_tests - (project_code, test_run_id, - test_suite, schema_name, table_name, cat_sequence, test_count, test_time, + (test_run_id, + schema_name, table_name, cat_sequence, test_count, test_time, column_names, test_types, test_definition_ids, test_actions, test_descriptions, test_parms, test_measures, test_conditions) -- Test details from each test type WITH test_detail AS ( - SELECT '{TEST_SUITE}' as test_suite, + SELECT t.test_suite_id, '{SCHEMA_NAME}' as schema_name, '{TABLE_NAME}' as table_name, '{RUN_DATE}'::TIMESTAMP as test_time, t.column_name, t.test_type, t.id::VARCHAR as test_definition_id, @@ -73,7 +73,7 @@ WITH test_detail AND COALESCE(t.test_active, 'Y') = 'Y' ), test_detail_split - AS ( SELECT test_suite, schema_name, table_name, test_time, + AS ( SELECT test_suite_id, schema_name, table_name, test_time, 
column_name, test_type, test_definition_id, test_action, test_description, parms, measure, condition, SUM(LENGTH(condition)) OVER (PARTITION BY t.schema_name, t.table_name @@ -82,10 +82,7 @@ test_detail_split ORDER BY t.column_name ROWS UNBOUNDED PRECEDING ) / {MAX_QUERY_CHARS} ) + 1 as query_split_no FROM test_detail t ) -SELECT - '{PROJECT_CODE}' as project_code, - '{TEST_RUN_ID}' as test_run_id, - d.test_suite, +SELECT '{TEST_RUN_ID}' as test_run_id, d.schema_name, d.table_name, d.query_split_no as cat_sequence, COUNT(*) as test_count, @@ -112,4 +109,4 @@ SELECT '++' ORDER BY d.column_name) as conditions FROM test_detail_split d -GROUP BY d.test_suite, d.schema_name, d.table_name, test_time, d.query_split_no; +GROUP BY d.test_suite_id, d.schema_name, d.table_name, test_time, d.query_split_no; diff --git a/testgen/template/exec_cat_tests/ex_cat_results_parse.sql b/testgen/template/exec_cat_tests/ex_cat_results_parse.sql index 160737b..f499fff 100644 --- a/testgen/template/exec_cat_tests/ex_cat_results_parse.sql +++ b/testgen/template/exec_cat_tests/ex_cat_results_parse.sql @@ -11,7 +11,7 @@ WITH seq_digit AS ( seq_table AS ( SELECT nbr FROM seq_table_raw WHERE nbr > 0), raw_results AS ( - SELECT t.project_code, t.test_run_id, t.test_suite, t.schema_name, t.table_name, t.cat_sequence, t.test_count, + SELECT t.test_run_id, t.schema_name, t.table_name, t.cat_sequence, t.test_count, t.test_time, t.start_time, t.end_time, t.column_names, t.test_types, t.test_definition_ids, t.test_actions, t.test_descriptions, t.test_parms, t.test_measures, t.test_conditions, @@ -26,8 +26,7 @@ WITH seq_digit AS ( AND t.column_names > '' ), parsed_results AS ( - SELECT t.test_suite, - t.schema_name, + SELECT t.schema_name, t.table_name, t.test_time, t.start_time, @@ -48,13 +47,12 @@ WITH seq_digit AS ( CROSS JOIN seq_table s ) INSERT INTO test_results - (project_code, test_run_id, - test_type, test_definition_id, - test_suite, test_time, starttime, endtime, schema_name, table_name, 
column_names, + (test_run_id, test_type, test_definition_id, test_suite_id, + test_time, starttime, endtime, schema_name, table_name, column_names, skip_errors, input_parameters, result_code, result_measure, test_action, subset_condition, result_query, test_description) -SELECT '{PROJECT_CODE}' as project_code, '{TEST_RUN_ID}' as test_run_id, - r.test_type, r.test_definition_id::UUID, r.test_suite, r.test_time, r.start_time, r.end_time, +SELECT '{TEST_RUN_ID}' as test_run_id, + r.test_type, r.test_definition_id::UUID, '{TEST_SUITE_ID}'::UUID, r.test_time, r.start_time, r.end_time, r.schema_name, r.table_name, r.column_name, 0 as skip_errors, r.test_parms as input_parameters, diff --git a/testgen/template/exec_cat_tests/ex_cat_retrieve_agg_test_parms.sql b/testgen/template/exec_cat_tests/ex_cat_retrieve_agg_test_parms.sql index 1fd113b..615f5c9 100644 --- a/testgen/template/exec_cat_tests/ex_cat_retrieve_agg_test_parms.sql +++ b/testgen/template/exec_cat_tests/ex_cat_retrieve_agg_test_parms.sql @@ -1,6 +1,8 @@ -SELECT test_suite, schema_name, table_name, cat_sequence, +SELECT schema_name, + table_name, + cat_sequence, -- Replace list delimiters with concat operator - REPLACE(test_measures, '++', '{CONCAT_OPERATOR}') as test_measures, - REPLACE(test_conditions, '++', '{CONCAT_OPERATOR}') as test_conditions + REPLACE(test_measures, '++', '{CONCAT_OPERATOR}') as test_measures, + REPLACE(test_conditions, '++', '{CONCAT_OPERATOR}') as test_conditions FROM working_agg_cat_tests - WHERE test_run_id = '{TEST_RUN_ID}'; \ No newline at end of file + WHERE test_run_id = '{TEST_RUN_ID}'; diff --git a/testgen/template/exec_cat_tests/ex_cat_test_query.sql b/testgen/template/exec_cat_tests/ex_cat_test_query.sql index b575f4a..c544be2 100644 --- a/testgen/template/exec_cat_tests/ex_cat_test_query.sql +++ b/testgen/template/exec_cat_tests/ex_cat_test_query.sql @@ -1,5 +1,4 @@ SELECT '{TEST_RUN_ID}' as test_run_id, - '{TEST_SUITE}' as test_suite, '{SCHEMA_NAME}' as schema_name, 
'{TABLE_NAME}' as table_name, '{CAT_SEQUENCE}' as cat_sequence, diff --git a/testgen/template/execution/ex_finalize_test_run_results.sql b/testgen/template/execution/ex_finalize_test_run_results.sql index e1b3c8e..e4d1d6e 100644 --- a/testgen/template/execution/ex_finalize_test_run_results.sql +++ b/testgen/template/execution/ex_finalize_test_run_results.sql @@ -26,12 +26,8 @@ UPDATE test_results test_suite_id = s.id, auto_gen = d.last_auto_gen_date IS NOT NULL FROM test_results r -INNER JOIN test_suites s - ON (r.project_code = s.project_code - AND r.test_suite = s.test_suite) -INNER JOIN test_definitions d - ON (r.test_definition_id = d.id) -INNER JOIN test_types tt - ON (r.test_type = tt.test_type) +INNER JOIN test_suites s ON r.test_suite_id = s.id +INNER JOIN test_definitions d ON r.test_definition_id = d.id +INNER JOIN test_types tt ON r.test_type = tt.test_type WHERE r.test_run_id = '{TEST_RUN_ID}' AND test_results.id = r.id; diff --git a/testgen/template/execution/ex_write_test_record_to_testrun_table.sql b/testgen/template/execution/ex_write_test_record_to_testrun_table.sql index 7505bd0..07be146 100644 --- a/testgen/template/execution/ex_write_test_record_to_testrun_table.sql +++ b/testgen/template/execution/ex_write_test_record_to_testrun_table.sql @@ -1,6 +1,5 @@ -INSERT INTO test_runs (id, project_code, test_suite, test_starttime, process_id) +INSERT INTO test_runs (id, test_suite_id, test_starttime, process_id) (SELECT '{TEST_RUN_ID}' :: UUID as id, - '{PROJECT_CODE}' as project_code, - '{TEST_SUITE}' as test_suite, + '{TEST_SUITE_ID}' as test_suite_id, '{RUN_DATE}' as test_starttime, '{PROCESS_ID}'as process_id); diff --git a/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_no_drops_generic.sql b/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_no_drops_generic.sql index 0cfe53a..7a992b4 100644 --- a/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_no_drops_generic.sql +++ 
b/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_no_drops_generic.sql @@ -1,6 +1,6 @@ -SELECT '{PROJECT_CODE}' as project_code, '{TEST_TYPE}' as test_type, +SELECT '{TEST_TYPE}' as test_type, '{TEST_DEFINITION_ID}' as test_definition_id, - '{TEST_SUITE}' as test_suite, + '{TEST_SUITE_ID}' as test_suite_id, '{TEST_RUN_ID}' as test_run_id, '{RUN_DATE}' as test_time, '{START_TIME}' as starttime, diff --git a/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_num_incr_generic.sql b/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_num_incr_generic.sql index fc5b356..7152b46 100644 --- a/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_num_incr_generic.sql +++ b/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_num_incr_generic.sql @@ -1,6 +1,6 @@ -SELECT '{PROJECT_CODE}' as project_code, '{TEST_TYPE}' as test_type, +SELECT '{TEST_TYPE}' as test_type, '{TEST_DEFINITION_ID}' as test_definition_id, - '{TEST_SUITE}' as test_suite, + '{TEST_SUITE_ID}' as test_suite_id, '{RUN_DATE}' as test_time, '{START_TIME}' as starttime, CURRENT_TIMESTAMP as endtime, '{SCHEMA_NAME}' as schema_name, '{TABLE_NAME}' as table_name, '{GROUPBY_NAMES}' as column_name, {SKIP_ERRORS} as skip_errors, diff --git a/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_percent_above_generic.sql b/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_percent_above_generic.sql index c36d130..755d27d 100644 --- a/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_percent_above_generic.sql +++ b/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_percent_above_generic.sql @@ -1,7 +1,6 @@ -SELECT '{PROJECT_CODE}' as project_code, - '{TEST_TYPE}' as test_type, +SELECT '{TEST_TYPE}' as test_type, '{TEST_DEFINITION_ID}' as test_definition_id, - '{TEST_SUITE}' as test_suite, + '{TEST_SUITE_ID}' as test_suite_id, '{TEST_RUN_ID}' as test_run_id, 
'{RUN_DATE}' as test_time, '{START_TIME}' as starttime, diff --git a/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_percent_within_generic.sql b/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_percent_within_generic.sql index a3aaf9b..fcf3070 100644 --- a/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_percent_within_generic.sql +++ b/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_percent_within_generic.sql @@ -1,7 +1,6 @@ -SELECT '{PROJECT_CODE}' as project_code, - '{TEST_TYPE}' as test_type, +SELECT '{TEST_TYPE}' as test_type, '{TEST_DEFINITION_ID}' as test_definition_id, - '{TEST_SUITE}' as test_suite, + '{TEST_SUITE_ID}' as test_suite_id, '{TEST_RUN_ID}' as test_run_id, '{RUN_DATE}' as test_time, '{START_TIME}' as starttime, diff --git a/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_same_generic.sql b/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_same_generic.sql index 09d5890..6923e22 100644 --- a/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_same_generic.sql +++ b/testgen/template/flavors/generic/exec_query_tests/ex_aggregate_match_same_generic.sql @@ -1,7 +1,6 @@ -SELECT '{PROJECT_CODE}' as project_code, - '{TEST_TYPE}' as test_type, +SELECT '{TEST_TYPE}' as test_type, '{TEST_DEFINITION_ID}' as test_definition_id, - '{TEST_SUITE}' as test_suite, + '{TEST_SUITE_ID}' as test_suite_id, '{TEST_RUN_ID}' as test_run_id, '{RUN_DATE}' as test_time, '{START_TIME}' as starttime, diff --git a/testgen/template/flavors/generic/exec_query_tests/ex_custom_query_generic.sql b/testgen/template/flavors/generic/exec_query_tests/ex_custom_query_generic.sql index eedda15..e12d926 100644 --- a/testgen/template/flavors/generic/exec_query_tests/ex_custom_query_generic.sql +++ b/testgen/template/flavors/generic/exec_query_tests/ex_custom_query_generic.sql @@ -1,7 +1,6 @@ -SELECT '{PROJECT_CODE}' as project_code, - '{TEST_TYPE}' as 
test_type, +SELECT '{TEST_TYPE}' as test_type, '{TEST_DEFINITION_ID}' as test_definition_id, - '{TEST_SUITE}' as test_suite, + '{TEST_SUITE_ID}' as test_suite_id, '{TEST_RUN_ID}' as test_run_id, '{RUN_DATE}' as test_time, '{START_TIME}' as starttime, diff --git a/testgen/template/flavors/generic/exec_query_tests/ex_data_match_2way_generic.sql b/testgen/template/flavors/generic/exec_query_tests/ex_data_match_2way_generic.sql index 9022693..256db87 100644 --- a/testgen/template/flavors/generic/exec_query_tests/ex_data_match_2way_generic.sql +++ b/testgen/template/flavors/generic/exec_query_tests/ex_data_match_2way_generic.sql @@ -1,7 +1,6 @@ -SELECT '{PROJECT_CODE}' as project_code, - '{TEST_TYPE}' as test_type, +SELECT '{TEST_TYPE}' as test_type, '{TEST_DEFINITION_ID}' as test_definition_id, - '{TEST_SUITE}' as test_suite, + '{TEST_SUITE_ID}' as test_suite_id, '{TEST_RUN_ID}' as test_run_id, '{RUN_DATE}' as test_time, '{START_TIME}' as starttime, diff --git a/testgen/template/flavors/generic/exec_query_tests/ex_data_match_generic.sql b/testgen/template/flavors/generic/exec_query_tests/ex_data_match_generic.sql index 683e9d2..7c7adc0 100644 --- a/testgen/template/flavors/generic/exec_query_tests/ex_data_match_generic.sql +++ b/testgen/template/flavors/generic/exec_query_tests/ex_data_match_generic.sql @@ -1,7 +1,6 @@ -SELECT '{PROJECT_CODE}' as project_code, - '{TEST_TYPE}' as test_type, +SELECT '{TEST_TYPE}' as test_type, '{TEST_DEFINITION_ID}' as test_definition_id, - '{TEST_SUITE}' as test_suite, + '{TEST_SUITE_ID}' as test_suite_id, '{TEST_RUN_ID}' as test_run_id, '{RUN_DATE}' as test_time, '{START_TIME}' as starttime, diff --git a/testgen/template/flavors/generic/exec_query_tests/ex_prior_match_generic.sql b/testgen/template/flavors/generic/exec_query_tests/ex_prior_match_generic.sql index a649f6b..b4c5fd1 100644 --- a/testgen/template/flavors/generic/exec_query_tests/ex_prior_match_generic.sql +++ 
b/testgen/template/flavors/generic/exec_query_tests/ex_prior_match_generic.sql @@ -1,6 +1,6 @@ -SELECT '{PROJECT_CODE}' as project_code, '{TEST_TYPE}' as test_type, +SELECT '{TEST_TYPE}' as test_type, '{TEST_DEFINITION_ID}' as test_definition_id, - '{TEST_SUITE}' as test_suite, + '{TEST_SUITE_ID}' as test_suite_id, '{RUN_DATE}' as test_time, '{START_TIME}' as starttime,CURRENT_TIMESTAMP as endtime, '{SCHEMA_NAME}' as schema_name, '{TABLE_NAME}' as table_name, '{COLUMN_NAME}' as column_names, {SKIP_ERRORS} as skip_errors, diff --git a/testgen/template/flavors/generic/exec_query_tests/ex_relative_entropy_generic.sql b/testgen/template/flavors/generic/exec_query_tests/ex_relative_entropy_generic.sql index f27b31b..b6e340f 100644 --- a/testgen/template/flavors/generic/exec_query_tests/ex_relative_entropy_generic.sql +++ b/testgen/template/flavors/generic/exec_query_tests/ex_relative_entropy_generic.sql @@ -25,10 +25,9 @@ dataset FROM latest_ver l FULL JOIN older_ver o ON (l.category = o.category) ) -SELECT '{PROJECT_CODE}' as project_code, - '{TEST_TYPE}' as test_type, +SELECT '{TEST_TYPE}' as test_type, '{TEST_DEFINITION_ID}' as test_definition_id, - '{TEST_SUITE}' as test_suite, + '{TEST_SUITE_ID}' as test_suite_id, '{TEST_RUN_ID}' as test_run_id, '{RUN_DATE}' as test_time, '{START_TIME}' as starttime, diff --git a/testgen/template/flavors/generic/exec_query_tests/ex_window_match_no_drops_generic.sql b/testgen/template/flavors/generic/exec_query_tests/ex_window_match_no_drops_generic.sql index 10dbda2..f1f07eb 100644 --- a/testgen/template/flavors/generic/exec_query_tests/ex_window_match_no_drops_generic.sql +++ b/testgen/template/flavors/generic/exec_query_tests/ex_window_match_no_drops_generic.sql @@ -1,7 +1,6 @@ -SELECT '{PROJECT_CODE}' as project_code, - '{TEST_TYPE}' as test_type, +SELECT '{TEST_TYPE}' as test_type, '{TEST_DEFINITION_ID}' as test_definition_id, - '{TEST_SUITE}' as test_suite, + '{TEST_SUITE_ID}' as test_suite_id, '{TEST_RUN_ID}' as test_run_id, 
'{RUN_DATE}' as test_time, '{START_TIME}' as starttime, diff --git a/testgen/template/flavors/generic/exec_query_tests/ex_window_match_same_generic.sql b/testgen/template/flavors/generic/exec_query_tests/ex_window_match_same_generic.sql index 4b0c2b8..3e60ba2 100644 --- a/testgen/template/flavors/generic/exec_query_tests/ex_window_match_same_generic.sql +++ b/testgen/template/flavors/generic/exec_query_tests/ex_window_match_same_generic.sql @@ -1,7 +1,6 @@ -SELECT '{PROJECT_CODE}' as project_code, - '{TEST_TYPE}' as test_type, +SELECT '{TEST_TYPE}' as test_type, '{TEST_DEFINITION_ID}' as test_definition_id, - '{TEST_SUITE}' as test_suite, + '{TEST_SUITE_ID}' as test_suite_id, '{TEST_RUN_ID}' as test_run_id, '{RUN_DATE}' as test_time, '{START_TIME}' as starttime, diff --git a/testgen/template/flavors/mssql/exec_query_tests/ex_relative_entropy_mssql.sql b/testgen/template/flavors/mssql/exec_query_tests/ex_relative_entropy_mssql.sql index 2d51aa0..e619675 100644 --- a/testgen/template/flavors/mssql/exec_query_tests/ex_relative_entropy_mssql.sql +++ b/testgen/template/flavors/mssql/exec_query_tests/ex_relative_entropy_mssql.sql @@ -25,10 +25,9 @@ dataset FROM latest_ver l FULL JOIN older_ver o ON (l.category = o.category) ) -SELECT '{PROJECT_CODE}' as project_code, - '{TEST_TYPE}' as test_type, +SELECT '{TEST_TYPE}' as test_type, '{TEST_DEFINITION_ID}' as test_definition_id, - '{TEST_SUITE}' as test_suite, + '{TEST_SUITE_ID}' as test_suite_id, '{TEST_RUN_ID}' as test_run_id, '{RUN_DATE}' as test_time, '{START_TIME}' as starttime, diff --git a/testgen/template/flavors/postgresql/exec_query_tests/ex_window_match_no_drops_postgresql.sql b/testgen/template/flavors/postgresql/exec_query_tests/ex_window_match_no_drops_postgresql.sql index 92d9554..1385657 100644 --- a/testgen/template/flavors/postgresql/exec_query_tests/ex_window_match_no_drops_postgresql.sql +++ b/testgen/template/flavors/postgresql/exec_query_tests/ex_window_match_no_drops_postgresql.sql @@ -1,7 +1,6 @@ 
-SELECT '{PROJECT_CODE}' as project_code, - '{TEST_TYPE}' as test_type, +SELECT '{TEST_TYPE}' as test_type, '{TEST_DEFINITION_ID}' as test_definition_id, - '{TEST_SUITE}' as test_suite, + '{TEST_SUITE_ID}' as test_suite_id, '{TEST_RUN_ID}' as test_run_id, '{RUN_DATE}' as test_time, '{START_TIME}' as starttime, diff --git a/testgen/template/flavors/postgresql/exec_query_tests/ex_window_match_same_postgresql.sql b/testgen/template/flavors/postgresql/exec_query_tests/ex_window_match_same_postgresql.sql index 202ebba..20a0791 100644 --- a/testgen/template/flavors/postgresql/exec_query_tests/ex_window_match_same_postgresql.sql +++ b/testgen/template/flavors/postgresql/exec_query_tests/ex_window_match_same_postgresql.sql @@ -1,7 +1,6 @@ -SELECT '{PROJECT_CODE}' as project_code, - '{TEST_TYPE}' as test_type, +SELECT '{TEST_TYPE}' as test_type, '{TEST_DEFINITION_ID}' as test_definition_id, - '{TEST_SUITE}' as test_suite, + '{TEST_SUITE_ID}' as test_suite_id, '{TEST_RUN_ID}' as test_run_id, '{RUN_DATE}' as test_time, '{START_TIME}' as starttime, diff --git a/testgen/template/get_entities/get_test_results_for_run_cli.sql b/testgen/template/get_entities/get_test_results_for_run_cli.sql index 3acf956..9716167 100644 --- a/testgen/template/get_entities/get_test_results_for_run_cli.sql +++ b/testgen/template/get_entities/get_test_results_for_run_cli.sql @@ -1,4 +1,4 @@ -SELECT test_suite as test_suite_key, +SELECT ts.test_suite as test_suite_key, table_name, column_names as column_name, r.test_type, @@ -11,6 +11,7 @@ SELECT test_suite as test_suite_key, tt.measure_uom FROM test_results r INNER JOIN test_types tt ON r.test_type = tt.test_type +INNER JOIN test_suites ts ON r.test_suite_id = ts.id WHERE test_run_id = '{TEST_RUN_ID}'::UUID {ERRORS_ONLY} ORDER BY r.schema_name, r.table_name, r.column_names, r.test_type; diff --git a/testgen/template/get_entities/get_test_run_list.sql b/testgen/template/get_entities/get_test_run_list.sql index f3ad50c..bc25ccc 100644 --- 
a/testgen/template/get_entities/get_test_run_list.sql +++ b/testgen/template/get_entities/get_test_run_list.sql @@ -3,22 +3,22 @@ Output: list of test runs performed for a test_suite Alternative: project-code, table-name Optional: table-name, column-name, from-date, thru-date*/ -Select tr.test_suite as test_suite_key, - tr.test_starttime as test_time, - tr.status, - tr.id::VARCHAR(50) as test_run_id, - COUNT(DISTINCT lower(r.schema_name || '.' || table_name)) as table_ct, - COUNT(*) as result_ct, - SUM(CASE WHEN r.result_code = 0 THEN 1 END) as fail_ct, - SUM(CASE WHEN r.observability_status = 'Sent' THEN 1 END) as sent_to_obs, - process_id -from test_runs tr -INNER JOIN test_results r - ON (tr.id = r.test_run_id) -where tr.project_code = '{PROJECT_CODE}' -and tr.test_suite = '{TEST_SUITE}' -GROUP BY tr.project_code, - tr.test_suite, - tr.test_starttime, - tr.status, - tr.id; + SELECT ts.test_suite as test_suite_key, + tr.test_starttime as test_time, + tr.status, + tr.id::VARCHAR as test_run_id, + COUNT(DISTINCT lower(r.schema_name || '.' 
|| table_name)) as table_ct, + COUNT(*) as result_ct, + SUM(CASE WHEN r.result_code = 0 THEN 1 END) as fail_ct, + SUM(CASE WHEN r.observability_status = 'Sent' THEN 1 END) as sent_to_obs, + process_id + FROM test_runs tr +INNER JOIN test_results r ON tr.id = r.test_run_id +INNER JOIN test_suites ts ON tr.test_suite_id = ts.id + WHERE ts.project_code = '{PROJECT_CODE}' + AND ts.test_suite = '{TEST_SUITE}' + GROUP BY tr.id, + ts.project_code, + ts.test_suite, + tr.test_starttime, + tr.status; diff --git a/testgen/template/get_entities/get_test_suite_list.sql b/testgen/template/get_entities/get_test_suite_list.sql index 840bc5e..294eb65 100644 --- a/testgen/template/get_entities/get_test_suite_list.sql +++ b/testgen/template/get_entities/get_test_suite_list.sql @@ -1,18 +1,11 @@ -SELECT - ts.id as test_suite_id, - ts.project_code as project_key, - ts.test_suite as test_suite_key, - ts.connection_id, - ts.test_suite_description, - MAX(tr.test_starttime) as last_run -FROM test_suites ts -LEFT JOIN test_runs tr - ON ts.project_code = tr.project_code - AND ts.test_suite = tr.test_suite -WHERE ts.project_code = '{PROJECT_CODE}' -GROUP BY ts.id, - ts.project_code, - ts.test_suite, - ts.connection_id, - ts.test_suite_description -ORDER BY ts.test_suite; + SELECT ts.id as test_suite_id, + ts.project_code as project_key, + ts.test_suite as test_suite_key, + ts.connection_id, + ts.test_suite_description, + MAX(tr.test_starttime) as last_run + FROM test_suites ts + LEFT JOIN test_runs tr + ON tr.test_suite_id = ts.id + WHERE ts.project_code = '{PROJECT_CODE}' + ORDER BY ts.test_suite; diff --git a/testgen/template/observability/get_event_data.sql b/testgen/template/observability/get_event_data.sql index ec247af..d3f531e 100644 --- a/testgen/template/observability/get_event_data.sql +++ b/testgen/template/observability/get_event_data.sql @@ -19,5 +19,4 @@ from test_suites ts join connections c on c.connection_id = ts.connection_id join projects pr on pr.project_code = 
ts.project_code join table_groups tg on tg.id = ts.table_groups_id -where pr.project_code = '{PROJECT_CODE}' -and ts.test_suite = '{TEST_SUITE}' +where ts.id = '{TEST_SUITE_ID}' diff --git a/testgen/template/observability/get_test_results.sql b/testgen/template/observability/get_test_results.sql index 29f060d..afc29ec 100644 --- a/testgen/template/observability/get_test_results.sql +++ b/testgen/template/observability/get_test_results.sql @@ -35,7 +35,6 @@ SELECT measure_uom, measure_uom_description FROM v_queued_observability_results -where project_code = '{PROJECT_CODE}' -and test_suite = '{TEST_SUITE}' +where test_suite_id = '{TEST_SUITE_ID}' order by start_time asc limit {MAX_QTY_EVENTS} diff --git a/testgen/template/observability/update_test_results_exported_to_observability.sql b/testgen/template/observability/update_test_results_exported_to_observability.sql index c94363e..2bb1e15 100644 --- a/testgen/template/observability/update_test_results_exported_to_observability.sql +++ b/testgen/template/observability/update_test_results_exported_to_observability.sql @@ -8,5 +8,4 @@ with selects INNER JOIN selects s ON (r.result_id = s.selected_id) where r.id = test_results.id and r.observability_status = 'Queued' - and r.project_code = '{PROJECT_CODE}' - and r.test_suite = '{TEST_SUITE}' + and r.test_suite_id = '{TEST_SUITE_ID}' diff --git a/testgen/template/validate_tests/ex_write_test_val_errors.sql b/testgen/template/validate_tests/ex_write_test_val_errors.sql index ef03a93..8546863 100644 --- a/testgen/template/validate_tests/ex_write_test_val_errors.sql +++ b/testgen/template/validate_tests/ex_write_test_val_errors.sql @@ -1,6 +1,5 @@ INSERT INTO test_results - ( project_code, - test_suite, + ( test_suite_id, test_type, test_definition_id, schema_name, @@ -12,13 +11,12 @@ INSERT INTO test_results result_code, result_message, result_measure ) - SELECT '{PROJECT_CODE}' as project_code, - '{TEST_SUITE}' as test_suite, - td.test_type, - td.id, - td.schema_name, - 
td.table_name, - td.column_name, + SELECT '{TEST_SUITE_ID}'::UUID, + test_type, + id, + schema_name, + table_name, + column_name, '{RUN_DATE}' as test_time, '{TEST_RUN_ID}' as test_run_id, NULL as input_parameters, @@ -26,8 +24,6 @@ INSERT INTO test_results -- TODO: show only missing columns referenced in this test left('ERROR - TEST COLUMN MISSING: {MISSING_COLUMNS_NO_QUOTES}', 470) AS result_message, NULL as result_measure - FROM test_definitions td - INNER JOIN test_suites ts ON td.test_suite_id = ts.id - WHERE td.test_active = '-1' - AND ts.project_code = '{PROJECT_CODE}' - AND ts.test_suite = '{TEST_SUITE}'; + FROM test_definitions + WHERE test_active = '-1' + AND test_suite_id = '{TEST_SUITE_ID}'; diff --git a/testgen/ui/queries/table_group_queries.py b/testgen/ui/queries/table_group_queries.py index 4e047ee..0663a6f 100644 --- a/testgen/ui/queries/table_group_queries.py +++ b/testgen/ui/queries/table_group_queries.py @@ -10,7 +10,7 @@ def _get_select_statement(schema): profiling_include_mask, profiling_exclude_mask, profiling_table_set, profile_id_column_mask, profile_sk_column_mask, - data_source, source_system, data_location, business_domain, + data_source, source_system, data_location, business_domain, transform_level, source_process, stakeholder_group, profile_use_sampling, profile_sample_percent, profile_sample_min_count, profiling_delay_days @@ -27,16 +27,18 @@ def get_by_id(schema, table_group_id): return db.retrieve_data(sql) -def get_test_suite_names_by_table_group_names(schema, table_group_names): - items = [f"'{item}'" for item in table_group_names] - sql = f"""select test_suite -from {schema}.test_suites ts -inner join {schema}.table_groups tg on tg.id = ts.table_groups_id -where tg.table_groups_name in ({",".join(items)}) +def get_test_suite_ids_by_table_group_names(schema, table_group_names): + names_str = ", ".join([f"'{item}'" for item in table_group_names]) + sql = f""" + SELECT ts.id::VARCHAR + FROM {schema}.test_suites ts + INNER JOIN 
{schema}.table_groups tg ON tg.id = ts.table_groups_id + WHERE tg.table_groups_name in ({names_str}) """ return db.retrieve_data(sql) + def get_table_group_dependencies(schema, table_group_names): if table_group_names is None or len(table_group_names) == 0: raise ValueError("No Table Group is specified.") diff --git a/testgen/ui/queries/test_definition_queries.py b/testgen/ui/queries/test_definition_queries.py index ba285ab..fdfcf3f 100644 --- a/testgen/ui/queries/test_definition_queries.py +++ b/testgen/ui/queries/test_definition_queries.py @@ -238,9 +238,9 @@ def add(schema, test_definition): def get_test_definition_usage(schema, test_definition_ids): - test_definition_names_join = [f"'{item}'" for item in test_definition_ids] + ids_str = ",".join([f"'{item}'" for item in test_definition_ids]) sql = f""" - select distinct test_definition_id from {schema}.test_results where test_definition_id in ({",".join(test_definition_names_join)}); + select distinct test_definition_id from {schema}.test_results where test_definition_id in ({ids_str}); """ return db.retrieve_data(sql) @@ -255,14 +255,13 @@ def delete(schema, test_definition_ids): st.cache_data.clear() -def cascade_delete(schema, test_suite_names): - if test_suite_names is None or len(test_suite_names) == 0: +def cascade_delete(schema, test_suite_ids): + if not test_suite_ids: raise ValueError("No Test Suite is specified.") - items = [f"'{item}'" for item in test_suite_names] + ids_str = ", ".join([f"'{item}'" for item in test_suite_ids]) sql = f""" - DELETE FROM {schema}.test_definitions - WHERE test_suite_id in (select id from {schema}.test_suites where test_suite in ({",".join(items)})) + DELETE FROM {schema}.test_definitions WHERE test_suite_id in ({ids_str}) """ db.execute_sql(sql) st.cache_data.clear() diff --git a/testgen/ui/queries/test_run_queries.py b/testgen/ui/queries/test_run_queries.py index ba1e3a9..34692de 100644 --- a/testgen/ui/queries/test_run_queries.py +++ 
b/testgen/ui/queries/test_run_queries.py @@ -4,16 +4,20 @@ import testgen.ui.services.database_service as db -def cascade_delete(schema: str, test_suite_names: list[str]) -> None: - if test_suite_names is None or len(test_suite_names) == 0: +def cascade_delete(schema: str, test_suite_ids: list[str]) -> None: + if not test_suite_ids: raise ValueError("No Test Suite is specified.") - items = [f"'{item}'" for item in test_suite_names] + ids_str = ", ".join([f"'{item}'" for item in test_suite_ids]) sql = f""" - delete from {schema}.working_agg_cat_results where test_suite in ({",".join(items)}); - delete from {schema}.working_agg_cat_tests where test_suite in ({",".join(items)}); - delete from {schema}.test_runs where test_suite in ({",".join(items)}); - delete from {schema}.test_results where test_suite in ({",".join(items)}); + DELETE + FROM {schema}.working_agg_cat_results + WHERE test_run_id in (select id from {schema}.test_runs where test_suite_id in ({ids_str})); + DELETE + FROM {schema}.working_agg_cat_tests + WHERE test_run_id in (select id from {schema}.test_runs where test_suite_id in ({ids_str})); + DELETE FROM {schema}.test_runs WHERE test_suite_id in ({ids_str}); + DELETE FROM {schema}.test_results WHERE test_suite_id in ({ids_str}); """ db.execute_sql(sql) st.cache_data.clear() @@ -25,9 +29,11 @@ def update_status(schema: str, test_run_id: str, status: str) -> None: now = date_service.get_now_as_string() - sql = f"""UPDATE {schema}.test_runs -SET status = '{status}', - test_endtime = '{now}' -where id = '{test_run_id}' :: UUID;""" + sql = f""" + UPDATE {schema}.test_runs + SET status = '{status}', + test_endtime = '{now}' + WHERE id = '{test_run_id}'::UUID; + """ db.execute_sql(sql) st.cache_data.clear() diff --git a/testgen/ui/queries/test_suite_queries.py b/testgen/ui/queries/test_suite_queries.py index 99ccc76..d63c6bc 100644 --- a/testgen/ui/queries/test_suite_queries.py +++ b/testgen/ui/queries/test_suite_queries.py @@ -67,47 +67,35 @@ def 
add(schema, test_suite): st.cache_data.clear() -def delete(schema, test_suite_ids): - if test_suite_ids is None or len(test_suite_ids) == 0: +def delete(schema, test_suite_ids: list[str]): + if not test_suite_ids: raise ValueError("No table group is specified.") - items = [f"'{item}'" for item in test_suite_ids] - sql = f"""DELETE FROM {schema}.test_suites WHERE id in ({",".join(items)})""" + ids_str = ",".join([f"'{item}'" for item in test_suite_ids]) + sql = f"""DELETE FROM {schema}.test_suites WHERE id in ({ids_str})""" db.execute_sql(sql) st.cache_data.clear() -def cascade_delete(schema: str, test_suite_names: list[str]) -> None: - if test_suite_names is None or len(test_suite_names) == 0: - raise ValueError("No Test Suite is specified.") - - items = [f"'{item}'" for item in test_suite_names] - sql = f"""delete from {schema}.test_suites where test_suite in ({",".join(items)})""" - db.execute_sql(sql) - st.cache_data.clear() - - -def get_test_suite_dependencies(schema: str, test_suite_names: list[str]) -> pd.DataFrame: - test_suite_names_join = [f"'{item}'" for item in test_suite_names] +def get_test_suite_dependencies(schema: str, test_suite_ids: list[str]) -> pd.DataFrame: + ids_str = ", ".join([f"'{item}'" for item in test_suite_ids]) sql = f""" - select distinct ts.test_suite - from {schema}.test_definitions td join {schema}.test_suites ts on ts.id = td.test_suite_id - where ts.test_suite in ({",".join(test_suite_names_join)}) - union - select distinct test_suite from {schema}.test_results where test_suite in ({",".join(test_suite_names_join)}); + SELECT DISTINCT test_suite_id FROM {schema}.test_definitions WHERE test_suite_id in ({ids_str}) + UNION + SELECT DISTINCT test_suite_id FROM {schema}.test_results WHERE test_suite_id in ({ids_str}); """ return db.retrieve_data(sql) -def get_test_suite_usage(schema: str, test_suite_names: list[str]) -> pd.DataFrame: - test_suite_names_join = [f"'{item}'" for item in test_suite_names] +def 
get_test_suite_usage(schema: str, test_suite_ids: list[str]) -> pd.DataFrame: + ids_str = ", ".join([f"'{item}'" for item in test_suite_ids]) sql = f""" - select distinct test_suite from {schema}.test_runs where test_suite in ({",".join(test_suite_names_join)}) and status = 'Running' + SELECT DISTINCT test_suite_id FROM {schema}.test_runs WHERE test_suite_id in ({ids_str}) AND status = 'Running' """ return db.retrieve_data(sql) -def get_test_suite_refresh_check(schema, table_groups_id, test_suite_id): +def get_test_suite_refresh_check(schema, test_suite_id): sql = f""" SELECT COUNT(*) as test_ct, SUM(CASE WHEN COALESCE(d.lock_refresh, 'N') = 'N' THEN 1 ELSE 0 END) as unlocked_test_ct, @@ -115,8 +103,7 @@ def get_test_suite_refresh_check(schema, table_groups_id, test_suite_id): FROM {schema}.test_definitions d INNER JOIN {schema}.test_types t ON (d.test_type = t.test_type) - WHERE d.table_groups_id = '{table_groups_id}'::UUID - AND d.test_suite_id = '{test_suite_id}' + WHERE d.test_suite_id = '{test_suite_id}' AND t.run_type = 'CAT' AND t.selection_criteria IS NOT NULL; """ diff --git a/testgen/ui/services/query_service.py b/testgen/ui/services/query_service.py index 0fecadd..3343010 100644 --- a/testgen/ui/services/query_service.py +++ b/testgen/ui/services/query_service.py @@ -60,15 +60,15 @@ def run_test_type_lookup_query(str_schema, str_test_type=None, boo_show_referent tt.test_name_short, tt.test_name_long, tt.test_description, tt.measure_uom, COALESCE(tt.measure_uom_description, '') as measure_uom_description, tt.default_parm_columns, tt.default_severity, - tt.run_type, tt.test_scope, tt.dq_dimension, tt.threshold_description, - tt.column_name_prompt, tt.column_name_help, + tt.run_type, tt.test_scope, tt.dq_dimension, tt.threshold_description, + tt.column_name_prompt, tt.column_name_help, tt.default_parm_prompts, tt.default_parm_help, tt.usage_notes, - CASE tt.test_scope WHEN 'referential' THEN '⧉ ' WHEN 'custom' THEN '⛭ ' WHEN 'table' THEN '⊞ ' WHEN 'column' 
THEN '≣ ' ELSE '? ' END + CASE tt.test_scope WHEN 'referential' THEN '⧉ ' WHEN 'custom' THEN '⛭ ' WHEN 'table' THEN '⊞ ' WHEN 'column' THEN '≣ ' ELSE '? ' END || tt.test_name_short || ': ' || lower(tt.test_name_long) || CASE WHEN tt.selection_criteria > '' THEN ' [auto-generated]' ELSE '' END as select_name FROM {str_schema}.test_types tt WHERE tt.active = 'Y' {str_criteria} - ORDER BY CASE tt.test_scope WHEN 'referential' THEN 1 WHEN 'custom' THEN 2 WHEN 'table' THEN 3 WHEN 'column' THEN 4 ELSE 5 END, + ORDER BY CASE tt.test_scope WHEN 'referential' THEN 1 WHEN 'custom' THEN 2 WHEN 'table' THEN 3 WHEN 'column' THEN 4 ELSE 5 END, tt.test_name_short; """ return db.retrieve_data(str_sql) @@ -175,59 +175,21 @@ def run_test_suite_lookup_by_project_query(str_schema, str_project): return db.retrieve_data(str_sql) -def run_last_test_run(str_schema, str_project_code): - str_sql = f""" - SELECT MIN(EXTRACT(DAY FROM (CURRENT_DATE - r.test_starttime))) as days_back - FROM {str_schema}.test_runs r - WHERE r.project_code = '{str_project_code}' - AND r.status = 'Complete'; - """ - return db.retrieve_data(str_sql) - - def run_test_run_lookup_by_date(str_schema, str_project_code, str_run_date): str_sql = f""" - SELECT r.id::VARCHAR(50), - r.test_starttime::VARCHAR || ' - ' || s.test_suite as test_run_desc - FROM {str_schema}.test_runs r - LEFT JOIN {str_schema}.test_suites s - ON (r.project_code = s.project_code - AND r.test_suite = s.test_suite) - WHERE r.project_code = '{str_project_code}' - AND r.test_starttime::DATE = '{str_run_date}' - ORDER BY r.test_starttime DESC + SELECT + r.id::VARCHAR(50), + r.test_starttime::VARCHAR || ' - ' || s.test_suite as test_run_desc + FROM {str_schema}.test_runs r + LEFT JOIN {str_schema}.test_suites s ON r.test_suite_id = s.id) + WHERE + s.project_code = '{str_project_code}' + AND r.test_starttime::DATE = '{str_run_date}' + ORDER BY r.test_starttime DESC """ return db.retrieve_data(str_sql) -def update_anomaly_disposition_old(selected, 
str_schema, str_new_status): - int_batch_size = 50 - - def finalize_query(status, ids): - return f"""UPDATE {str_schema}.profile_anomaly_results - SET disposition = NULLIF('{status}', 'No Decision') - WHERE id IN ({ids.rstrip(',')});""" - - lst_ids = [row["id"] for row in selected if "id" in row] - lst_updates = [] - i = 0 - str_ids = "" - - for my_id in lst_ids: - i += 1 - str_ids += f" '{my_id}'::UUID," - if i >= int_batch_size: - lst_updates.append(finalize_query(str_new_status, str_ids)) - # reset for next batch - i = 0 - if i > 0: - lst_updates.append(finalize_query(str_new_status, str_ids)) - for upd in lst_updates: - db.execute_sql(upd) - - return True - - def update_anomaly_disposition(selected, str_schema, str_new_status): def finalize_small_update(status, ids): return f"""UPDATE {str_schema}.profile_anomaly_results diff --git a/testgen/ui/services/table_group_service.py b/testgen/ui/services/table_group_service.py index e9ce018..c78f1b5 100644 --- a/testgen/ui/services/table_group_service.py +++ b/testgen/ui/services/table_group_service.py @@ -28,23 +28,26 @@ def add(table_group): def cascade_delete(table_group_names, dry_run=False): schema = st.session_state["dbschema"] - test_suite_names = get_test_suite_names_by_table_group_names(table_group_names) - can_be_deleted = not table_group_has_dependencies(schema, table_group_names, test_suite_names) + test_suite_ids = get_test_suite_ids_by_table_group_names(table_group_names) + + can_be_deleted = not any( + ( + table_group_has_dependencies(table_group_names), + test_suite_service.has_test_suite_dependencies(test_suite_ids), + ) + ) + if not dry_run: - test_suite_service.cascade_delete(test_suite_names) + test_suite_service.cascade_delete(test_suite_ids) table_group_queries.cascade_delete(schema, table_group_names) return can_be_deleted -def table_group_has_dependencies(schema, table_group_names, test_suite_names): - return any( - ( - table_group_names and not table_group_queries.get_table_group_dependencies( 
- schema, table_group_names - ).empty, - test_suite_service.has_test_suite_dependencies(schema, test_suite_names), - ) - ) +def table_group_has_dependencies(table_group_names): + if not table_group_names: + return False + schema = st.session_state["dbschema"] + return not table_group_queries.get_table_group_dependencies(schema, table_group_names).empty def are_table_groups_in_use(table_group_names): @@ -53,8 +56,8 @@ def are_table_groups_in_use(table_group_names): schema = st.session_state["dbschema"] - test_suite_names = get_test_suite_names_by_table_group_names(table_group_names) - test_suites_in_use = test_suite_service.are_test_suites_in_use(test_suite_names) + test_suite_ids = get_test_suite_ids_by_table_group_names(table_group_names) + test_suites_in_use = test_suite_service.are_test_suites_in_use(test_suite_ids) table_groups_in_use_result = table_group_queries.get_table_group_usage(schema, table_group_names) table_groups_in_use = not table_groups_in_use_result.empty @@ -62,12 +65,12 @@ def are_table_groups_in_use(table_group_names): return test_suites_in_use or table_groups_in_use -def get_test_suite_names_by_table_group_names(table_group_names): +def get_test_suite_ids_by_table_group_names(table_group_names): if not table_group_names: return [] schema = st.session_state["dbschema"] - test_suite_names = table_group_queries.get_test_suite_names_by_table_group_names(schema, table_group_names) - return test_suite_names.to_dict()["test_suite"].values() + result = table_group_queries.get_test_suite_ids_by_table_group_names(schema, table_group_names) + return result.to_dict()["id"].values() def test_table_group(table_group, connection_id, project_code): diff --git a/testgen/ui/services/test_definition_service.py b/testgen/ui/services/test_definition_service.py index 28d19b0..036a7d7 100644 --- a/testgen/ui/services/test_definition_service.py +++ b/testgen/ui/services/test_definition_service.py @@ -31,10 +31,10 @@ def delete(test_definition_ids, dry_run=False): 
return can_be_deleted -def cascade_delete(test_suite_names): +def cascade_delete(test_suite_ids: list[str]): schema = st.session_state["dbschema"] - test_run_service.cascade_delete(test_suite_names) - test_definition_queries.cascade_delete(schema, test_suite_names) + test_run_service.cascade_delete(test_suite_ids) + test_definition_queries.cascade_delete(schema, test_suite_ids) def add(test_definition): diff --git a/testgen/ui/services/test_run_service.py b/testgen/ui/services/test_run_service.py index c3b7b2c..ccbbccd 100644 --- a/testgen/ui/services/test_run_service.py +++ b/testgen/ui/services/test_run_service.py @@ -3,9 +3,9 @@ import testgen.ui.queries.test_run_queries as test_run_queries -def cascade_delete(test_suite_names): +def cascade_delete(test_suite_ids): schema = st.session_state["dbschema"] - test_run_queries.cascade_delete(schema, test_suite_names) + test_run_queries.cascade_delete(schema, test_suite_ids) def update_status(test_run_id, status): diff --git a/testgen/ui/services/test_suite_service.py b/testgen/ui/services/test_suite_service.py index f23d444..f71e125 100644 --- a/testgen/ui/services/test_suite_service.py +++ b/testgen/ui/services/test_suite_service.py @@ -19,36 +19,37 @@ def add(test_suite): test_suite_queries.add(schema, test_suite) -def cascade_delete(test_suite_names, dry_run=False): - if not test_suite_names: +def cascade_delete(test_suite_ids, dry_run=False): + if not test_suite_ids: return True schema = st.session_state["dbschema"] - can_be_deleted = not has_test_suite_dependencies(schema, test_suite_names) + can_be_deleted = not has_test_suite_dependencies(test_suite_ids) if not dry_run: - test_definition_service.cascade_delete(test_suite_names) - test_suite_queries.cascade_delete(schema, test_suite_names) + test_definition_service.cascade_delete(test_suite_ids) + test_suite_queries.delete(schema, test_suite_ids) return can_be_deleted -def has_test_suite_dependencies(schema, test_suite_names): - if not test_suite_names: +def 
has_test_suite_dependencies(test_suite_ids: list[str]): + schema = st.session_state["dbschema"] + if not test_suite_ids: return False - return not test_suite_queries.get_test_suite_dependencies(schema, test_suite_names).empty + return not test_suite_queries.get_test_suite_dependencies(schema, test_suite_ids).empty -def are_test_suites_in_use(test_suite_names): - if not test_suite_names: +def are_test_suites_in_use(test_suite_ids: list[str]): + if not test_suite_ids: return False schema = st.session_state["dbschema"] - usage_result = test_suite_queries.get_test_suite_usage(schema, test_suite_names) + usage_result = test_suite_queries.get_test_suite_usage(schema, test_suite_ids) return not usage_result.empty -def get_test_suite_refresh_warning(table_groups_id, test_suite_id): +def get_test_suite_refresh_warning(test_suite_id): if not test_suite_id: return False schema = st.session_state["dbschema"] - row_result = test_suite_queries.get_test_suite_refresh_check(schema, table_groups_id, test_suite_id) + row_result = test_suite_queries.get_test_suite_refresh_check(schema, test_suite_id) test_ct = None unlocked_test_ct = None diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py index cffaa24..8f2e815 100644 --- a/testgen/ui/views/test_results.py +++ b/testgen/ui/views/test_results.py @@ -154,11 +154,12 @@ def run_test_run_lookup_by_date(str_project_code, str_run_date): def get_drill_test_run(str_test_run_id): str_schema = st.session_state["dbschema"] str_sql = f""" - SELECT id::VARCHAR as test_run_id, - test_starttime as test_date, - test_suite as test_suite_description - FROM {str_schema}.test_runs - WHERE id = '{str_test_run_id}'::UUID; + SELECT tr.id::VARCHAR as test_run_id, + tr.test_starttime as test_date, + ts.test_suite as test_suite_description + FROM {str_schema}.test_runs tr + INNER JOIN {str_schema}.test_suites ts ON tr.test_suite_id = ts.id + WHERE tr.id = '{str_test_run_id}'::UUID; """ return db.retrieve_data(str_sql) @@ -180,7 
+181,7 @@ def get_test_results_uncached(str_schema, str_run_id, str_sel_test_status): ) SELECT r.table_name, p.project_name, ts.test_suite, tg.table_groups_name, cn.connection_name, cn.project_host, cn.sql_flavor, - tt.dq_dimension, tt.test_scope, + tt.dq_dimension, tt.test_scope, r.schema_name, r.column_names, r.test_time::DATE as test_date, r.test_type, tt.id as test_type_id, tt.test_name_short, tt.test_name_long, r.test_description, tt.measure_uom, tt.measure_uom_description, c.test_operator, r.threshold_value::NUMERIC(16, 5), r.result_measure::NUMERIC(16, 5), r.result_status, @@ -203,7 +204,7 @@ def get_test_results_uncached(str_schema, str_run_id, str_sel_test_status): CASE WHEN result_message ILIKE 'ERROR - TEST COLUMN MISSING%%' THEN 1 END as execution_error_ct, - r.project_code, r.table_groups_id::VARCHAR, + p.project_code, r.table_groups_id::VARCHAR, r.id::VARCHAR as test_result_id, r.test_run_id::VARCHAR, c.id::VARCHAR as connection_id, r.test_suite_id::VARCHAR, r.test_definition_id::VARCHAR as test_definition_id_runtime, @@ -225,10 +226,9 @@ def get_test_results_uncached(str_schema, str_run_id, str_sel_test_status): AND r.auto_gen = TRUE AND d.last_auto_gen_date IS NOT NULL) INNER JOIN {str_schema}.test_suites ts - ON (r.project_code = ts.project_code - AND r.test_suite = ts.test_suite) + ON r.test_suite_id = ts.id INNER JOIN {str_schema}.projects p - ON (r.project_code = p.project_code) + ON (ts.project_code = p.project_code) INNER JOIN {str_schema}.table_groups tg ON (ts.table_groups_id = tg.id) INNER JOIN {str_schema}.connections cn @@ -267,7 +267,9 @@ def get_test_disposition(str_run_id): def get_test_result_summary(str_run_id): str_schema = st.session_state["dbschema"] str_sql = f""" - SELECT passed_ct, warning_ct, failed_ct, + SELECT passed_ct, + warning_ct, + failed_ct, COALESCE(error_ct, 0) as error_ct FROM {str_schema}.test_runs WHERE id = '{str_run_id}'::UUID; @@ -329,7 +331,7 @@ def get_test_definition_uncached(str_schema, str_test_def_id): 
d.baseline_value, d.baseline_ct, d.baseline_avg, d.baseline_sd, d.threshold_value, d.subset_condition, d.groupby_names, d.having_condition, d.match_schema_name, d.match_table_name, d.match_column_names, d.match_subset_condition, - d.match_groupby_names, d.match_having_condition, + d.match_groupby_names, d.match_having_condition, d.window_date_column, d.window_days::VARCHAR as window_days, d.custom_query, d.severity, tt.default_severity, diff --git a/testgen/ui/views/test_runs.py b/testgen/ui/views/test_runs.py index f2d740d..bdaa407 100644 --- a/testgen/ui/views/test_runs.py +++ b/testgen/ui/views/test_runs.py @@ -85,12 +85,13 @@ def render(self) -> None: def run_test_suite_lookup_query(str_schema, str_project, str_tg=None): str_tg_condition = f" AND s.table_groups_id = '{str_tg}' " if str_tg else "" str_sql = f""" - SELECT s.id::VARCHAR(50), s.test_suite, s.test_suite_description + SELECT s.id::VARCHAR(50), + s.test_suite, + COALESCE(s.test_suite_description, s.test_suite) AS test_suite_description FROM {str_schema}.test_suites s - LEFT JOIN {str_schema}.table_groups tg - ON (s.table_groups_id = tg.id) + LEFT JOIN {str_schema}.table_groups tg ON s.table_groups_id = tg.id WHERE s.project_code = '{str_project}' {str_tg_condition} - ORDER BY s.test_suite + ORDER BY s.test_suite """ return db.retrieve_data(str_sql) @@ -114,7 +115,7 @@ def get_db_test_runs(str_project_code, str_tg=None, str_ts=None): str_ts_condition = f" AND s.id = '{str_ts}' " if str_ts else "" str_sql = f""" SELECT r.test_starttime as run_date, - r.test_suite, s.test_suite_description, + s.test_suite, s.test_suite_description, r.status, r.duration, r.test_ct, r.passed_ct, r.failed_ct, r.warning_ct, r.error_ct, @@ -126,13 +127,13 @@ def get_db_test_runs(str_project_code, str_tg=None, str_ts=None): p.project_name, s.table_groups_id::VARCHAR, tg.table_groups_name, tg.table_group_schema, process_id FROM {str_schema}.test_runs r - INNER JOIN {str_schema}.projects p - ON (r.project_code = p.project_code) 
INNER JOIN {str_schema}.test_suites s - ON (r.test_suite = s.test_suite) + ON (r.test_suite_id = s.id) INNER JOIN {str_schema}.table_groups tg ON (s.table_groups_id = tg.id) - WHERE r.project_code = '{str_project_code}' {str_tg_condition} {str_ts_condition} + INNER JOIN {str_schema}.projects p + ON (s.project_code = p.project_code) + WHERE s.project_code = '{str_project_code}' {str_tg_condition} {str_ts_condition} ORDER BY r.test_starttime DESC; """ diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py index 6ed52b1..3d5f618 100644 --- a/testgen/ui/views/test_suites.py +++ b/testgen/ui/views/test_suites.py @@ -184,7 +184,7 @@ def generate_tests_dialog(selected_test_suite): status_container = st.empty() test_ct, unlocked_test_ct, unlocked_edits_ct = test_suite_service.get_test_suite_refresh_warning( - selected_test_suite["table_groups_id"], selected_test_suite["id"] + selected_test_suite["id"] ) if test_ct: warning_msg = "" @@ -240,8 +240,8 @@ def generate_tests_dialog(selected_test_suite): @st.dialog(title="Delete Test Suite") def delete_test_suite_dialog(selected): selected_test_suite = selected[0] - test_suite_name = selected_test_suite["test_suite"] - can_be_deleted = test_suite_service.cascade_delete([test_suite_name], dry_run=True) + test_suite_id = selected_test_suite["id"] + can_be_deleted = test_suite_service.cascade_delete([test_suite_id], dry_run=True) fm.render_html_list( selected_test_suite, @@ -268,11 +268,11 @@ def delete_test_suite_dialog(selected): delete = st.form_submit_button("Delete", disabled=disable_delete_button, type="primary") if delete: - if test_suite_service.are_test_suites_in_use([test_suite_name]): + if test_suite_service.are_test_suites_in_use([test_suite_id]): st.error("This Test Suite is in use by a running process and cannot be deleted.") else: - test_suite_service.cascade_delete([test_suite_name]) - success_message = f"Test Suite {test_suite_name} has been deleted. 
" + test_suite_service.cascade_delete([test_suite_id]) + success_message = f"Test Suite {selected_test_suite['test_suite']} has been deleted. " st.success(success_message) time.sleep(1) st.rerun() @@ -457,12 +457,10 @@ def observability_export_dialog(selected_test_suite): if test_generation_button: button_container.empty() - test_suite_key = selected_test_suite["test_suite"] - project_key = selected_test_suite["project_code"] status_container.info("Executing Export ...") try: - qty_of_exported_events = export_test_results(project_key, test_suite_key) + qty_of_exported_events = export_test_results(selected_test_suite["id"]) status_container.empty() status_container.success( f"Process has successfully finished, {qty_of_exported_events} events have been exported." From 3ea89e9a3dcf59ee23dea9036ad250cdfb225fe9 Mon Sep 17 00:00:00 2001 From: Ricardo Boni Date: Tue, 27 Aug 2024 15:14:16 -0400 Subject: [PATCH 25/78] feat(table groups): Ensure Table Groups Name Uniqueness per project --- .../030_initialize_new_schema_structure.sql | 3 ++ .../dbupgrade/0110_incremental_upgrade.sql | 15 ++++++++ testgen/ui/views/table_groups.py | 36 ++++++++++--------- 3 files changed, 38 insertions(+), 16 deletions(-) create mode 100644 testgen/template/dbupgrade/0110_incremental_upgrade.sql diff --git a/testgen/template/dbsetup/030_initialize_new_schema_structure.sql b/testgen/template/dbsetup/030_initialize_new_schema_structure.sql index db24128..e2cd625 100644 --- a/testgen/template/dbsetup/030_initialize_new_schema_structure.sql +++ b/testgen/template/dbsetup/030_initialize_new_schema_structure.sql @@ -592,6 +592,9 @@ CREATE TABLE tg_revision ( revision INTEGER ); + +CREATE UNIQUE INDEX table_groups_name_unique ON table_groups(project_code, table_groups_name); + -- Index Connections CREATE UNIQUE INDEX uix_con_id ON connections(id); diff --git a/testgen/template/dbupgrade/0110_incremental_upgrade.sql b/testgen/template/dbupgrade/0110_incremental_upgrade.sql new file mode 100644 index 
0000000..6aa0712 --- /dev/null +++ b/testgen/template/dbupgrade/0110_incremental_upgrade.sql @@ -0,0 +1,15 @@ +SET SEARCH_PATH TO {SCHEMA_NAME}; + +WITH duplicates AS ( + SELECT + id, + ROW_NUMBER() OVER (PARTITION BY table_groups_name ORDER BY ctid) AS row_num + FROM + table_groups +) +UPDATE table_groups tg +SET table_groups_name = tg.table_groups_name || ' ' || to_hex((random() * 10000000)::int) +FROM duplicates d +WHERE tg.id = d.id AND d.row_num > 1; + +CREATE UNIQUE INDEX table_groups_name_unique ON table_groups(project_code, table_groups_name); diff --git a/testgen/ui/views/table_groups.py b/testgen/ui/views/table_groups.py index 43bbc0c..5d22356 100644 --- a/testgen/ui/views/table_groups.py +++ b/testgen/ui/views/table_groups.py @@ -3,6 +3,7 @@ import pandas as pd import streamlit as st +from sqlalchemy.exc import IntegrityError import testgen.ui.services.authentication_service as authentication_service import testgen.ui.services.connection_service as connection_service @@ -266,11 +267,7 @@ def show_table_group_form(mode, project_code, connection, selected=None): # establish default values table_group_id = selected_table_group["id"] if mode == "edit" else None - table_groups_name = ( - selected_table_group["table_groups_name"] - if mode == "edit" - else f'{connection["connection_name"]}_table_group' - ) + table_groups_name = selected_table_group["table_groups_name"] if mode == "edit" else "" table_group_schema = selected_table_group["table_group_schema"] if mode == "edit" else "" profiling_table_set = ( selected_table_group["profiling_table_set"] @@ -427,18 +424,25 @@ def show_table_group_form(mode, project_code, connection, selected=None): ) if submit: - if mode == "edit": - table_group_service.edit(entity) + + if not entity["table_groups_name"]: + st.error("'Name' is required. ") + return + + try: + if mode == "edit": + table_group_service.edit(entity) + success_message = "Changes have been saved successfully. 
" + else: + table_group_service.add(entity) + success_message = "New Table Group added successfully. " + except IntegrityError: + st.error("A Table Group with the same name already exists. ") + return else: - table_group_service.add(entity) - success_message = ( - "Changes have been saved successfully. " - if mode == "edit" - else "New Table Group added successfully. " - ) - st.success(success_message) - time.sleep(1) - st.rerun() + st.success(success_message) + time.sleep(1) + st.rerun() with table_groups_preview_tab: if mode == "edit": From a9e3783e0af70ef7d892674a7f047f54184ae26b Mon Sep 17 00:00:00 2001 From: Astor Date: Wed, 28 Aug 2024 17:03:11 -0300 Subject: [PATCH 26/78] Download-excel-file --- testgen/ui/services/form_service.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/testgen/ui/services/form_service.py b/testgen/ui/services/form_service.py index 9adef9f..41d51f9 100644 --- a/testgen/ui/services/form_service.py +++ b/testgen/ui/services/form_service.py @@ -259,11 +259,19 @@ def _generate_excel_export( def render_excel_export( df, lst_export_columns, str_export_title=None, str_caption=None, lst_wrap_columns=None, lst_column_headers=None ): - # Set up the download button + + if st.button(label=":blue[**⤓**]", use_container_width=True): + download_excel(df, lst_export_columns, str_export_title, str_caption, lst_wrap_columns, lst_column_headers) + + +@st.dialog(title="Download to Excel") +def download_excel( + df, lst_export_columns, str_export_title=None, str_caption=None, lst_wrap_columns=None, lst_column_headers=None +): + st.write(f'**Are you sure you want to download "{str_export_title}.xlsx"?**') + st.download_button( - label=":blue[**⤓**]", - use_container_width=True, - help="Download to Excel", + label="Yes", data=_generate_excel_export( df, lst_export_columns, str_export_title, str_caption, lst_wrap_columns, lst_column_headers ), From b6be8741ceef25d34842d9f9b6a7e63deacd13a1 Mon Sep 17 00:00:00 2001 From: 
Luis Trinidad Date: Thu, 29 Aug 2024 21:08:52 -0400 Subject: [PATCH 27/78] misc(ui): add custom component for cards --- testgen/ui/assets/style.css | 40 +++++++++++++++++++++ testgen/ui/components/widgets/__init__.py | 1 + testgen/ui/components/widgets/card.py | 43 +++++++++++++++++++++++ 3 files changed, 84 insertions(+) create mode 100644 testgen/ui/components/widgets/card.py diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css index a96e650..e112d95 100644 --- a/testgen/ui/assets/style.css +++ b/testgen/ui/assets/style.css @@ -14,6 +14,8 @@ body { --field-underline-color: #9e9e9e; --dk-text-value-background: aliceblue; + --dk-card-background: #fff; + } img.dk-logo-img { @@ -101,6 +103,43 @@ button[title="Show password text"] { } /* ... */ +/* Cards Component */ +[data-testid="stVerticalBlockBorderWrapper"]:has(> div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.testgen_card) { + background-color: var(--dk-card-background); +} + +[data-testid="stVerticalBlockBorderWrapper"]:has(> div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.testgen_card) .testgen_card-header > .testgen_card-title { + margin: unset; + padding: unset; + line-height: 25px; +} + +[data-testid="stVerticalBlockBorderWrapper"]:has(> div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.testgen_card) .testgen_card-header > .testgen_card-subtitle { + margin: unset; + padding: unset; + margin-top: 4px; + line-height: 15px; + color: var(--caption-text-color); + font-style: italic; +} + +[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.testgen_card-actions) [data-testid="stVerticalBlock"] { + width: 100%; + flex-direction: row; + justify-content: flex-end; +} + +[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > 
div.element-container > div.stHtml > i.testgen_card-actions) [data-testid="stVerticalBlock"] > div[data-testid="element-container"], +[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.testgen_card-actions) [data-testid="stVerticalBlock"] > div[data-testid="element-container"] > div[data-testid] { + width: auto !important; + max-height: 40px; +} +/* ... */ + +[data-testid="stVerticalBlock"]:has(> div.element-container > div.stHtml > i.no-flex-gap) { + gap: unset; +} + /* Dark mode */ @media (prefers-color-scheme: dark) { body { @@ -114,6 +153,7 @@ button[title="Show password text"] { --sidebar-active-item-color: #10141b; --sidebar-active-item-border-color: #b4e3c9; --dk-text-value-background: unset; + --dk-card-background: #14181f; } /* Main content */ diff --git a/testgen/ui/components/widgets/__init__.py b/testgen/ui/components/widgets/__init__.py index a627555..b98d5fe 100644 --- a/testgen/ui/components/widgets/__init__.py +++ b/testgen/ui/components/widgets/__init__.py @@ -4,3 +4,4 @@ from testgen.ui.components.widgets.expander_toggle import expander_toggle from testgen.ui.components.widgets.sidebar import sidebar from testgen.ui.components.widgets.summary_bar import summary_bar +from testgen.ui.components.widgets.card import card diff --git a/testgen/ui/components/widgets/card.py b/testgen/ui/components/widgets/card.py new file mode 100644 index 0000000..d43fc3c --- /dev/null +++ b/testgen/ui/components/widgets/card.py @@ -0,0 +1,43 @@ +import contextlib +import dataclasses +import typing + +import streamlit as st +from streamlit.delta_generator import DeltaGenerator + +CARD_CLASS: str = "testgen_card" +CARD_HEADER_CLASS: str = "testgen_card-header" +CARD_TITLE_CLASS: str = "testgen_card-title" +CARD_SUBTITLE_CLASS: str = "testgen_card-subtitle" +CARD_ACTIONS_CLASS: str = "testgen_card-actions" + + +@contextlib.contextmanager +def card( + title: str = 
"", + subtitle: str = "", + border: bool = True, + extra_css_class: str = "", +) -> typing.Generator["CardContext", None, None]: + with st.container(border=border): + st.html(f'') + + title_column, actions_column = st.columns([.5, .5], vertical_alignment="center") + if title or subtitle: + with title_column: + header_html: str = f'
' + if title: + header_html += f'

{title}

' + if subtitle: + header_html += f'{subtitle}' + header_html += '
' + st.html(header_html) + + actions_column.html(f'') + + yield CardContext(actions=actions_column) + + +@dataclasses.dataclass +class CardContext: + actions: DeltaGenerator From 9d8be9b359dffbf3331435055b3bb3885d7b9c26 Mon Sep 17 00:00:00 2001 From: Luis Trinidad Date: Thu, 29 Aug 2024 21:19:23 -0400 Subject: [PATCH 28/78] misc(ui): add reusable component for custom buttons --- testgen/ui/assets/style.css | 37 ++++++++ testgen/ui/components/frontend/css/shared.css | 21 +++++ .../frontend/js/components/button.js | 90 ++++++++++++++++--- testgen/ui/components/frontend/js/main.js | 10 +-- testgen/ui/components/frontend/js/utils.js | 12 +++ testgen/ui/components/widgets/__init__.py | 1 + testgen/ui/components/widgets/button.py | 39 ++++++++ 7 files changed, 193 insertions(+), 17 deletions(-) create mode 100644 testgen/ui/components/frontend/js/utils.js create mode 100644 testgen/ui/components/widgets/button.py diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css index e112d95..50ba9a1 100644 --- a/testgen/ui/assets/style.css +++ b/testgen/ui/assets/style.css @@ -16,6 +16,7 @@ body { --dk-text-value-background: aliceblue; --dk-card-background: #fff; + --dk-tooltip-background: rgb(255, 255, 255); } img.dk-logo-img { @@ -140,6 +141,41 @@ button[title="Show password text"] { gap: unset; } +/* Tooltips */ +[data-tooltip] { + position: relative; +} + +[data-tooltip]::after { + position: absolute; + opacity: 0; + pointer-events: none; + content: attr(data-tooltip); + left: 0; + top: calc(100% + 10px); + border-radius: 4px; + box-shadow: 0 0 5px 2px rgba(100, 100, 100, 0.6); + color: var(--primary-text-color); + background-color: var(--dk-tooltip-background); + z-index: 10; + padding: 8px; + width: 200px; + transform: translateY(-20px); + transition: all 150ms cubic-bezier(.25, .8, .25, 1); +} + +[data-tooltip]:hover::after { + opacity: 1; + transform: translateY(0); + transition-duration: 300ms; +} + +[data-tooltip-position="right"]::after { + left: unset; 
+ right: 0; +} +/* */ + /* Dark mode */ @media (prefers-color-scheme: dark) { body { @@ -154,6 +190,7 @@ button[title="Show password text"] { --sidebar-active-item-border-color: #b4e3c9; --dk-text-value-background: unset; --dk-card-background: #14181f; + --dk-tooltip-background: rgb(14, 17, 23); } /* Main content */ diff --git a/testgen/ui/components/frontend/css/shared.css b/testgen/ui/components/frontend/css/shared.css index ef2f7eb..4c5cc99 100644 --- a/testgen/ui/components/frontend/css/shared.css +++ b/testgen/ui/components/frontend/css/shared.css @@ -21,6 +21,21 @@ body { --sidebar-active-item-border-color: #b4e3c9; --field-underline-color: #9e9e9e; + + --button-text-color: var(--primary-text-color); + + --button-hover-state-background: var(--primary-color); + --button-hover-state-opacity: 0.12; + + --button-basic-text-color: var(--primary-color); + --button-basic-background: transparent; + + --button-flat-text-color: rgba(255, 255, 255); + --button-flat-background: rgba(0, 0, 0, .54); + + --button-stroked-text-color: var(--primary-color); + --button-stroked-background: transparent; + --button-stroked-border: 1px solid rgba(0, 0, 0, .12); } @media (prefers-color-scheme: dark) { @@ -35,5 +50,11 @@ body { --sidebar-active-item-color: #10141b; --sidebar-active-item-border-color: #b4e3c9; --dk-text-value-background: unset; + + --button-text-color: var(--primary-text-color); + + --button-flat-background: rgba(255, 255, 255, .54); + + --button-stroked-border: 1px solid rgba(255, 255, 255, .12); } } diff --git a/testgen/ui/components/frontend/js/components/button.js b/testgen/ui/components/frontend/js/components/button.js index d263267..02eedac 100644 --- a/testgen/ui/components/frontend/js/components/button.js +++ b/testgen/ui/components/frontend/js/components/button.js @@ -1,36 +1,65 @@ /** * @typedef Properties * @type {object} + * @property {(string)} type + * @property {(string|null)} label * @property {(string|null)} icon - * @property {(string|null)} class + 
* @property {(string|null)} tooltip + * @property {(string|null)} tooltipPosition * @property {(Function|null)} onclick */ +import { enforceElementWidth } from '../utils.js'; import van from '../van.min.js'; import { Streamlit } from '../streamlit.js'; const { button, i, span } = van.tags; +const BUTTON_TYPE = { + BASIC: 'basic', + FLAT: 'flat', + ICON: 'icon', + STROKED: 'stroked', +}; + +const Button = (/** @type Properties */ props) => { + Streamlit.setFrameHeight(40); + + const isIconOnly = props.type === BUTTON_TYPE.ICON || (props.icon?.val && !props.label?.val); + if (isIconOnly) { // Force a 40px width for the parent iframe & handle window resizing + enforceElementWidth(window.frameElement, 40); + } -const Button = (/** @type Properties */ props, /** @type string */ text) => { - Streamlit.setFrameHeight(); + if (props.tooltip) { + window.frameElement.parentElement.setAttribute('data-tooltip', props.tooltip.val); + window.frameElement.parentElement.setAttribute('data-tooltip-position', props.tooltipPosition.val); + } if (!window.testgen.loadedStylesheets.button) { document.adoptedStyleSheets.push(stylesheet); window.testgen.loadedStylesheets.button = true; } + const onClickHandler = props.onclick || post; return button( { - class: `tg-button ${props.icon ? 'tg-icon-button' : ''} ${props.class ?? ''}`, - onclick: props.onclick, + class: `tg-button tg-${props.type.val}-button ${props.type.val !== 'icon' && isIconOnly ? 'tg-icon-button' : ''}`, + onclick: onClickHandler, }, + span({class: 'tg-button-focus-state-indicator'}, ''), props.icon ? i({class: 'material-symbols-rounded'}, props.icon) : undefined, - span(text), + !isIconOnly ? 
span(props.label) : undefined, ); }; +function post() { + Streamlit.sendData({ value: Math.random() }); +} + const stylesheet = new CSSStyleSheet(); stylesheet.replace(` button.tg-button { + width: 100%; + height: 40px; + position: relative; overflow: hidden; @@ -41,26 +70,63 @@ button.tg-button { outline: 0; border: unset; - background: transparent; border-radius: 4px; - padding: 8px 16px; + padding: 8px 11px; - color: var(--primary-text-color); cursor: pointer; font-size: 14px; + color: var(--button-text-color); + background: var(--button-basic-background); +} + +button.tg-button .tg-button-focus-state-indicator::before { + content: ""; + opacity: 0; + top: 0; + left: 0; + right: 0; + bottom: 0; + position: absolute; + pointer-events: none; + border-radius: inherit; + background: var(--button-hover-state-background); +} - transition: background 400ms; +button.tg-button.tg-basic-button { + color: var(--button-basic-text-color); } -button.tg-button:hover { - background: rgba(0, 0, 0, 0.04); +button.tg-button.tg-flat-button { + color: var(--button-flat-text-color); + background: var(--button-flat-background); +} + +button.tg-button.tg-stroked-button { + color: var(--button-stroked-text-color); + background: var(--button-stroked-background); + border: var(--button-stroked-border); +} + +button.tg-button.tg-icon-button { + width: 40px; +} + +button.tg-button:has(span) { + padding: 8px 16px; } button.tg-button.tg-icon-button > i { font-size: 18px; +} + +button.tg-button > i:has(+ span) { margin-right: 8px; } + +button.tg-button:hover .tg-button-focus-state-indicator::before { + opacity: var(--button-hover-state-opacity); +} `); export { Button }; diff --git a/testgen/ui/components/frontend/js/main.js b/testgen/ui/components/frontend/js/main.js index 99dee47..c4c0859 100644 --- a/testgen/ui/components/frontend/js/main.js +++ b/testgen/ui/components/frontend/js/main.js @@ -8,22 +8,22 @@ import van from './van.min.js'; import { Streamlit } from './streamlit.js'; import { 
Button } from './components/button.js' -import { Select } from './components/select.js' import { Breadcrumbs } from './components/breadcrumbs.js' -import { SummaryBar } from './components/summary_bar.js'; import { ExpanderToggle } from './components/expander_toggle.js'; +import { Select } from './components/select.js' +import { SummaryBar } from './components/summary_bar.js'; let currentWindowVan = van; let topWindowVan = window.top.van; const TestGenComponent = (/** @type {string} */ id, /** @type {object} */ props) => { const componentById = { - select: Button, - button: Select, breadcrumbs: Breadcrumbs, - summary_bar: SummaryBar, + button: Button, expander_toggle: ExpanderToggle, + select: Select, sidebar: window.top.testgen.components.Sidebar, + summary_bar: SummaryBar, }; if (Object.keys(componentById).includes(id)) { diff --git a/testgen/ui/components/frontend/js/utils.js b/testgen/ui/components/frontend/js/utils.js new file mode 100644 index 0000000..7757caa --- /dev/null +++ b/testgen/ui/components/frontend/js/utils.js @@ -0,0 +1,12 @@ +function enforceElementWidth( + /** @type Element */element, + /** @type number */width, +) { + const observer = new ResizeObserver(() => { + element.width = width; + }); + + observer.observe(element); +} + +export { enforceElementWidth }; diff --git a/testgen/ui/components/widgets/__init__.py b/testgen/ui/components/widgets/__init__.py index b98d5fe..5eda61b 100644 --- a/testgen/ui/components/widgets/__init__.py +++ b/testgen/ui/components/widgets/__init__.py @@ -5,3 +5,4 @@ from testgen.ui.components.widgets.sidebar import sidebar from testgen.ui.components.widgets.summary_bar import summary_bar from testgen.ui.components.widgets.card import card +from testgen.ui.components.widgets.button import button diff --git a/testgen/ui/components/widgets/button.py b/testgen/ui/components/widgets/button.py new file mode 100644 index 0000000..da28825 --- /dev/null +++ b/testgen/ui/components/widgets/button.py @@ -0,0 +1,39 @@ +import 
import typing

from testgen.ui.components.utils.component import component

# Visual variants supported by the frontend button component
ButtonType = typing.Literal["basic", "flat", "icon", "stroked"]
TooltipPosition = typing.Literal["left", "right"]


def button(
    type: ButtonType = "basic",  # noqa: A002 -- shadows builtin, but the name is part of the public API
    label: str | None = None,
    icon: str | None = None,
    tooltip: str | None = None,
    tooltip_position: TooltipPosition = "left",
    on_click: typing.Callable[..., None] | None = None,
    key: str | None = None,
) -> None:
    """
    Testgen component to create custom styled buttons.

    # Parameters
    :param type: visual variant; one of "basic", "flat", "icon" or "stroked"
    :param label: text displayed inside the button; required for every
        variant except "icon"
    :param icon: icon name of material rounded icon fonts
    :param tooltip: optional hover text for the button
    :param tooltip_position: side on which the tooltip is rendered
    :param on_click: click handler for this button
    :param key: unique key to give the component a persisting state
    :raises ValueError: if a non-icon button is created without a label
    """

    props: dict[str, typing.Any] = {"type": type}
    if type != "icon":
        # Icon-only buttons are the single variant allowed to omit a label
        if not label:
            raise ValueError(f"A label is required for {type} buttons")
        props["label"] = label

    if icon:
        props["icon"] = icon

    if tooltip:
        props["tooltip"] = tooltip
        props["tooltipPosition"] = tooltip_position

    component(id_="button", key=key, props=props, on_change=on_click)
/**
 * Anchor-styled navigation element rendered inside a Streamlit component
 * iframe. Clicks are forwarded to the Python side (which performs the actual
 * routing) instead of letting the browser follow a real href.
 *
 * @typedef Properties
 * @type {object}
 * @property {string} href
 * @property {string} label
 * @property {boolean} underline
 * @property {string?} left_icon
 * @property {number?} left_icon_size
 * @property {string?} right_icon
 * @property {number?} right_icon_size
 * @property {number?} height
 * @property {string?} style
 */
import van from '../van.min.js';
import { Streamlit } from '../streamlit.js';

const { a, div, i, span } = van.tags;

const Link = (/** @type Properties */ props) => {
    // Default frame height fits a single line of text
    Streamlit.setFrameHeight(props.height?.val || 24);

    // Adopt the component stylesheet once per document
    if (!window.testgen.loadedStylesheets.link) {
        document.adoptedStyleSheets.push(stylesheet);
        window.testgen.loadedStylesheets.link = true;
    }

    const underlineModifier = props.underline.val ? 'tg-link--underline' : '';

    return a(
        {
            class: `tg-link ${underlineModifier}`,
            style: props.style,
            onclick: () => navigate(props.href.val),
        },
        div(
            {class: 'tg-link--wrapper'},
            props.left_icon ? LinkIcon(props.left_icon, props.left_icon_size, 'left') : undefined,
            span({class: 'tg-link--text'}, props.label),
            props.right_icon ? LinkIcon(props.right_icon, props.right_icon_size, 'right') : undefined,
        ),
    );
};

// Material icon rendered on either side of the link text
const LinkIcon = (
    /** @type string */icon,
    /** @type number */size,
    /** @type string */position,
) => i(
    {class: `material-symbols-rounded tg-link--icon tg-link--icon-${position}`, style: `font-size: ${size.val}px;`},
    icon,
);

// Report the clicked href back to the Python side of the component
function navigate(href) {
    Streamlit.sendData({ href });
}

const stylesheet = new CSSStyleSheet();
stylesheet.replace(`
    .tg-link {
        width: fit-content;
        display: flex;
        flex-direction: column;
        text-decoration: unset !important;
        color: var(--link-color);
        cursor: pointer;
    }

    .tg-link .tg-link--wrapper {
        display: flex;
        align-items: center;
    }

    .tg-link.tg-link--underline::after {
        content: "";
        height: 0;
        width: 0;
        border-top: 1px solid #1976d2; /* pseudo elements do not inherit variables */
        transition: width 50ms linear;
    }

    .tg-link.tg-link--underline:hover::after {
        width: 100%;
    }
`);

export { Link };
from testgen.ui.components.utils.component import component
from testgen.ui.navigation.router import Router


def link(
    href: str,
    label: str,
    *,
    underline: bool = True,
    left_icon: str | None = None,
    left_icon_size: float = 20.0,
    right_icon: str | None = None,
    right_icon_size: float = 20.0,
    height: float | None = 21.0,
    style: str | None = None,
    key: str | None = None,
) -> None:
    """
    Testgen component to render a styled navigation link that routes through
    the app Router instead of triggering a full page load.

    :param href: target page path passed to Router().navigate on click
    :param label: link text
    :param underline: render the animated underline on hover
    :param left_icon: optional material icon name rendered before the label
    :param left_icon_size: font size in px for the left icon
    :param right_icon: optional material icon name rendered after the label
    :param right_icon_size: font size in px for the right icon
    :param height: iframe height in px for the embedded component
    :param style: extra inline CSS applied to the anchor element
    :param key: unique key to give the component a persisting state
    """
    if not key:
        # Deterministic fallback: a random key would change on every Streamlit
        # rerun, remounting the component and dropping the pending click value
        # (so navigation could be silently lost). Hash is stable within the
        # server process, which is all Streamlit widget identity requires.
        key = f"testgen:link:{hash((href, label, style)) % 1_000_000}"

    props = {
        "href": href,
        "label": label,
        "height": height,
        "underline": underline,
    }
    if left_icon:
        props.update({"left_icon": left_icon, "left_icon_size": left_icon_size})

    if right_icon:
        props.update({"right_icon": right_icon, "right_icon_size": right_icon_size})

    if style:
        props.update({"style": style})

    clicked = component(id_="link", key=key, props=props)
    if clicked:
        Router().navigate(to=href, with_args={})
00:00:00 2001 From: Luis Trinidad Date: Thu, 29 Aug 2024 21:23:11 -0400 Subject: [PATCH 30/78] fix(ui): use the session value as default for the expand toggle --- .../components/frontend/js/components/expander_toggle.js | 1 - testgen/ui/components/widgets/expander_toggle.py | 7 ++++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/testgen/ui/components/frontend/js/components/expander_toggle.js b/testgen/ui/components/frontend/js/components/expander_toggle.js index 1a31d9b..0a5220d 100644 --- a/testgen/ui/components/frontend/js/components/expander_toggle.js +++ b/testgen/ui/components/frontend/js/components/expander_toggle.js @@ -18,7 +18,6 @@ const ExpanderToggle = (/** @type Properties */ props) => { window.testgen.loadedStylesheets.expanderToggle = true; } - console.log(props) const expandedState = van.state(!!props.default.val); const expandLabel = props.expandLabel.val || 'Expand'; const collapseLabel = props.collapseLabel.val || 'Collapse'; diff --git a/testgen/ui/components/widgets/expander_toggle.py b/testgen/ui/components/widgets/expander_toggle.py index 0b776c7..16e1bf3 100644 --- a/testgen/ui/components/widgets/expander_toggle.py +++ b/testgen/ui/components/widgets/expander_toggle.py @@ -1,5 +1,7 @@ import logging +import streamlit as st + from testgen.ui.components.utils.component import component LOG = logging.getLogger("testgen") @@ -10,7 +12,7 @@ def expander_toggle( expand_label: str | None = None, collapse_label: str | None = None, key: str = "testgen:expander_toggle", -) -> None: +) -> bool: """ Testgen component to display a toggle for an expandable container. 
def get_by_table_group(schema, project_code, table_group_id):
    """
    Return all test suites for a table group, enriched with the count of test
    definitions and a summary of each suite's latest test run.

    :param schema: database schema holding the testgen tables
    :param project_code: project the table group belongs to
    :param table_group_id: id of the table group to list suites for
    :return: result of db.retrieve_data for the query (one row per suite)

    NOTE(review): inputs are interpolated directly into the SQL string;
    confirm upstream that project_code / table_group_id are never
    user-controlled, or switch db.retrieve_data to bound parameters.
    """
    sql = f"""
           SELECT
               suites.id::VARCHAR(50),
               suites.project_code,
               suites.test_suite,
               suites.connection_id::VARCHAR(50),
               suites.table_groups_id::VARCHAR(50),
               suites.test_suite_description,
               suites.test_action,
               CASE WHEN suites.severity IS NULL THEN 'Inherit' ELSE suites.severity END,
               suites.export_to_observability,
               suites.test_suite_schema,
               suites.component_key,
               suites.component_type,
               suites.component_name,
               COUNT(definitions.id) as test_ct,
               MAX(last_run.test_starttime) as latest_run_start,
               MAX(last_run.passed_ct) as last_run_passed_ct,
               MAX(last_run.warning_ct) as last_run_warning_ct,
               MAX(last_run.failed_ct) as last_run_failed_ct,
               MAX(last_run.error_ct) as last_run_error_ct
           FROM {schema}.test_suites as suites
           LEFT OUTER JOIN (
               -- Latest run PER SUITE. A bare "ORDER BY ... LIMIT 1" subquery
               -- would return the single most recent run across ALL suites,
               -- leaving every other suite with no latest-run data.
               SELECT DISTINCT ON (test_suite_id) *
               FROM {schema}.test_runs
               ORDER BY test_suite_id, test_starttime DESC
           ) AS last_run ON (last_run.test_suite_id = suites.id)
           LEFT OUTER JOIN {schema}.test_definitions AS definitions
               ON (definitions.test_suite_id = suites.id)
           WHERE suites.project_code = '{project_code}'
               AND suites.table_groups_id = '{table_group_id}'
           GROUP BY suites.id
           ORDER BY suites.test_suite;
    """
    return db.retrieve_data(sql)
test_suite_service.get_by_table_group(project_code, table_group_id) - - show_columns = [ - "test_suite", - "test_suite_description", - "severity", - "export_to_observability", - "component_key", - "component_type", - "component_name", - ] - - selected = fm.render_grid_select(df, show_columns) - - if tool_bar.short_slots[1].button("➕ Add", help="Add a new Test Run", use_container_width=True): # NOQA RUF001 - add_test_suite_dialog(project_code, connection, table_group) - - disable_buttons = selected is None - if tool_bar.short_slots[2].button( - "🖊️ Edit", help="Edit the selected Test Run", disabled=disable_buttons, use_container_width=True - ): - edit_test_suite_dialog(project_code, connection, table_group, selected) - - if tool_bar.short_slots[3].button( - "❌ Delete", help="Delete the selected Test Run", disabled=disable_buttons, use_container_width=True - ): - delete_test_suite_dialog(selected) - - if tool_bar.short_slots[4].button( - f":{'gray' if disable_buttons else 'green'}[Tests →]", - help="View and edit Test Definitions for selected Test Suite", - disabled=disable_buttons, - use_container_width=True, - ): - st.session_state["test_suite"] = selected[0] - - self.router.navigate( - "connections:test-definitions", - { - "connection_id": connection["connection_id"], - "table_group_id": table_group_id, - "test_suite_id": selected[0]["id"], - }, + with tool_bar[3]: + st.button( + ":material/add: Add Test Suite", + key="test_suite:keys:add", + help="Add a new test suite", + use_container_width=True, + on_click=lambda: add_test_suite_dialog(project_code, connection, table_group), ) - if not selected: - st.markdown(":orange[Select a row to see Test Suite details.]") - else: - show_record_detail(project_code, selected[0]) - - -def show_record_detail(project_code, selected): - left_column, right_column = st.columns([0.5, 0.5]) - - with left_column: - fm.render_html_list( - selected, - [ - "id", - "project_code", - "test_suite", - "connection_id", - "table_groups_id", - 
"test_suite_description", - "severity", - "export_to_observability", - "component_key", - "component_name", - "component_type", - ], - "Test Suite Information", - int_data_width=700, - ) - - with right_column: - # st.write("

", unsafe_allow_html=True) - _, button_column = st.columns([0.2, 0.8]) - with button_column: - run_now_commands_tab, cli_commands_tab = st.tabs(["Test Suite Actions", "View CLI Commands"]) - - with cli_commands_tab: - if st.button( - "Test Generation Command", - help="Shows the run-test-generation CLI command", - use_container_width=True, - ): - generate_tests_cli_dialog(selected) - - if st.button( - "Test Execution Command", - help="Shows the run-tests CLI command", - use_container_width=True, - ): - run_tests_cli_dialog(project_code, selected) - - if st.button( - "Observability Export Command", - help="Shows the export-observability CLI command", - use_container_width=True, - ): - observability_export_cli_dialog(selected) - - with run_now_commands_tab: - if st.button("Run Test Generation", help="Run Test Generation", use_container_width=True): - generate_tests_dialog(selected) - - if st.button("Run Test Execution", help="Run the tests", use_container_width=True): - run_tests_dialog(project_code, selected) - - if st.button( - "Run Observability Export", - help="Exports test results to Observability for the current Test Suite", - use_container_width=True, - ): - observability_export_dialog(selected) - - -@st.dialog(title="Generate Tests") -def generate_tests_dialog(selected_test_suite): - container = st.empty() - with container: - st.markdown(":green[**Execute Test Generation for the Test Suite**]") - - warning_container = st.container() - options_container = st.container() - button_container = st.empty() - status_container = st.empty() - - test_ct, unlocked_test_ct, unlocked_edits_ct = test_suite_service.get_test_suite_refresh_warning( - selected_test_suite["id"] - ) - if test_ct: - warning_msg = "" - counts_msg = f"\n\nAuto-Generated Tests: {test_ct}, Unlocked: {unlocked_test_ct}, Edited Unlocked: {unlocked_edits_ct}" - if unlocked_edits_ct > 0: - if unlocked_edits_ct > 1: - warning_msg = "Manual changes have been made to auto-generated tests in this Test Suite 
that have not been locked. " - else: - warning_msg = "A manual change has been made to an auto-generated test in this Test Suite that has not been locked. " - elif unlocked_test_ct > 0: - warning_msg = "Auto-generated tests are present in this Test Suite that have not been locked. " - warning_msg = f"{warning_msg}Generating tests now will overwrite unlocked tests subject to auto-generation based on the latest profiling.{counts_msg}" - with warning_container: - st.warning(warning_msg) - if unlocked_edits_ct > 0: - lock_edits_button = st.button("Lock Edited Tests") - if lock_edits_button: - edits_locked = test_suite_service.lock_edited_tests(selected_test_suite["id"]) - if edits_locked: - st.info("Edited tests have been successfully locked.") - - with options_container: - lst_generation_sets = test_suite_service.get_generation_set_choices() - if lst_generation_sets: - lst_generation_sets.insert(0, "(All Test Types)") - str_generation_set = st.selectbox("Generation Set", lst_generation_sets) - if str_generation_set == "(All Test Types)": - str_generation_set = "" - else: - str_generation_set = "" - - with button_container: - start_process_button_message = "Start" - test_generation_button = st.button(start_process_button_message) - - if test_generation_button: - button_container.empty() - - table_group_id = selected_test_suite["table_groups_id"] - test_suite_key = selected_test_suite["test_suite"] - status_container.info("Executing Test Generation...") - - try: - run_test_gen_queries(table_group_id, test_suite_key, str_generation_set) - except Exception as e: - status_container.empty() - status_container.error(f"Process had errors: {e!s}.") - - status_container.empty() - status_container.success("Process has successfully finished.") - + df = test_suite_service.get_by_table_group(project_code, table_group_id) -@st.dialog(title="Delete Test Suite") -def delete_test_suite_dialog(selected): - selected_test_suite = selected[0] - test_suite_id = selected_test_suite["id"] - 
can_be_deleted = test_suite_service.cascade_delete([test_suite_id], dry_run=True) + for _, test_suite in df.iterrows(): + subtitle = f"{connection['connection_name']} > {table_group['table_groups_name']}" + with testgen.card(title=test_suite["test_suite"], subtitle=subtitle) as test_suite_card: + with test_suite_card.actions: + testgen.button( + type="icon", + icon="output", + tooltip="Export results to observability", + tooltip_position="right", + on_click=lambda: observability_export_dialog(test_suite), + key=f"test_suite:keys:export:{test_suite['id']}", + ) + testgen.button( + type="icon", + icon="edit", + tooltip="Edit test suite", + tooltip_position="right", + on_click=lambda: edit_test_suite_dialog(project_code, connection, table_group, test_suite), + key=f"test_suite:keys:edit:{test_suite['id']}", + ) + testgen.button( + type="icon", + icon="delete", + tooltip="Delete test suite", + tooltip_position="right", + on_click=lambda: delete_test_suite_dialog(test_suite), + key=f"test_suite:keys:delete:{test_suite['id']}", + ) + + main_section, latest_run_section, actions_section = st.columns([.4, .4, .2]) + + with main_section: + testgen.link( + label=f"{test_suite['test_ct']} tests definitions", + href="test-definitions", + right_icon="chevron_right", + key=f"test_suite:keys:go-to-definitions:{test_suite['id']}", + ) + + st.html(f""" +
+
Description
+

{test_suite['test_suite_description']}

+
+ """) + + if (latest_run_start := test_suite['latest_run_start']) and not pd.isnull(latest_run_start): + with latest_run_section: + st.html('') + st.html('
Latest Run
') + testgen.link( + label=latest_run_start.strftime("%B %d, %H:%M %p"), + href="test-runs", + right_icon="chevron_right", + style="margin-bottom: 8px;", + height=29, + key=f"test_suite:keys:go-to-runs:{test_suite['id']}", + ) + testgen.summary_bar( + items=[ + { "label": "Passed", "value": int(test_suite["last_run_passed_ct"]), "color": "green" }, + { "label": "Warnings", "value": int(test_suite["last_run_warning_ct"]), "color": "yellow" }, + { "label": "Failed", "value": int(test_suite["last_run_failed_ct"]), "color": "red" }, + { "label": "Errors", "value": int(test_suite["last_run_error_ct"]), "color": "grey" }, + ], + height=30, + width=100, + key=f"test_suite:keys:run-rummary:{test_suite['id']}", + ) + + with actions_section: + testgen.button( + type="stroked", + label="Run Tests", + on_click=lambda: run_tests_dialog(project_code, test_suite), + key=f"test_suite:keys:runtests:{test_suite['id']}", + ) + testgen.button( + type="stroked", + label="Generate Tests", + on_click=lambda: generate_tests_dialog(test_suite), + key=f"test_suite:keys:generatetests:{test_suite['id']}", + ) - fm.render_html_list( - selected_test_suite, - [ - "id", - "test_suite", - "test_suite_description", - ], - "Test Suite Information", - int_data_width=700, - ) - if not can_be_deleted: - st.markdown( - ":orange[This Test Suite has related data, which includes test definitions and may include test results. If you proceed, all related data will be permanently deleted.
Are you sure you want to proceed?]", - unsafe_allow_html=True, - ) - accept_cascade_delete = st.toggle("I accept deletion of this Test Suite and all related TestGen data.") +@st.dialog(title="Add Test Suite") +def add_test_suite_dialog(project_code, connection, table_group): + show_test_suite("add", project_code, connection, table_group) - with st.form("Delete Test Suite", clear_on_submit=True): - disable_delete_button = authentication_service.current_user_has_read_role() or ( - not can_be_deleted and not accept_cascade_delete - ) - delete = st.form_submit_button("Delete", disabled=disable_delete_button, type="primary") - if delete: - if test_suite_service.are_test_suites_in_use([test_suite_id]): - st.error("This Test Suite is in use by a running process and cannot be deleted.") - else: - test_suite_service.cascade_delete([test_suite_id]) - success_message = f"Test Suite {selected_test_suite['test_suite']} has been deleted. " - st.success(success_message) - time.sleep(1) - st.rerun() +@st.dialog(title="Edit Test Suite") +def edit_test_suite_dialog(project_code, connection, table_group, selected): + show_test_suite("edit", project_code, connection, table_group, selected) def show_test_suite(mode, project_code, connection, table_group, selected=None): connection_id = connection["connection_id"] table_group_id = table_group["id"] severity_options = ["Inherit", "Failed", "Warning"] - - selected_test_suite = selected[0] if mode == "edit" else None + selected_test_suite = selected if mode == "edit" else None if mode == "edit" and not selected_test_suite["severity"]: selected_test_suite["severity"] = severity_options[0] @@ -307,7 +190,7 @@ def show_test_suite(mode, project_code, connection, table_group, selected=None): with expander: expander_left_column, expander_right_column = st.columns([0.50, 0.50]) - with st.form("Test Suite Add / Edit", clear_on_submit=True): + with st.form("Test Suite Add / Edit", clear_on_submit=True, border=False): entity = { "id": test_suite_id, 
"project_code": project_code, @@ -351,10 +234,13 @@ def show_test_suite(mode, project_code, connection, table_group, selected=None): ), } - submit_button_text = "Save" if mode == "edit" else "Add" - submit = st.form_submit_button( - submit_button_text, disabled=authentication_service.current_user_has_read_role() - ) + _, button_column = st.columns([.85, .15]) + with button_column: + submit = st.form_submit_button( + "Save" if mode == "edit" else "Add", + use_container_width=True, + disabled=authentication_service.current_user_has_read_role(), + ) if submit: if " " in entity["test_suite"]: @@ -377,33 +263,82 @@ def show_test_suite(mode, project_code, connection, table_group, selected=None): st.rerun() -@st.dialog(title="Add Test Suite") -def add_test_suite_dialog(project_code, connection, table_group): - show_test_suite("add", project_code, connection, table_group) +@st.dialog(title="Delete Test Suite") +def delete_test_suite_dialog(selected_test_suite): + test_suite_id = selected_test_suite["id"] + test_suite_name = selected_test_suite["test_suite"] + can_be_deleted = test_suite_service.cascade_delete([test_suite_id], dry_run=True) + fm.render_html_list( + selected_test_suite, + [ + "id", + "test_suite", + "test_suite_description", + ], + "Test Suite Information", + int_data_width=700, + ) -@st.dialog(title="Edit Test Suite") -def edit_test_suite_dialog(project_code, connection, table_group, selected): - show_test_suite("edit", project_code, connection, table_group, selected) + if not can_be_deleted: + st.markdown( + ":orange[This Test Suite has related data, which includes test definitions and may include test results. If you proceed, all related data will be permanently deleted.
Are you sure you want to proceed?]", + unsafe_allow_html=True, + ) + accept_cascade_delete = st.toggle("I accept deletion of this Test Suite and all related TestGen data.") + + with st.form("Delete Test Suite", clear_on_submit=True, border=False): + disable_delete_button = authentication_service.current_user_has_read_role() or ( + not can_be_deleted and not accept_cascade_delete + ) + + delete = False + _, button_column = st.columns([.85, .15]) + with button_column: + delete = st.form_submit_button( + "Delete", + type="primary", + disabled=disable_delete_button, + use_container_width=True, + ) + + if delete: + if test_suite_service.are_test_suites_in_use([test_suite_id]): + st.error("This Test Suite is in use by a running process and cannot be deleted.") + else: + test_suite_service.cascade_delete([test_suite_id]) + success_message = f"Test Suite {test_suite_name} has been deleted. " + st.success(success_message) + time.sleep(1) + st.rerun() @st.dialog(title="Run Tests") def run_tests_dialog(project_code, selected_test_suite): - container = st.empty() - with container: - st.markdown(":green[**Run Tests for the Test Suite**]") + test_suite_key = selected_test_suite["test_suite"] + start_process_button_message = "Start" + + with st.container(): + st.markdown(f"Run tests for the test suite :green[{test_suite_key}]?") + + if testgen.expander_toggle(expand_label="Show CLI command", key="test_suite:keys:run-tests-show-cli"): + st.code( + f"testgen run-tests --project-key {project_code} --test-suite-key {selected_test_suite['test_suite']}", + language="shellSession" + ) button_container = st.empty() status_container = st.empty() + run_test_button = None with button_container: - start_process_button_message = "Start" - run_test_button = st.button(start_process_button_message) + _, button_column = st.columns([.85, .15]) + with button_column: + run_test_button = st.button(start_process_button_message, use_container_width=True) if run_test_button: button_container.empty() - 
test_suite_key = selected_test_suite["test_suite"] status_container.info(f"Running tests for test suite {test_suite_key}") try: @@ -418,41 +353,106 @@ def run_tests_dialog(project_code, selected_test_suite): ) -@st.dialog(title="Run Tests CLI Command") -def run_tests_cli_dialog(project_code, selected_test_suite): - test_suite_name = selected_test_suite["test_suite"] - command = f"testgen run-tests --project-key {project_code} --test-suite-key {test_suite_name}" - st.code(command, language="shellSession") - - -@st.dialog(title="Generate Tests CLI Command") -def generate_tests_cli_dialog(selected_test_suite): +@st.dialog(title="Generate Tests") +def generate_tests_dialog(selected_test_suite): + test_suite_id = selected_test_suite["id"] test_suite_key = selected_test_suite["test_suite"] table_group_id = selected_test_suite["table_groups_id"] - command = f"testgen run-test-generation --table-group-id {table_group_id} --test-suite-key {test_suite_key}" - st.code(command, language="shellSession") + start_process_button_message = "Start" + with st.container(): + st.markdown(f"Execute the test generation for test suite :green[{test_suite_key}]?") -@st.dialog(title="Observability Export CLI Command") -def observability_export_cli_dialog(selected_test_suite): - test_suite_key = selected_test_suite["test_suite"] - project_key = selected_test_suite["project_code"] - command = f"testgen export-observability --project-key {project_key} --test-suite-key {test_suite_key}" - st.code(command, language="shellSession") + warning_container = st.container() + options_container = st.container() + + if testgen.expander_toggle(expand_label="Show CLI command", key="test_suite:keys:generate-tests-show-cli"): + st.code( + f"testgen run-test-generation --table-group-id {table_group_id} --test-suite-key {test_suite_key}", + language="shellSession", + ) + + button_container = st.empty() + status_container = st.empty() + + test_ct, unlocked_test_ct, unlocked_edits_ct = 
test_suite_service.get_test_suite_refresh_warning(test_suite_id) + if test_ct: + warning_msg = "" + counts_msg = f"\n\nAuto-Generated Tests: {test_ct}, Unlocked: {unlocked_test_ct}, Edited Unlocked: {unlocked_edits_ct}" + if unlocked_edits_ct > 0: + if unlocked_edits_ct > 1: + + warning_msg = "Manual changes have been made to auto-generated tests in this Test Suite that have not been locked. " + else: + warning_msg = "A manual change has been made to an auto-generated test in this Test Suite that has not been locked. " + elif unlocked_test_ct > 0: + warning_msg = "Auto-generated tests are present in this Test Suite that have not been locked. " + warning_msg = f"{warning_msg}Generating tests now will overwrite unlocked tests subject to auto-generation based on the latest profiling.{counts_msg}" + with warning_container: + st.warning(warning_msg) + if unlocked_edits_ct > 0: + lock_edits_button = st.button("Lock Edited Tests") + if lock_edits_button: + edits_locked = test_suite_service.lock_edited_tests(test_suite_id) + if edits_locked: + st.info("Edited tests have been successfully locked.") + + with options_container: + lst_generation_sets = test_suite_service.get_generation_set_choices() + if lst_generation_sets: + lst_generation_sets.insert(0, "(All Test Types)") + str_generation_set = st.selectbox("Generation Set", lst_generation_sets) + if str_generation_set == "(All Test Types)": + str_generation_set = "" + else: + str_generation_set = "" + + test_generation_button = None + with button_container: + _, button_column = st.columns([.85, .15]) + with button_column: + test_generation_button = st.button(start_process_button_message, use_container_width=True) + + if test_generation_button: + button_container.empty() + + table_group_id = selected_test_suite["table_groups_id"] + test_suite_key = selected_test_suite["test_suite"] + status_container.info("Executing Test Generation...") + + try: + run_test_gen_queries(table_group_id, test_suite_key, str_generation_set) + 
except Exception as e: + status_container.empty() + status_container.error(f"Process had errors: {e!s}.") + + status_container.empty() + status_container.success("Process has successfully finished.") @st.dialog(title="Export to Observability") def observability_export_dialog(selected_test_suite): - container = st.empty() - with container: - st.markdown(":green[**Execute the test export for the current Test Suite**]") + project_key = selected_test_suite["project_code"] + test_suite_key = selected_test_suite["test_suite"] + start_process_button_message = "Start" + + with st.container(): + st.markdown(f"Execute the test export for test suite :green[{test_suite_key}]?") + + if testgen.expander_toggle(expand_label="Show CLI command", key="test_suite:keys:export-tests-show-cli"): + st.code( + f"testgen export-observability --project-key {project_key} --test-suite-key {test_suite_key}", + language="shellSession" + ) button_container = st.empty() status_container = st.empty() + test_generation_button = None with button_container: - start_process_button_message = "Start" - test_generation_button = st.button(start_process_button_message) + _, button_column = st.columns([.85, .15]) + with button_column: + test_generation_button = st.button(start_process_button_message, use_container_width=True) if test_generation_button: button_container.empty() From e1f899eecb31345b46fd100a055b5e95377507ce Mon Sep 17 00:00:00 2001 From: Luis Trinidad Date: Thu, 29 Aug 2024 21:59:51 -0400 Subject: [PATCH 32/78] fix(ui): count of test definitions for each test suite card --- testgen/ui/queries/test_suite_queries.py | 17 ++++++++++------- testgen/ui/views/test_suites.py | 11 ++++++----- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/testgen/ui/queries/test_suite_queries.py b/testgen/ui/queries/test_suite_queries.py index a406635..7293ecb 100644 --- a/testgen/ui/queries/test_suite_queries.py +++ b/testgen/ui/queries/test_suite_queries.py @@ -21,18 +21,21 @@ def 
get_by_table_group(schema, project_code, table_group_id): suites.component_key, suites.component_type, suites.component_name, - COALESCE(last_run.test_ct, 0) as test_ct, - last_run.test_starttime as latest_run_start, - last_run.passed_ct as last_run_passed_ct, - last_run.warning_ct as last_run_warning_ct, - last_run.failed_ct as last_run_failed_ct, - last_run.error_ct as last_run_error_ct + COUNT(definitions.id) as test_ct, + MAX(last_run.test_starttime) as latest_run_start, + MAX(last_run.passed_ct) as last_run_passed_ct, + MAX(last_run.warning_ct) as last_run_warning_ct, + MAX(last_run.failed_ct) as last_run_failed_ct, + MAX(last_run.error_ct) as last_run_error_ct FROM {schema}.test_suites as suites LEFT OUTER JOIN ( SELECT * FROM {schema}.test_runs ORDER BY test_starttime DESC LIMIT 1 - ) AS last_run ON (last_run.project_code = suites.project_code AND last_run.test_suite = suites.test_suite) + ) AS last_run ON (last_run.test_suite_id = suites.id) + LEFT OUTER JOIN {schema}.test_definitions AS definitions + ON (definitions.test_suite_id = suites.id) WHERE suites.project_code = '{project_code}' AND suites.table_groups_id = '{table_group_id}' + GROUP BY suites.id ORDER BY suites.test_suite; """ return db.retrieve_data(sql) diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py index abc26c7..5e2f9b7 100644 --- a/testgen/ui/views/test_suites.py +++ b/testgen/ui/views/test_suites.py @@ -1,5 +1,6 @@ import time import typing +from functools import partial import pandas as pd import streamlit as st @@ -76,7 +77,7 @@ def render(self, connection_id: str | None = None, table_group_id: str | None = icon="output", tooltip="Export results to observability", tooltip_position="right", - on_click=lambda: observability_export_dialog(test_suite), + on_click=partial(observability_export_dialog, test_suite), key=f"test_suite:keys:export:{test_suite['id']}", ) testgen.button( @@ -84,7 +85,7 @@ def render(self, connection_id: str | None = None, table_group_id: 
str | None = icon="edit", tooltip="Edit test suite", tooltip_position="right", - on_click=lambda: edit_test_suite_dialog(project_code, connection, table_group, test_suite), + on_click=partial(edit_test_suite_dialog, project_code, connection, table_group, test_suite), key=f"test_suite:keys:edit:{test_suite['id']}", ) testgen.button( @@ -92,7 +93,7 @@ def render(self, connection_id: str | None = None, table_group_id: str | None = icon="delete", tooltip="Delete test suite", tooltip_position="right", - on_click=lambda: delete_test_suite_dialog(test_suite), + on_click=partial(delete_test_suite_dialog, test_suite), key=f"test_suite:keys:delete:{test_suite['id']}", ) @@ -141,13 +142,13 @@ def render(self, connection_id: str | None = None, table_group_id: str | None = testgen.button( type="stroked", label="Run Tests", - on_click=lambda: run_tests_dialog(project_code, test_suite), + on_click=partial(run_tests_dialog, project_code, test_suite), key=f"test_suite:keys:runtests:{test_suite['id']}", ) testgen.button( type="stroked", label="Generate Tests", - on_click=lambda: generate_tests_dialog(test_suite), + on_click=partial(generate_tests_dialog, test_suite), key=f"test_suite:keys:generatetests:{test_suite['id']}", ) From ce86ab302d3c5af41d6f267f44c0e58785116e29 Mon Sep 17 00:00:00 2001 From: Luis Trinidad Date: Fri, 30 Aug 2024 10:38:59 -0400 Subject: [PATCH 33/78] misc: address linting issues --- testgen/ui/components/widgets/__init__.py | 6 +++--- testgen/ui/components/widgets/button.py | 8 ++++---- testgen/ui/components/widgets/card.py | 2 +- testgen/ui/components/widgets/link.py | 4 ++-- testgen/ui/views/test_suites.py | 14 +++++++------- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/testgen/ui/components/widgets/__init__.py b/testgen/ui/components/widgets/__init__.py index 4c40b33..ee16b62 100644 --- a/testgen/ui/components/widgets/__init__.py +++ b/testgen/ui/components/widgets/__init__.py @@ -1,9 +1,9 @@ # ruff: noqa: F401 from 
testgen.ui.components.widgets.breadcrumbs import breadcrumbs +from testgen.ui.components.widgets.button import button +from testgen.ui.components.widgets.card import card from testgen.ui.components.widgets.expander_toggle import expander_toggle +from testgen.ui.components.widgets.link import link from testgen.ui.components.widgets.sidebar import sidebar from testgen.ui.components.widgets.summary_bar import summary_bar -from testgen.ui.components.widgets.card import card -from testgen.ui.components.widgets.link import link -from testgen.ui.components.widgets.button import button diff --git a/testgen/ui/components/widgets/button.py b/testgen/ui/components/widgets/button.py index da28825..c248981 100644 --- a/testgen/ui/components/widgets/button.py +++ b/testgen/ui/components/widgets/button.py @@ -7,7 +7,7 @@ def button( - type: ButtonType = "basic", + type_: ButtonType = "basic", label: str | None = None, icon: str | None = None, tooltip: str | None = None, @@ -24,10 +24,10 @@ def button( :param on_click: click handler for this button """ - props = {"type": type} - if type != "icon": + props = {"type": type_} + if type_ != "icon": if not label: - raise ValueError(f"A label is required for {type} buttons") + raise ValueError(f"A label is required for {type_} buttons") props.update({"label": label}) if icon: diff --git a/testgen/ui/components/widgets/card.py b/testgen/ui/components/widgets/card.py index d43fc3c..afcd9ed 100644 --- a/testgen/ui/components/widgets/card.py +++ b/testgen/ui/components/widgets/card.py @@ -30,7 +30,7 @@ def card( header_html += f'

{title}

' if subtitle: header_html += f'{subtitle}' - header_html += '' + header_html += "" st.html(header_html) actions_column.html(f'') diff --git a/testgen/ui/components/widgets/link.py b/testgen/ui/components/widgets/link.py index 8f2dc95..0685c3f 100644 --- a/testgen/ui/components/widgets/link.py +++ b/testgen/ui/components/widgets/link.py @@ -1,7 +1,7 @@ import random -from testgen.ui.navigation.router import Router from testgen.ui.components.utils.component import component +from testgen.ui.navigation.router import Router def link( @@ -18,7 +18,7 @@ def link( key: str | None = None, ) -> None: if not key: - key = f"testgen:link:{round(random.random() * 10_000)}" + key = f"testgen:link:{round(random.random() * 10_000)}" # noqa: S311 props = { "href": href, diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py index 5e2f9b7..8663b7c 100644 --- a/testgen/ui/views/test_suites.py +++ b/testgen/ui/views/test_suites.py @@ -11,11 +11,11 @@ from testgen.commands.run_execute_tests import run_execution_steps_in_background from testgen.commands.run_generate_tests import run_test_gen_queries from testgen.commands.run_observability_exporter import export_test_results +from testgen.ui.components import widgets as testgen from testgen.ui.navigation.page import Page from testgen.ui.services import connection_service, table_group_service from testgen.ui.services.string_service import empty_if_null from testgen.ui.session import session -from testgen.ui.components import widgets as testgen class TestSuitesPage(Page): @@ -73,7 +73,7 @@ def render(self, connection_id: str | None = None, table_group_id: str | None = with testgen.card(title=test_suite["test_suite"], subtitle=subtitle) as test_suite_card: with test_suite_card.actions: testgen.button( - type="icon", + type_="icon", icon="output", tooltip="Export results to observability", tooltip_position="right", @@ -81,7 +81,7 @@ def render(self, connection_id: str | None = None, table_group_id: str | None = 
key=f"test_suite:keys:export:{test_suite['id']}", ) testgen.button( - type="icon", + type_="icon", icon="edit", tooltip="Edit test suite", tooltip_position="right", @@ -89,7 +89,7 @@ def render(self, connection_id: str | None = None, table_group_id: str | None = key=f"test_suite:keys:edit:{test_suite['id']}", ) testgen.button( - type="icon", + type_="icon", icon="delete", tooltip="Delete test suite", tooltip_position="right", @@ -114,7 +114,7 @@ def render(self, connection_id: str | None = None, table_group_id: str | None = """) - if (latest_run_start := test_suite['latest_run_start']) and not pd.isnull(latest_run_start): + if (latest_run_start := test_suite["latest_run_start"]) and not pd.isnull(latest_run_start): with latest_run_section: st.html('') st.html('
Latest Run
') @@ -140,13 +140,13 @@ def render(self, connection_id: str | None = None, table_group_id: str | None = with actions_section: testgen.button( - type="stroked", + type_="stroked", label="Run Tests", on_click=partial(run_tests_dialog, project_code, test_suite), key=f"test_suite:keys:runtests:{test_suite['id']}", ) testgen.button( - type="stroked", + type_="stroked", label="Generate Tests", on_click=partial(generate_tests_dialog, test_suite), key=f"test_suite:keys:generatetests:{test_suite['id']}", From a0700c1332430abeb725331a9bf4f7acd4ee3470 Mon Sep 17 00:00:00 2001 From: Ricardo Boni Date: Fri, 30 Aug 2024 11:36:56 -0400 Subject: [PATCH 34/78] refactor(test_suites): Addressing code review feedback --- .../030_initialize_new_schema_structure.sql | 21 ++- .../dbsetup/060_create_standard_views.sql | 27 ++++ .../dbupgrade/0108_incremental_upgrade.sql | 70 +-------- .../dbupgrade/0109_incremental_upgrade.sql | 148 +----------------- .../dbupgrade/0111_incremental_upgrade.sql | 13 ++ 5 files changed, 61 insertions(+), 218 deletions(-) create mode 100644 testgen/template/dbupgrade/0111_incremental_upgrade.sql diff --git a/testgen/template/dbsetup/030_initialize_new_schema_structure.sql b/testgen/template/dbsetup/030_initialize_new_schema_structure.sql index db24128..cf631bf 100644 --- a/testgen/template/dbsetup/030_initialize_new_schema_structure.sql +++ b/testgen/template/dbsetup/030_initialize_new_schema_structure.sql @@ -511,9 +511,7 @@ CREATE TABLE working_agg_cat_tests ( test_measures TEXT, test_conditions TEXT, CONSTRAINT working_agg_cat_tests_trid_sn_tn_cs - PRIMARY KEY (test_run_id, schema_name, table_name, cat_sequence), - CONSTRAINT working_agg_cat_tests_test_runs_fk - FOREIGN KEY (test_run_id) REFERENCES test_runs + PRIMARY KEY (test_run_id, schema_name, table_name, cat_sequence) ); CREATE TABLE working_agg_cat_results ( @@ -524,9 +522,7 @@ CREATE TABLE working_agg_cat_results ( measure_results TEXT, test_results TEXT, CONSTRAINT 
working_agg_cat_results_tri_sn_tn_cs - PRIMARY KEY (test_run_id, schema_name, table_name, cat_sequence), - CONSTRAINT working_agg_cat_results_test_runs_fk - FOREIGN KEY (test_run_id) REFERENCES test_runs + PRIMARY KEY (test_run_id, schema_name, table_name, cat_sequence) ); CREATE TABLE cat_test_conditions ( @@ -592,6 +588,10 @@ CREATE TABLE tg_revision ( revision INTEGER ); +-- Index working table - ORIGINAL +CREATE INDEX working_agg_cat_tests_test_run_id_index + ON working_agg_cat_tests(test_run_id); + -- Index Connections CREATE UNIQUE INDEX uix_con_id ON connections(id); @@ -617,6 +617,9 @@ CREATE INDEX ix_ts_con ON test_suites(connection_id); -- Index test_definitions +CREATE INDEX ix_td_ts_fk + ON test_definitions(test_suite_id); + CREATE INDEX ix_td_pc_stc_tst ON test_definitions(test_suite_id, schema_name, table_name, column_name, test_type); @@ -630,6 +633,9 @@ CREATE INDEX ix_td_ts_tc ON test_definitions(test_suite_id, table_name, column_name, test_type); -- Index test_runs +CREATE INDEX ix_trun_ts_fk + ON test_runs(test_suite_id); + CREATE INDEX ix_trun_pc_ts_time ON test_runs(test_suite_id, test_starttime); @@ -640,6 +646,9 @@ CREATE INDEX ix_trun_time CREATE UNIQUE INDEX uix_tr_id ON test_results(id); +CREATE INDEX ix_tr_pc_ts + ON test_results(test_suite_id); + CREATE INDEX ix_tr_trun ON test_results(test_run_id); diff --git a/testgen/template/dbsetup/060_create_standard_views.sql b/testgen/template/dbsetup/060_create_standard_views.sql index f0b451b..2984bcf 100644 --- a/testgen/template/dbsetup/060_create_standard_views.sql +++ b/testgen/template/dbsetup/060_create_standard_views.sql @@ -80,6 +80,33 @@ GROUP BY r.id, r.project_code, cc.connection_name, r.connection_id, r.profiling_starttime, r.profiling_endtime, r.status; +DROP VIEW IF EXISTS v_test_runs; + +CREATE VIEW v_test_runs + AS +SELECT r.id as test_run_id, + p.project_code, + p.project_name, + ts.test_suite, + r.test_starttime, + TO_CHAR(r.test_endtime - r.test_starttime, 'HH24:MI:SS') as 
duration, + r.status, r.log_message, + COUNT(*) as test_ct, + SUM(result_code) as passed_ct, + COALESCE(SUM(CASE WHEN tr.result_status = 'Failed' THEN 1 END), 0) as failed_ct, + COALESCE(SUM(CASE WHEN tr.result_status = 'Warning' THEN 1 END), 0) as warning_ct, + r.process_id + FROM test_runs r +INNER JOIN test_suites ts + ON (r.test_suite_id = ts.id) +INNER JOIN projects p + ON (ts.project_code = p.project_code) +INNER JOIN test_results tr + ON (r.id = tr.test_run_id) +GROUP BY r.id, p.project_code, ts.test_suite, r.test_starttime, r.test_endtime, + r.process_id, r.status, r.log_message, p.project_name; + + DROP VIEW IF EXISTS v_test_results; CREATE VIEW v_test_results diff --git a/testgen/template/dbupgrade/0108_incremental_upgrade.sql b/testgen/template/dbupgrade/0108_incremental_upgrade.sql index 39f0ed3..df8b7cb 100644 --- a/testgen/template/dbupgrade/0108_incremental_upgrade.sql +++ b/testgen/template/dbupgrade/0108_incremental_upgrade.sql @@ -3,7 +3,7 @@ SET SEARCH_PATH TO {SCHEMA_NAME}; -- Step 1: Drop everything that depends on the current state DROP TABLE execution_queue; -DROP VIEW v_test_results; +DROP VIEW IF EXISTS v_test_results; ALTER TABLE test_definitions DROP CONSTRAINT test_definitions_test_suites_project_code_test_suite_fk; ALTER TABLE test_results DROP CONSTRAINT test_results_test_suites_project_code_test_suite_fk; ALTER TABLE test_suites DROP CONSTRAINT test_suites_project_code_test_suite_pk; @@ -37,71 +37,3 @@ ALTER TABLE test_definitions DROP COLUMN project_code; CREATE INDEX ix_td_pc_stc_tst ON test_definitions(test_suite_id, schema_name, table_name, column_name, test_type); - -CREATE VIEW v_test_results AS - SELECT p.project_name, - ts.test_suite, - tg.table_groups_name, - cn.connection_name, cn.project_host, cn.sql_flavor, - tt.dq_dimension, - r.schema_name, r.table_name, r.column_names, - r.test_time as test_date, - r.test_type, tt.id as test_type_id, tt.test_name_short, tt.test_name_long, - r.test_description, - tt.measure_uom, 
tt.measure_uom_description, - c.test_operator, - r.threshold_value::NUMERIC(16, 5) as threshold_value, - r.result_measure::NUMERIC(16, 5), - r.result_status, - r.input_parameters, - r.result_message, - CASE WHEN result_code <> 1 THEN r.severity END as severity, - CASE - WHEN result_code <> 1 THEN r.disposition - ELSE 'Passed' - END AS disposition, - r.result_code as passed_ct, - (1 - r.result_code)::INTEGER as exception_ct, - CASE - WHEN result_status = 'Warning' - AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1 - END::INTEGER as warning_ct, - CASE - WHEN result_status = 'Failed' - AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1 - END::INTEGER as failed_ct, - CASE - WHEN result_message ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1 - END AS execution_error_ct, - r.project_code, - r.table_groups_id, - r.id as test_result_id, c.id as connection_id, - r.test_suite_id, - r.test_definition_id as test_definition_id_runtime, - CASE - WHEN r.auto_gen = TRUE THEN d.id - ELSE r.test_definition_id - END as test_definition_id_current, - r.test_run_id as test_run_id, - r.auto_gen - FROM test_results r - INNER JOIN test_types tt - ON r.test_type = tt.test_type - LEFT JOIN test_definitions d - ON r.test_suite_id = d.test_suite_id - AND r.table_name = d.table_name - AND r.column_names = COALESCE(d.column_name, 'N/A') - AND r.test_type = d.test_type - AND r.auto_gen = TRUE - AND d.last_auto_gen_date IS NOT NULL - INNER JOIN test_suites ts - ON r.test_suite_id = ts.id - INNER JOIN projects p - ON r.project_code = p.project_code - INNER JOIN table_groups tg - ON r.table_groups_id = tg.id - INNER JOIN connections cn - ON tg.connection_id = cn.connection_id - LEFT JOIN cat_test_conditions c - ON cn.sql_flavor = c.sql_flavor - AND r.test_type = c.test_type; diff --git a/testgen/template/dbupgrade/0109_incremental_upgrade.sql b/testgen/template/dbupgrade/0109_incremental_upgrade.sql index 1a74941..028dcc4 100644 --- 
a/testgen/template/dbupgrade/0109_incremental_upgrade.sql +++ b/testgen/template/dbupgrade/0109_incremental_upgrade.sql @@ -2,14 +2,14 @@ SET SEARCH_PATH TO {SCHEMA_NAME}; -- Step 1: Drop everything that depends on the current state -DROP VIEW v_test_runs; -- Not needed, unused -DROP VIEW v_test_results; -DROP VIEW v_queued_observability_results; +DROP VIEW IF EXISTS v_test_runs; +DROP VIEW IF EXISTS v_test_results; +DROP VIEW IF EXISTS v_queued_observability_results; DROP INDEX cix_tr_pc_ts; -DROP INDEX ix_tr_pc_ts; -- Not needed, replaced by a FK +DROP INDEX ix_tr_pc_ts; DROP INDEX ix_tr_pc_sctc_tt; DROP INDEX ix_trun_pc_ts_time; -DROP INDEX working_agg_cat_tests_test_run_id_index; -- Not needed, given the column is a FK +DROP INDEX working_agg_cat_tests_test_run_id_index; -- Step 2: Adjust the tables @@ -74,141 +74,3 @@ CREATE INDEX cix_tr_pc_ts CREATE INDEX ix_trun_pc_ts_time ON test_runs(test_suite_id, test_starttime); - -CREATE VIEW v_test_results -AS -SELECT p.project_name, - ts.test_suite, - tg.table_groups_name, - cn.connection_name, cn.project_host, cn.sql_flavor, - tt.dq_dimension, - r.schema_name, r.table_name, r.column_names, - r.test_time as test_date, - r.test_type, tt.id as test_type_id, tt.test_name_short, tt.test_name_long, - r.test_description, - tt.measure_uom, tt.measure_uom_description, - c.test_operator, - r.threshold_value::NUMERIC(16, 5) as threshold_value, - r.result_measure::NUMERIC(16, 5), - r.result_status, - r.input_parameters, - r.result_message, - CASE WHEN result_code <> 1 THEN r.severity END as severity, - CASE - WHEN result_code <> 1 THEN r.disposition - ELSE 'Passed' - END as disposition, - r.result_code as passed_ct, - (1 - r.result_code)::INTEGER as exception_ct, - CASE - WHEN result_status = 'Warning' - AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1 - END::INTEGER as warning_ct, - CASE - WHEN result_status = 'Failed' - AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1 - END::INTEGER as 
failed_ct, - CASE - WHEN result_message ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1 - END as execution_error_ct, - p.project_code, - r.table_groups_id, - r.id as test_result_id, c.id as connection_id, - r.test_suite_id, - r.test_definition_id as test_definition_id_runtime, - CASE - WHEN r.auto_gen = TRUE THEN d.id - ELSE r.test_definition_id - END as test_definition_id_current, - r.test_run_id as test_run_id, - r.auto_gen - FROM test_results r -INNER JOIN test_types tt - ON (r.test_type = tt.test_type) -LEFT JOIN test_definitions d - ON (r.test_suite_id = d.test_suite_id - AND r.table_name = d.table_name - AND r.column_names = COALESCE(d.column_name, 'N/A') - AND r.test_type = d.test_type - AND r.auto_gen = TRUE - AND d.last_auto_gen_date IS NOT NULL) -INNER JOIN test_suites ts - ON (r.test_suite_id = ts.id) -INNER JOIN projects p - ON (ts.project_code = p.project_code) -INNER JOIN table_groups tg - ON (r.table_groups_id = tg.id) -INNER JOIN connections cn - ON (tg.connection_id = cn.connection_id) -LEFT JOIN cat_test_conditions c - ON (cn.sql_flavor = c.sql_flavor - AND r.test_type = c.test_type); - -CREATE VIEW v_queued_observability_results - AS -SELECT - p.project_name, - cn.sql_flavor as component_tool, - ts.test_suite_schema as schema, - cn.connection_name, - cn.project_db, - - CASE - WHEN tg.profile_use_sampling = 'Y' THEN tg.profile_sample_min_count - END as sample_min_count, - tg.id as group_id, - tg.profile_use_sampling = 'Y' as uses_sampling, - ts.project_code, - CASE - WHEN tg.profile_use_sampling = 'Y' THEN tg.profile_sample_percent - END as sample_percentage, - - tg.profiling_table_set, - tg.profiling_include_mask, - tg.profiling_exclude_mask, - - COALESCE(ts.component_type, 'dataset') as component_type, - COALESCE(ts.component_key, tg.id::VARCHAR) as component_key, - COALESCE(ts.component_name, tg.table_groups_name) as component_name, - - r.column_names, - r.table_name, - ts.test_suite, - ts.id AS test_suite_id, - r.input_parameters, - 
r.test_definition_id, - tt.test_name_short as type, - CASE - WHEN c.test_operator IN ('>', '>=') THEN d.threshold_value - END as min_threshold, - CASE - WHEN c.test_operator IN ('<', '<=') THEN d.threshold_value - END as max_threshold, - tt.test_name_long as name, - tt.test_description as description, - r.test_time as start_time, - r.test_time as end_time, - r.result_message as result_message, - tt.dq_dimension, - r.result_status, - r.result_id, - r.result_measure as metric_value, - tt.measure_uom, - tt.measure_uom_description - FROM test_results r -INNER JOIN test_types tt - ON (r.test_type = tt.test_type) -INNER JOIN test_definitions d - ON (r.test_definition_id = d.id) -INNER JOIN test_suites ts - ON r.test_suite_id = ts.id -INNER JOIN table_groups tg - ON (d.table_groups_id = tg.id) -INNER JOIN connections cn - ON (tg.connection_id = cn.connection_id) -INNER JOIN projects p - ON (ts.project_code = p.project_code) -INNER JOIN cat_test_conditions c - ON (cn.sql_flavor = c.sql_flavor - AND d.test_type = c.test_type) -WHERE r.observability_status = 'Queued'; diff --git a/testgen/template/dbupgrade/0111_incremental_upgrade.sql b/testgen/template/dbupgrade/0111_incremental_upgrade.sql new file mode 100644 index 0000000..b53065a --- /dev/null +++ b/testgen/template/dbupgrade/0111_incremental_upgrade.sql @@ -0,0 +1,13 @@ +SET SEARCH_PATH TO {SCHEMA_NAME}; + +CREATE INDEX working_agg_cat_tests_test_run_id_index + ON working_agg_cat_tests(test_run_id); + +CREATE INDEX ix_td_ts_fk + ON test_definitions(test_suite_id); + +CREATE INDEX ix_trun_ts_fk + ON test_runs(test_suite_id); + +CREATE INDEX ix_tr_pc_ts + ON test_results(test_suite_id); From 6f49f82c8c2329737850ab897b3b8f2684af43f7 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Thu, 5 Sep 2024 02:25:18 -0400 Subject: [PATCH 35/78] feat(ui): add query parameters to all pages --- testgen/common/date_service.py | 4 +- testgen/ui/app.py | 20 +- testgen/ui/assets/style.css | 25 +- testgen/ui/bootstrap.py | 3 +- 
.../frontend/js/components/breadcrumbs.js | 7 +- .../components/frontend/js/components/link.js | 7 +- .../frontend/js/components/summary_bar.js | 14 +- testgen/ui/components/widgets/__init__.py | 8 + testgen/ui/components/widgets/breadcrumbs.py | 10 +- testgen/ui/components/widgets/card.py | 2 +- testgen/ui/components/widgets/link.py | 11 +- testgen/ui/components/widgets/page.py | 98 +++++ testgen/ui/components/widgets/sidebar.py | 3 +- testgen/ui/components/widgets/summary_bar.py | 3 +- testgen/ui/navigation/page.py | 15 +- testgen/ui/navigation/router.py | 8 + testgen/ui/queries/profiling_queries.py | 4 +- testgen/ui/queries/test_suite_queries.py | 44 ++- testgen/ui/services/form_service.py | 41 +- testgen/ui/services/project_service.py | 27 ++ testgen/ui/services/table_group_service.py | 2 +- .../ui/services/test_definition_service.py | 4 +- testgen/ui/services/test_suite_service.py | 9 +- testgen/ui/services/toolbar_service.py | 77 ---- testgen/ui/views/connections.py | 41 +- testgen/ui/views/login.py | 2 +- testgen/ui/views/overview.py | 6 +- testgen/ui/views/profiling_anomalies.py | 352 +++++++++--------- testgen/ui/views/profiling_results.py | 296 +++++++-------- testgen/ui/views/profiling_summary.py | 78 ++-- testgen/ui/views/project_settings.py | 23 +- testgen/ui/views/table_groups.py | 55 ++- testgen/ui/views/test_definitions.py | 328 +++++----------- testgen/ui/views/test_results.py | 218 +++++------ testgen/ui/views/test_runs.py | 109 +++--- testgen/ui/views/test_suites.py | 101 ++--- 36 files changed, 969 insertions(+), 1086 deletions(-) create mode 100644 testgen/ui/components/widgets/page.py create mode 100644 testgen/ui/services/project_service.py delete mode 100644 testgen/ui/services/toolbar_service.py diff --git a/testgen/common/date_service.py b/testgen/common/date_service.py index 510bbdc..e0e37f2 100644 --- a/testgen/common/date_service.py +++ b/testgen/common/date_service.py @@ -52,13 +52,13 @@ def 
create_timezoned_column_in_dataframe(streamlit_session, df, new_column_name, ) -def get_timezoned_timestamp(streamlit_session, value): +def get_timezoned_timestamp(streamlit_session, value, dateformat="%b %-d, %-I:%M %p"): ret = None if value and "browser_timezone" in streamlit_session: data = {"value": [value]} df = pd.DataFrame(data) timezone = streamlit_session["browser_timezone"] - df["value"] = df["value"].dt.tz_localize("UTC").dt.tz_convert(timezone).dt.strftime("%Y-%m-%d %H:%M:%S") + df["value"] = df["value"].dt.tz_localize("UTC").dt.tz_convert(timezone).dt.strftime(dateformat) ret = df.iloc[0, 0] return ret diff --git a/testgen/ui/app.py b/testgen/ui/app.py index 437f483..4308d40 100644 --- a/testgen/ui/app.py +++ b/testgen/ui/app.py @@ -8,9 +8,8 @@ from testgen.common.docker_service import check_basic_configuration from testgen.ui import bootstrap from testgen.ui.components import widgets as testgen -from testgen.ui.queries import project_queries from testgen.ui.services import database_service as db -from testgen.ui.services import javascript_service, user_session_service +from testgen.ui.services import javascript_service, project_service, user_session_service from testgen.ui.session import session @@ -33,9 +32,9 @@ def render(log_level: int = logging.INFO): session.dbschema = db.get_schema() - projects = get_projects() + projects = project_service.get_projects() if not session.project and len(projects) > 0: - set_current_project(projects[0]["code"]) + project_service.set_current_project(projects[0]["code"]) if session.authentication_status is None and not session.logging_out: user_session_service.load_user_session() @@ -68,19 +67,6 @@ def set_locale(): st.session_state["browser_timezone"] = timezone -@st.cache_data(show_spinner=False) -def get_projects(): - projects = project_queries.get_projects() - projects = [ - {"code": project["project_code"], "name": project["project_name"]} for project in projects.to_dict("records") - ] - - return projects - -def 
set_current_project(project_code: str) -> None: - session.project = project_code - - def get_image_path(path: str) -> str: return str(Path(__file__).parent / path) diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css index 61396ae..c3a39c5 100644 --- a/testgen/ui/assets/style.css +++ b/testgen/ui/assets/style.css @@ -59,6 +59,14 @@ section[data-testid="stSidebar"] { section.main > :nth-child(1 of div).block-container { padding: 24px; } + +div[data-testid="stVerticalBlock"] { + gap: 0.5rem; +} + +div[data-testid="collapsedControl"] { + top: 0.5rem; +} /* */ /* Dialog - sets the width of all st.dialog */ @@ -139,19 +147,26 @@ button[title="Show password text"] { color: var(--caption-text-color); font-style: italic; } +/* ... */ -[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.testgen_card-actions) [data-testid="stVerticalBlock"] { +[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.flex-row) [data-testid="stVerticalBlock"] { width: 100%; flex-direction: row; - justify-content: flex-end; } -[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.testgen_card-actions) [data-testid="stVerticalBlock"] > div[data-testid="element-container"], -[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.testgen_card-actions) [data-testid="stVerticalBlock"] > div[data-testid="element-container"] > div[data-testid] { +[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.flex-row) [data-testid="stVerticalBlock"] > 
div[data-testid="element-container"], +[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.flex-row) [data-testid="stVerticalBlock"] > div[data-testid="element-container"] > div[data-testid] { width: auto !important; max-height: 40px; } -/* ... */ + +[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.flex-start) [data-testid="stVerticalBlock"] { + justify-content: flex-start; +} + +[data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.flex-end) [data-testid="stVerticalBlock"] { + justify-content: flex-end; +} [data-testid="stVerticalBlock"]:has(> div.element-container > div.stHtml > i.no-flex-gap) { gap: unset; diff --git a/testgen/ui/bootstrap.py b/testgen/ui/bootstrap.py index 1f9a8f2..03a95f5 100644 --- a/testgen/ui/bootstrap.py +++ b/testgen/ui/bootstrap.py @@ -20,7 +20,7 @@ from testgen.ui.views.profiling_summary import DataProfilingPage from testgen.ui.views.project_settings import ProjectSettingsPage from testgen.ui.views.table_groups import TableGroupsPage -from testgen.ui.views.test_definitions import TestDefinitionsPage, TestDefinitionsPageFromSuite +from testgen.ui.views.test_definitions import TestDefinitionsPage from testgen.ui.views.test_results import TestResultsPage from testgen.ui.views.test_runs import TestRunsPage from testgen.ui.views.test_suites import TestSuitesPage @@ -38,7 +38,6 @@ TableGroupsPage, TestSuitesPage, TestDefinitionsPage, - TestDefinitionsPageFromSuite, ProjectSettingsPage, ] diff --git a/testgen/ui/components/frontend/js/components/breadcrumbs.js b/testgen/ui/components/frontend/js/components/breadcrumbs.js index e8ba99e..d6976c8 100644 --- a/testgen/ui/components/frontend/js/components/breadcrumbs.js +++ 
b/testgen/ui/components/frontend/js/components/breadcrumbs.js @@ -2,6 +2,7 @@ * @typedef Breadcrumb * @type {object} * @property {string} path + * @property {object} params * @property {string} label * * @typedef Properties @@ -30,7 +31,7 @@ const Breadcrumbs = (/** @type Properties */ props) => { { class: 'tg-breadcrumbs' }, breadcrumbs.reduce((items, b, idx) => { const isLastItem = idx === breadcrumbs.length - 1; - items.push(a({ class: `tg-breadcrumbs--${ isLastItem ? 'current' : 'active'}`, href: `#/${b.path}`, onclick: () => navigate(b.path) }, b.label)) + items.push(a({ class: `tg-breadcrumbs--${ isLastItem ? 'current' : 'active'}`, href: `#/${b.path}`, onclick: () => navigate(b.path, b.params) }, b.label)) if (!isLastItem) { items.push(span({class: 'tg-breadcrumbs--arrow'}, '>')); } @@ -41,8 +42,8 @@ const Breadcrumbs = (/** @type Properties */ props) => { ) }; -function navigate(/** @type string */ path) { - Streamlit.sendData(path); +function navigate(/** @type string */ path, /** @type object */ params) { + Streamlit.sendData({ path, params }); return false; } diff --git a/testgen/ui/components/frontend/js/components/link.js b/testgen/ui/components/frontend/js/components/link.js index 09500ff..17463d4 100644 --- a/testgen/ui/components/frontend/js/components/link.js +++ b/testgen/ui/components/frontend/js/components/link.js @@ -2,6 +2,7 @@ * @typedef Properties * @type {object} * @property {string} href + * @property {object} params * @property {string} label * @property {boolean} underline * @property {string?} left_icon @@ -28,7 +29,7 @@ const Link = (/** @type Properties */ props) => { { class: `tg-link ${props.underline.val ? 
'tg-link--underline' : ''}`, style: props.style, - onclick: () => navigate(props.href.val), + onclick: () => navigate(props.href.val, props.params.val), }, div( {class: 'tg-link--wrapper'}, @@ -50,8 +51,8 @@ const LinkIcon = ( ); }; -function navigate(href) { - Streamlit.sendData({ href }); +function navigate(href, params) { + Streamlit.sendData({ href, params }); } const stylesheet = new CSSStyleSheet(); diff --git a/testgen/ui/components/frontend/js/components/summary_bar.js b/testgen/ui/components/frontend/js/components/summary_bar.js index 6049a2b..ec67e01 100644 --- a/testgen/ui/components/frontend/js/components/summary_bar.js +++ b/testgen/ui/components/frontend/js/components/summary_bar.js @@ -8,6 +8,7 @@ * @typedef Properties * @type {object} * @property {Array.} items + * @property {string} label * @property {number} height * @property {number} width */ @@ -30,9 +31,10 @@ const SummaryBar = (/** @type Properties */ props) => { const height = props.height.val || 24; const width = props.width.val; const summaryItems = props.items.val; + const label = props.label.val; const total = summaryItems.reduce((sum, item) => sum + item.value, 0); - Streamlit.setFrameHeight(height + 24); + Streamlit.setFrameHeight(height + 24 + (label ? 24 : 0)); if (!window.testgen.loadedStylesheets.summaryBar) { document.adoptedStyleSheets.push(stylesheet); @@ -41,6 +43,12 @@ const SummaryBar = (/** @type Properties */ props) => { return div( { class: 'tg-summary-bar-wrapper' }, + () => { + return label ? 
div( + { class: 'tg-summary-bar--label' }, + label, + ) : null; + }, div( { class: 'tg-summary-bar', @@ -62,6 +70,10 @@ const SummaryBar = (/** @type Properties */ props) => { const stylesheet = new CSSStyleSheet(); stylesheet.replace(` +.tg-summary-bar--label { + margin-bottom: 4px; +} + .tg-summary-bar { height: 100%; display: flex; diff --git a/testgen/ui/components/widgets/__init__.py b/testgen/ui/components/widgets/__init__.py index ee16b62..7c25862 100644 --- a/testgen/ui/components/widgets/__init__.py +++ b/testgen/ui/components/widgets/__init__.py @@ -5,5 +5,13 @@ from testgen.ui.components.widgets.card import card from testgen.ui.components.widgets.expander_toggle import expander_toggle from testgen.ui.components.widgets.link import link +from testgen.ui.components.widgets.page import ( + flex_row_end, + flex_row_start, + no_flex_gap, + page_header, + toolbar_select, + whitespace, +) from testgen.ui.components.widgets.sidebar import sidebar from testgen.ui.components.widgets.summary_bar import summary_bar diff --git a/testgen/ui/components/widgets/breadcrumbs.py b/testgen/ui/components/widgets/breadcrumbs.py index f997e9a..bb258d1 100644 --- a/testgen/ui/components/widgets/breadcrumbs.py +++ b/testgen/ui/components/widgets/breadcrumbs.py @@ -1,11 +1,8 @@ -import logging import typing from testgen.ui.components.utils.component import component from testgen.ui.navigation.router import Router -LOG = logging.getLogger("testgen") - def breadcrumbs( key: str = "testgen:breadcrumbs", @@ -20,14 +17,15 @@ def breadcrumbs( :param breadcrumbs: list of dicts with label and path """ - path = component( + data = component( id_="breadcrumbs", key=key, props={"breadcrumbs": breadcrumbs}, ) - if path: - Router().navigate(to=path) + if data: + Router().navigate(to=data["path"], with_args=data["params"]) class Breadcrumb(typing.TypedDict): path: str | None + params: dict label: str diff --git a/testgen/ui/components/widgets/card.py b/testgen/ui/components/widgets/card.py 
index afcd9ed..97677c8 100644 --- a/testgen/ui/components/widgets/card.py +++ b/testgen/ui/components/widgets/card.py @@ -33,7 +33,7 @@ def card( header_html += "" st.html(header_html) - actions_column.html(f'') + actions_column.html(f'') yield CardContext(actions=actions_column) diff --git a/testgen/ui/components/widgets/link.py b/testgen/ui/components/widgets/link.py index 0685c3f..14511a8 100644 --- a/testgen/ui/components/widgets/link.py +++ b/testgen/ui/components/widgets/link.py @@ -1,5 +1,3 @@ -import random - from testgen.ui.components.utils.component import component from testgen.ui.navigation.router import Router @@ -8,6 +6,7 @@ def link( href: str, label: str, *, + params: dict = {}, # noqa: B006 underline: bool = True, left_icon: str | None = None, left_icon_size: float = 20.0, @@ -15,13 +14,11 @@ def link( right_icon_size: float = 20.0, height: float | None = 21.0, style: str | None = None, - key: str | None = None, + key: str = "testgen:link", ) -> None: - if not key: - key = f"testgen:link:{round(random.random() * 10_000)}" # noqa: S311 - props = { "href": href, + "params": params, "label": label, "height": height, "underline": underline, @@ -37,4 +34,4 @@ def link( clicked = component(id_="link", key=key, props=props) if clicked: - Router().navigate(to=href, with_args={}) + Router().navigate(to=href, with_args=params) diff --git a/testgen/ui/components/widgets/page.py b/testgen/ui/components/widgets/page.py new file mode 100644 index 0000000..2715aff --- /dev/null +++ b/testgen/ui/components/widgets/page.py @@ -0,0 +1,98 @@ +import pandas as pd +import streamlit as st +from streamlit.delta_generator import DeltaGenerator +from streamlit_extras.no_default_selectbox import selectbox + +from testgen.ui.components.widgets.breadcrumbs import Breadcrumb +from testgen.ui.components.widgets.breadcrumbs import breadcrumbs as tg_breadcrumbs +from testgen.ui.navigation.router import Router + + +def page_header( + title: str, + help_link:str | None = None, + 
breadcrumbs: list["Breadcrumb"] | None = None, +): + hcol1, hcol2 = st.columns([0.95, 0.05]) + hcol1.subheader(title, anchor=False) + if help_link: + with hcol2: + whitespace(0.8) + st.page_link(help_link, label=" ", icon=":material/help:") + + if breadcrumbs: + tg_breadcrumbs(breadcrumbs=breadcrumbs) + + st.write( + '
', + unsafe_allow_html=True, + ) + if "last_page" in st.session_state: + if title != st.session_state["last_page"]: + st.cache_data.clear() + st.session_state["last_page"] = title + + +def toolbar_select( + options: pd.DataFrame | list[str], + value_column: str | None = None, + display_column: str | None = None, + default_value = None, + required: bool = False, + bind_to_query: str | None = None, + **kwargs, +): + kwargs = {**kwargs} + + if isinstance(options, pd.DataFrame): + value_column = value_column or options.columns[0] + display_column = display_column or value_column + kwargs["options"] = options[display_column] + if default_value in options[value_column].values: + kwargs["index"] = int(options[options[value_column] == default_value].index[0]) + (0 if required else 1) + else: + kwargs["options"] = options + if default_value in options: + kwargs["index"] = options.index(default_value) + + if bind_to_query: + kwargs["key"] = kwargs.get("key", f"toolbar_select_{bind_to_query}") + + def update_query_params(): + query_value = st.session_state[kwargs["key"]] + if isinstance(options, pd.DataFrame): + query_value = options.loc[options[display_column] == query_value, value_column].iloc[0] if query_value != "---" else None + Router().set_query_params({ bind_to_query: query_value }) + + kwargs["on_change"] = update_query_params + + selected = st.selectbox(**kwargs) if required else selectbox(**kwargs) + + if selected and isinstance(options, pd.DataFrame): + return options.loc[options[display_column] == selected, value_column].iloc[0] + + return selected + + +def whitespace(size: float, container: DeltaGenerator | None = None): + _apply_html(f'
', container) + + +def flex_row_start(container: DeltaGenerator | None = None): + _apply_html('', container) + + +def flex_row_end(container: DeltaGenerator | None = None): + _apply_html('', container) + + +def no_flex_gap(container: DeltaGenerator | None = None): + _apply_html('', container) + + +def _apply_html(html: str, container: DeltaGenerator | None = None): + if container: + container.html(html) + else: + st.html(html) diff --git a/testgen/ui/components/widgets/sidebar.py b/testgen/ui/components/widgets/sidebar.py index 2a9e880..5644c98 100644 --- a/testgen/ui/components/widgets/sidebar.py +++ b/testgen/ui/components/widgets/sidebar.py @@ -32,7 +32,8 @@ def sidebar( if session.page_pending_sidebar is not None: path = session.page_pending_sidebar session.page_pending_sidebar = None - Router().navigate(to=path) + params = { "project_code": session.project } if path != "" else {} + Router().navigate(to=path, with_args=params) component( id_="sidebar", diff --git a/testgen/ui/components/widgets/summary_bar.py b/testgen/ui/components/widgets/summary_bar.py index ccc80f3..fe1576a 100644 --- a/testgen/ui/components/widgets/summary_bar.py +++ b/testgen/ui/components/widgets/summary_bar.py @@ -8,6 +8,7 @@ def summary_bar( items: list["SummaryItem"], + label: str | None = None, height: int | None = None, width: int | None = None, key: str = "testgen:summary_bar", @@ -26,7 +27,7 @@ def summary_bar( id_="summary_bar", key=key, default={}, - props={"items": items, "height": height, "width": width}, + props={"items": items, "label": label, "height": height, "width": width}, ) diff --git a/testgen/ui/navigation/page.py b/testgen/ui/navigation/page.py index c29f9c3..b7a53cc 100644 --- a/testgen/ui/navigation/page.py +++ b/testgen/ui/navigation/page.py @@ -8,6 +8,7 @@ import testgen.ui.navigation.router from testgen.ui.navigation.menu import MenuItem +from testgen.ui.services import project_service from testgen.ui.session import session CanActivateGuard = 
typing.Callable[[], bool | str] @@ -37,7 +38,19 @@ def _navigate(self) -> None: session.page_pending_login = self.path return self.router.navigate(to="") - self.render(**(session.current_page_args or {})) + session.current_page_args = session.current_page_args or {} + self._validate_project_query_param() + + self.render(**session.current_page_args) + + def _validate_project_query_param(self) -> None: + if self.path != "" and ":" not in self.path: + valid_project_codes = [ project["code"] for project in project_service.get_projects() ] + if session.current_page_args.get("project_code") not in valid_project_codes: # Ensure top-level pages have valid project_code + session.current_page_args.update({ "project_code": session.project}) + self.router.set_query_params({ "project_code": session.project}) + else: + session.current_page_args.pop("project_code", None) @abc.abstractmethod def render(self, **kwargs) -> None: diff --git a/testgen/ui/navigation/router.py b/testgen/ui/navigation/router.py index dc375c9..8480ec5 100644 --- a/testgen/ui/navigation/router.py +++ b/testgen/ui/navigation/router.py @@ -28,6 +28,7 @@ def run(self, hide_sidebar=False) -> None: # The default [data-testid="stSidebarNav"] element seems to be needed to keep the sidebar DOM stable # Otherwise anything custom in the sidebar randomly flickers on page navigation current_page = st.navigation(streamlit_pages, position="hidden" if hide_sidebar else "sidebar") + session.current_page_args = st.query_params # This hack is needed because the auth cookie is not retrieved on the first run # We have to store the page and wait for the second run @@ -64,3 +65,10 @@ def navigate(self, /, to: str, with_args: dict = {}) -> None: # noqa: B006 error_message = f"{to}: {e!s}" st.error(error_message) LOG.exception(error_message) + + + def set_query_params(self, with_args: dict = {}) -> None: # noqa: B006 + params = st.query_params + params.update(with_args) + params = {k: v for k, v in params.items() if v not in 
[None, "None", ""]} + st.query_params.from_dict(params) diff --git a/testgen/ui/queries/profiling_queries.py b/testgen/ui/queries/profiling_queries.py index d33a7e0..8f6c089 100644 --- a/testgen/ui/queries/profiling_queries.py +++ b/testgen/ui/queries/profiling_queries.py @@ -67,7 +67,7 @@ def lookup_db_parentage_from_run(str_profile_run_id): str_schema = st.session_state["dbschema"] # Define the query str_sql = f""" - SELECT profiling_starttime as profile_run_date, g.table_groups_name + SELECT profiling_starttime as profile_run_date, table_groups_id, g.table_groups_name, g.project_code FROM {str_schema}.profiling_runs pr INNER JOIN {str_schema}.table_groups g ON pr.table_groups_id = g.id @@ -75,7 +75,7 @@ def lookup_db_parentage_from_run(str_profile_run_id): """ df = db.retrieve_data(str_sql) if not df.empty: - return df.at[0, "profile_run_date"], df.at[0, "table_groups_name"] + return df.at[0, "profile_run_date"], df.at[0, "table_groups_id"], df.at[0, "table_groups_name"], df.at[0, "project_code"] @st.cache_data(show_spinner="Retrieving Data") diff --git a/testgen/ui/queries/test_suite_queries.py b/testgen/ui/queries/test_suite_queries.py index 7293ecb..57ffa16 100644 --- a/testgen/ui/queries/test_suite_queries.py +++ b/testgen/ui/queries/test_suite_queries.py @@ -5,14 +5,16 @@ @st.cache_data(show_spinner=False) -def get_by_table_group(schema, project_code, table_group_id): +def get_by_project(schema, project_code, table_group_id=None): sql = f""" SELECT suites.id::VARCHAR(50), suites.project_code, suites.test_suite, suites.connection_id::VARCHAR(50), + connections.connection_name, suites.table_groups_id::VARCHAR(50), + groups.table_groups_name, suites.test_suite_description, suites.test_action, CASE WHEN suites.severity IS NULL THEN 'Inherit' ELSE suites.severity END, @@ -22,6 +24,7 @@ def get_by_table_group(schema, project_code, table_group_id): suites.component_type, suites.component_name, COUNT(definitions.id) as test_ct, + last_run.id as latest_run_id, 
MAX(last_run.test_starttime) as latest_run_start, MAX(last_run.passed_ct) as last_run_passed_ct, MAX(last_run.warning_ct) as last_run_warning_ct, @@ -33,11 +36,46 @@ def get_by_table_group(schema, project_code, table_group_id): ) AS last_run ON (last_run.test_suite_id = suites.id) LEFT OUTER JOIN {schema}.test_definitions AS definitions ON (definitions.test_suite_id = suites.id) + LEFT OUTER JOIN {schema}.connections AS connections + ON (connections.connection_id = suites.connection_id) + LEFT OUTER JOIN {schema}.table_groups as groups + ON (groups.id = suites.table_groups_id) WHERE suites.project_code = '{project_code}' - AND suites.table_groups_id = '{table_group_id}' - GROUP BY suites.id + """ + + if table_group_id: + sql += f""" + AND suites.table_groups_id = '{table_group_id}' + """ + + sql += """ + GROUP BY suites.id, groups.table_groups_name, connections.connection_id, last_run.id ORDER BY suites.test_suite; """ + + return db.retrieve_data(sql) + + +@st.cache_data(show_spinner=False) +def get_by_id(schema, test_suite_id): + sql = f""" + SELECT + suites.id::VARCHAR(50), + suites.project_code, + suites.test_suite, + suites.connection_id::VARCHAR(50), + suites.table_groups_id::VARCHAR(50), + suites.test_suite_description, + suites.test_action, + CASE WHEN suites.severity IS NULL THEN 'Inherit' ELSE suites.severity END, + suites.export_to_observability, + suites.test_suite_schema, + suites.component_key, + suites.component_type, + suites.component_name + FROM {schema}.test_suites as suites + WHERE suites.id = '{test_suite_id}'; + """ return db.retrieve_data(sql) diff --git a/testgen/ui/services/form_service.py b/testgen/ui/services/form_service.py index 41d51f9..a1a56de 100644 --- a/testgen/ui/services/form_service.py +++ b/testgen/ui/services/form_service.py @@ -19,7 +19,6 @@ import testgen.common.date_service as date_service import testgen.ui.services.authentication_service as authentication_service import testgen.ui.services.database_service as db -from 
testgen.ui.components import widgets as testgen """ Shared rendering of UI elements @@ -260,7 +259,7 @@ def render_excel_export( df, lst_export_columns, str_export_title=None, str_caption=None, lst_wrap_columns=None, lst_column_headers=None ): - if st.button(label=":blue[**⤓**]", use_container_width=True): + if st.button(label=":material/download: Export", help="Download to Excel"): download_excel(df, lst_export_columns, str_export_title, str_caption, lst_wrap_columns, lst_column_headers) @@ -271,7 +270,7 @@ def download_excel( st.write(f'**Are you sure you want to download "{str_export_title}.xlsx"?**') st.download_button( - label="Yes", + label="Download", data=_generate_excel_export( df, lst_export_columns, str_export_title, str_caption, lst_wrap_columns, lst_column_headers ), @@ -279,10 +278,9 @@ def download_excel( mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", ) - def render_refresh_button(button_container): with button_container: - do_refresh = st.button(":blue[**⟳**]", help="Refresh page data", use_container_width=False) + do_refresh = st.button(":material/refresh:", help="Refresh page data", use_container_width=False) if do_refresh: reset_post_updates("Refreshing page", True, True) @@ -400,39 +398,6 @@ def reset_post_updates(str_message=None, as_toast=False, clear_cache=True, lst_c st.rerun() -def render_page_header( - str_page_title, str_help_link=None, str_description=None, lst_breadcrumbs=None, boo_show_refresh=False -): - hcol1, hcol2 = st.columns([9, 1]) - hcol1.subheader(str_page_title, anchor=False) - if str_help_link: - with hcol2: - st.caption(" ") - render_icon_link(str_help_link) - st.write( - '
', - unsafe_allow_html=True, - ) - if str_description: - st.caption(str_description) - - if "last_page" in st.session_state: - if str_page_title != st.session_state["last_page"]: - st.cache_data.clear() - st.session_state["last_page"] = str_page_title - - if lst_breadcrumbs: - if boo_show_refresh: - bcol1, bcol2, bcol3, _ = st.columns([875, 60, 60, 5]) - render_refresh_button(bcol3) - else: - bcol1, bcol2, _ = st.columns([95, 4, 1]) - with bcol1: - testgen.breadcrumbs(breadcrumbs=lst_breadcrumbs) - return bcol2 - - def render_select( str_label, df_options, str_show_column, str_return_column, boo_required=True, str_default=None, boo_disabled=False ): diff --git a/testgen/ui/services/project_service.py b/testgen/ui/services/project_service.py new file mode 100644 index 0000000..24a41ab --- /dev/null +++ b/testgen/ui/services/project_service.py @@ -0,0 +1,27 @@ +import streamlit as st + +from testgen.ui.queries import project_queries +from testgen.ui.services import query_service +from testgen.ui.session import session + + +@st.cache_data(show_spinner=False) +def get_projects(): + projects = project_queries.get_projects() + projects = [ + {"code": project["project_code"], "name": project["project_name"]} for project in projects.to_dict("records") + ] + + return projects + + +def set_current_project(project_code: str) -> None: + session.project = project_code + + +@st.cache_data(show_spinner=False) +def get_project_by_code(code: str): + if not code: + return None + return query_service.get_project_by_code(session.dbschema, code) + \ No newline at end of file diff --git a/testgen/ui/services/table_group_service.py b/testgen/ui/services/table_group_service.py index c78f1b5..57ea6bd 100644 --- a/testgen/ui/services/table_group_service.py +++ b/testgen/ui/services/table_group_service.py @@ -8,7 +8,7 @@ def get_by_id(table_group_id: str): schema = st.session_state["dbschema"] - return table_group_queries.get_by_id(schema, table_group_id) + return 
table_group_queries.get_by_id(schema, table_group_id).iloc[0] def get_by_connection(project_code, connection_id): diff --git a/testgen/ui/services/test_definition_service.py b/testgen/ui/services/test_definition_service.py index 036a7d7..3d7d64b 100644 --- a/testgen/ui/services/test_definition_service.py +++ b/testgen/ui/services/test_definition_service.py @@ -95,9 +95,9 @@ def validate_test(test_definition): sql_query = sql_query.replace("{DATA_SCHEMA}", schema) table_group_id = test_definition["table_groups_id"] - table_group_df = table_group_service.get_by_id(table_group_id) + table_group = table_group_service.get_by_id(table_group_id) - connection_id = table_group_df.iloc[0]["connection_id"] + connection_id = table_group["connection_id"] connection = connection_service.get_by_id(connection_id, hide_passwords=False) diff --git a/testgen/ui/services/test_suite_service.py b/testgen/ui/services/test_suite_service.py index f71e125..720695e 100644 --- a/testgen/ui/services/test_suite_service.py +++ b/testgen/ui/services/test_suite_service.py @@ -4,9 +4,14 @@ import testgen.ui.services.test_definition_service as test_definition_service -def get_by_table_group(project_code, table_group_id): +def get_by_project(project_code, table_group_id=None): schema = st.session_state["dbschema"] - return test_suite_queries.get_by_table_group(schema, project_code, table_group_id) + return test_suite_queries.get_by_project(schema, project_code, table_group_id) + + +def get_by_id(test_suite_id): + schema = st.session_state["dbschema"] + return test_suite_queries.get_by_id(schema, test_suite_id).iloc[0] def edit(test_suite): diff --git a/testgen/ui/services/toolbar_service.py b/testgen/ui/services/toolbar_service.py deleted file mode 100644 index d89f4b4..0000000 --- a/testgen/ui/services/toolbar_service.py +++ /dev/null @@ -1,77 +0,0 @@ -from time import sleep - -import streamlit as st - - -class ToolBar: - slot_count = 5 - toolbar_prompt = None - action_prompt = None - help_link = 
"https://docs.datakitchen.io/article/dataops-testgen-help/dataops-testgen-help" - - long_slots = None - short_slots = None - button_slots = None - status_bar = None - action_container = None - - def __init__(self, long_slot_count=5, short_slot_count=0, button_slot_count=0, prompt=None, multiline=False): - self.toolbar_prompt = prompt - - lst_slots_line2 = [] - slots_line2 = None - - # Initialize Toolbar Slots for widgets at right size ratio - lst_slots_line1 = [10] * long_slot_count - if multiline: - lst_slots_line2 = [7] * short_slot_count - lst_slots_line2 += [2] * button_slot_count - else: - lst_slots_line1 += [7] * short_slot_count - lst_slots_line1 += [2] * button_slot_count - - slots_line1 = st.columns(lst_slots_line1) - if multiline: - slots_line2 = st.columns(lst_slots_line2) - - if long_slot_count > 0: - self.long_slots = slots_line1[:long_slot_count] - if multiline: - if short_slot_count > 0: - self.short_slots = slots_line2[0:short_slot_count] - if button_slot_count > 0: - self.button_slots = slots_line2[-1 * button_slot_count :] - else: - if short_slot_count > 0: - self.short_slots = slots_line1[long_slot_count : long_slot_count + short_slot_count] - if button_slot_count > 0: - self.button_slots = slots_line1[-1 * button_slot_count :] - - # Add vertical space to short slots - for i in range(short_slot_count): - self.short_slots[i].markdown("

 
", unsafe_allow_html=True) - - # Add vertical space to button slots - for i in range(button_slot_count): - self.button_slots[i].markdown("

 
", unsafe_allow_html=True) - - self.status_bar = st.empty() - self.set_prompt() - - def set_prompt(self, str_new_prompt=None): - str_prompt = self.toolbar_prompt if str_new_prompt is None else str_new_prompt - if str_prompt: - self.toolbar_prompt = str_prompt - self.status_bar.markdown(f":green[**{str_prompt}**]") - else: - self.status_bar.empty() - - def show_status(self, str_message, str_type): - if str_type == "success": - self.status_bar.success(str_message, icon="✅") - elif str_type == "error": - self.status_bar.error(str_message, icon="❌") - elif str_type == "info": - self.status_bar.info(str_message, icon="💡") - sleep(2) - self.set_prompt() diff --git a/testgen/ui/views/connections.py b/testgen/ui/views/connections.py index 99e0aa2..31e50b6 100644 --- a/testgen/ui/views/connections.py +++ b/testgen/ui/views/connections.py @@ -3,8 +3,7 @@ import streamlit as st -import testgen.ui.services.form_service as fm -import testgen.ui.services.toolbar_service as tb +from testgen.ui.components import widgets as testgen from testgen.ui.navigation.menu import MenuItem from testgen.ui.navigation.page import Page from testgen.ui.services import connection_service @@ -21,21 +20,17 @@ class ConnectionsPage(Page): ] menu_item = MenuItem(icon="database", label="Data Configuration", order=3) - def render(self) -> None: - fm.render_page_header( + def render(self, project_code: str, **_kwargs) -> None: + dataframe = connection_service.get_connections(project_code) + connection = dataframe.iloc[0] + + testgen.page_header( "Connection", "https://docs.datakitchen.io/article/dataops-testgen-help/connect-your-database", - lst_breadcrumbs=[ - {"label": "Overview", "path": "overview"}, - {"label": "Connection", "path": None}, - ], ) - project_code = session.project - dataframe = connection_service.get_connections(project_code) - connection = dataframe.iloc[0] - - tool_bar = tb.ToolBar(long_slot_count=6, short_slot_count=0, button_slot_count=0, prompt=None) + _, actions_column = 
st.columns([.1, .9]) + testgen.flex_row_end(actions_column) enable_table_groups = connection["project_host"] and connection["project_db"] and connection["project_qc_schema"] @@ -44,23 +39,17 @@ def render(self) -> None: mode = "edit" show_connection_form(connection, mode, project_code) - if tool_bar.long_slots[-1].button( + if actions_column.button( + "Configure QC Utility Schema", + help="Creates the required Utility schema and related functions in the target database", + ): + create_qc_schema_dialog(connection) + + if actions_column.button( f":{'gray' if not enable_table_groups else 'green'}[Table Groups →]", help="Create or edit Table Groups for the Connection", - use_container_width=True, ): - st.session_state["connection"] = connection.to_dict() - self.router.navigate( "connections:table-groups", {"connection_id": connection["connection_id"]}, ) - - _, col2 = st.columns([70, 30]) - - if col2.button( - "Configure QC Utility Schema", - help="Creates the required Utility schema and related functions in the target database", - use_container_width=True, - ): - create_qc_schema_dialog(connection) diff --git a/testgen/ui/views/login.py b/testgen/ui/views/login.py index 728a214..13e08fa 100644 --- a/testgen/ui/views/login.py +++ b/testgen/ui/views/login.py @@ -17,7 +17,7 @@ class LoginPage(Page): lambda: not session.authentication_status or session.logging_in or "overview", ] - def render(self) -> None: + def render(self, **_kwargs) -> None: auth_data = user_session_service.get_auth_data() authenticator = stauth.Authenticate( diff --git a/testgen/ui/views/overview.py b/testgen/ui/views/overview.py index 43e2b6e..901fa7f 100644 --- a/testgen/ui/views/overview.py +++ b/testgen/ui/views/overview.py @@ -3,9 +3,9 @@ import streamlit as st +from testgen.ui.components import widgets as testgen from testgen.ui.navigation.menu import MenuItem from testgen.ui.navigation.page import Page -from testgen.ui.services import form_service from testgen.ui.session import session LOG = 
logging.getLogger("testgen") @@ -18,8 +18,8 @@ class OverviewPage(Page): ] menu_item = MenuItem(icon="home", label="Overview", order=0) - def render(self): - form_service.render_page_header( + def render(self, **_kwargs): + testgen.page_header( "Welcome to DataOps TestGen", "https://docs.datakitchen.io/article/dataops-testgen-help/introduction-to-dataops-testgen", ) diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py index 52250b6..fe8e5c4 100644 --- a/testgen/ui/views/profiling_anomalies.py +++ b/testgen/ui/views/profiling_anomalies.py @@ -3,221 +3,207 @@ import plotly.express as px import streamlit as st +import testgen.ui.queries.profiling_queries as profiling_queries import testgen.ui.services.database_service as db import testgen.ui.services.form_service as fm import testgen.ui.services.query_service as dq -import testgen.ui.services.toolbar_service as tb +from testgen.common import date_service from testgen.ui.components import widgets as testgen from testgen.ui.navigation.page import Page +from testgen.ui.services import project_service from testgen.ui.session import session from testgen.ui.views.profiling_modal import view_profiling_button class ProfilingAnomaliesPage(Page): - path = "profiling:hygiene" + path = "profiling-runs:hygiene" can_activate: typing.ClassVar = [ lambda: session.authentication_status, + lambda: "run_id" in session.current_page_args or "profiling-runs", ] - def render(self) -> None: - export_container = fm.render_page_header( + def render(self, run_id: str, issue_class: str | None = None, **_kwargs) -> None: + run_date, _table_group_id, table_group_name, project_code = profiling_queries.lookup_db_parentage_from_run( + run_id + ) + run_date = date_service.get_timezoned_timestamp(st.session_state, run_date) + project_service.set_current_project(project_code) + + testgen.page_header( "Hygiene Issues", "https://docs.datakitchen.io/article/dataops-testgen-help/profile-anomalies", - 
lst_breadcrumbs=[ - {"label": "Overview", "path": "overview"}, - {"label": "Data Profiling", "path": "profiling"}, - {"label": "Hygiene Issues", "path": None}, + breadcrumbs=[ + { "label": "Profiling Runs", "path": "profiling-runs", "params": { "project_code": project_code } }, + { "label": f"{table_group_name} | {run_date}" }, ], ) - if "project" not in st.session_state: - st.write("Select a Project from the Overview page.") - else: - str_project = st.session_state["project"] + others_summary_column, pii_summary_column, _ = st.columns([.3, .3, .4]) + liklihood_filter_column, actions_column, export_button_column = st.columns([.3, .5, .2], vertical_alignment="bottom") + testgen.flex_row_end(actions_column) + testgen.flex_row_end(export_button_column) + + with liklihood_filter_column: + # Likelihood selection - optional filter + status_options = ["All Likelihoods", "Definite", "Likely", "Possible", "Potential PII"] + issue_class = testgen.toolbar_select( + options=status_options, + default_value=issue_class, + required=True, + bind_to_query="issue_class", + label="Issue Class", + ) - # Setup Toolbar - tool_bar = tb.ToolBar(3, 1, 4, None) + with actions_column: + str_help = "Toggle on to perform actions on multiple Hygiene Issues" + do_multi_select = st.toggle("Multi-Select", help=str_help) + + # Get hygiene issue list + df_pa = get_profiling_anomalies(run_id, issue_class) + + # Retrieve disposition action (cache refreshed) + df_action = get_anomaly_disposition(run_id) + # Update action from disposition df + action_map = df_action.set_index("id")["action"].to_dict() + df_pa["action"] = df_pa["id"].map(action_map).fillna(df_pa["action"]) + + if not df_pa.empty: + summaries = get_profiling_anomaly_summary(run_id) + others_summary = [summary for summary in summaries if summary.get("type") != "PII"] + with others_summary_column: + testgen.summary_bar( + items=others_summary, + label="Hygiene Issues", + key="test_results_summary:others", + height=40, + width=400, + ) - # 
Look for drill-down from another page - # No need to clear -- will be sent every time page is accessed - str_drill_tg = st.session_state.get("drill_profile_tg") - str_drill_prun = st.session_state.get("drill_profile_run") + anomalies_pii_summary = [summary for summary in summaries if summary.get("type") == "PII"] + if anomalies_pii_summary: + with pii_summary_column: + testgen.summary_bar( + items=anomalies_pii_summary, + label="Potential PII", + key="test_results_summary:pii", + height=40, + width=400, + ) + # write_frequency_graph(df_pa) + + lst_show_columns = [ + "table_name", + "column_name", + "issue_likelihood", + "action", + "anomaly_name", + "detail", + ] - with tool_bar.long_slots[0]: - # Table Groups selection - df_tg = get_db_table_group_choices(str_project) - str_drill_tg_name = ( - df_tg[df_tg["id"] == str_drill_tg]["table_groups_name"].values[0] if str_drill_tg else None - ) - str_table_groups_id = fm.render_select( - "Table Group", df_tg, "table_groups_name", "id", str_default=str_drill_tg_name, boo_disabled=True + # Show main grid and retrieve selections + selected = fm.render_grid_select( + df_pa, lst_show_columns, int_height=400, do_multi_select=do_multi_select + ) + + with export_button_column: + lst_export_columns = [ + "schema_name", + "table_name", + "column_name", + "anomaly_name", + "issue_likelihood", + "anomaly_description", + "action", + "detail", + "suggested_action", + ] + lst_wrap_columns = ["anomaly_description", "suggested_action"] + fm.render_excel_export( + df_pa, lst_export_columns, "Hygiene Screen", "{TIMESTAMP}", lst_wrap_columns ) - str_profile_run_id = str_drill_prun - - with tool_bar.long_slots[1]: - # Likelihood selection - optional filter - lst_status_options = ["All Likelihoods", "Definite", "Likely", "Possible", "Potential PII"] - str_likelihood = st.selectbox("Issue Class", lst_status_options) - - with tool_bar.short_slots[0]: - str_help = "Toggle on to perform actions on multiple Hygiene Issues" - do_multi_select = 
st.toggle("Multi-Select", help=str_help) - - if str_table_groups_id: - # Get hygiene issue list - df_pa = get_profiling_anomalies(str_profile_run_id, str_likelihood) - - # Retrieve disposition action (cache refreshed) - df_action = get_anomaly_disposition(str_profile_run_id) - # Update action from disposition df - action_map = df_action.set_index("id")["action"].to_dict() - df_pa["action"] = df_pa["id"].map(action_map).fillna(df_pa["action"]) - - if not df_pa.empty: - others_summary_column, pii_summary_column, _ = st.columns([.3, .3, .4]) - summaries = get_profiling_anomaly_summary(str_profile_run_id) - others_summary = [summary for summary in summaries if summary.get("type") != "PII"] - with others_summary_column: - st.html("Hygiene Issues") - testgen.summary_bar( - items=others_summary, - key="test_results_summary:others", - height=40, - width=400, - ) - - anomalies_pii_summary = [summary for summary in summaries if summary.get("type") == "PII"] - if anomalies_pii_summary: - with pii_summary_column: - st.html("Potential PII") - testgen.summary_bar( - items=anomalies_pii_summary, - key="test_results_summary:pii", - height=40, - width=400, - ) - # write_frequency_graph(df_pa) - - lst_show_columns = [ - "table_name", - "column_name", - "issue_likelihood", - "action", - "anomaly_name", - "detail", - ] - # TODO: Can we reintegrate percents below: - # tool_bar.set_prompt( - # f"Hygiene Issues Found: {df_sum.at[0, 'issue_ct']} issues in {df_sum.at[0, 'column_ct']} columns, {df_sum.at[0, 'table_ct']} tables in schema {df_pa.loc[0, 'schema_name']}" - # ) - # Show main grid and retrieve selections - selected = fm.render_grid_select( - df_pa, lst_show_columns, int_height=400, do_multi_select=do_multi_select - ) + if selected: + # Always show details for last selected row + selected_row = selected[len(selected) - 1] + else: + selected_row = None - with export_container: - lst_export_columns = [ - "schema_name", + # Display hygiene issue detail for selected row + if not 
selected_row: + st.markdown(":orange[Select a record to see more information.]") + else: + col1, col2 = st.columns([0.7, 0.3]) + with col1: + fm.render_html_list( + selected_row, + [ + "anomaly_name", "table_name", "column_name", - "anomaly_name", - "issue_likelihood", + "column_type", "anomaly_description", - "action", "detail", + "likelihood_explanation", "suggested_action", - ] - lst_wrap_columns = ["anomaly_description", "suggested_action"] - fm.render_excel_export( - df_pa, lst_export_columns, "Hygiene Screen", "{TIMESTAMP}", lst_wrap_columns - ) - - if selected: - # Always show details for last selected row - selected_row = selected[len(selected) - 1] - else: - selected_row = None - - # Display hygiene issue detail for selected row - if not selected_row: - st.markdown(":orange[Select a record to see more information.]") - else: - col1, col2 = st.columns([0.7, 0.3]) - with col1: - fm.render_html_list( - selected_row, - [ - "anomaly_name", - "table_name", - "column_name", - "column_type", - "anomaly_description", - "detail", - "likelihood_explanation", - "suggested_action", - ], - "Hygiene Issue Detail", - int_data_width=700, - ) - with col2: - # _, v_col2 = st.columns([0.3, 0.7]) - v_col1, v_col2 = st.columns([0.5, 0.5]) - view_profiling_button( - v_col1, selected_row["table_name"], selected_row["column_name"], - str_profile_run_id=str_profile_run_id - ) - with v_col2: - if st.button( - ":green[Source Data →]", help="Review current source data for highlighted issue", use_container_width=True - ): - source_data_dialog(selected_row) - - # Need to render toolbar buttons after grid, so selection status is maintained - if tool_bar.button_slots[0].button( - "✓", help="Confirm this issue as relevant for this run", disabled=not selected - ): - fm.reset_post_updates( - do_disposition_update(selected, "Confirmed"), - as_toast=True, - clear_cache=True, - lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary], - ) - if 
tool_bar.button_slots[1].button( - "✘", help="Dismiss this issue as not relevant for this run", disabled=not selected - ): - fm.reset_post_updates( - do_disposition_update(selected, "Dismissed"), - as_toast=True, - clear_cache=True, - lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary], - ) - if tool_bar.button_slots[2].button( - "🔇", help="Mute this test to deactivate it for future runs", disabled=not selected + ], + "Hygiene Issue Detail", + int_data_width=700, + ) + with col2: + # _, v_col2 = st.columns([0.3, 0.7]) + v_col1, v_col2 = st.columns([0.5, 0.5]) + view_profiling_button( + v_col1, selected_row["table_name"], selected_row["column_name"], + str_profile_run_id=run_id + ) + with v_col2: + if st.button( + ":green[Source Data →]", help="Review current source data for highlighted issue", use_container_width=True ): - fm.reset_post_updates( - do_disposition_update(selected, "Inactive"), - as_toast=True, - clear_cache=True, - lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary], - ) - if tool_bar.button_slots[3].button("↩︎", help="Clear action", disabled=not selected): - fm.reset_post_updates( - do_disposition_update(selected, "No Decision"), - as_toast=True, - clear_cache=True, - lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary], - ) - else: - tool_bar.set_prompt("No Hygiene Issues Found") - - # Help Links - st.markdown( - "[Help on Hygiene Issues](https://docs.datakitchen.io/article/dataops-testgen-help/profile-anomalies)" - ) + source_data_dialog(selected_row) + + # Need to render toolbar buttons after grid, so selection status is maintained + if actions_column.button( + "✓", help="Confirm this issue as relevant for this run", disabled=not selected + ): + fm.reset_post_updates( + do_disposition_update(selected, "Confirmed"), + as_toast=True, + clear_cache=True, + lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary], + ) + if actions_column.button( + "✘", 
help="Dismiss this issue as not relevant for this run", disabled=not selected + ): + fm.reset_post_updates( + do_disposition_update(selected, "Dismissed"), + as_toast=True, + clear_cache=True, + lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary], + ) + if actions_column.button( + "🔇", help="Mute this test to deactivate it for future runs", disabled=not selected + ): + fm.reset_post_updates( + do_disposition_update(selected, "Inactive"), + as_toast=True, + clear_cache=True, + lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary], + ) + if actions_column.button("↩︎", help="Clear action", disabled=not selected): + fm.reset_post_updates( + do_disposition_update(selected, "No Decision"), + as_toast=True, + clear_cache=True, + lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary], + ) + else: + st.markdown(":green[**No Hygiene Issues Found**]") - # with st.sidebar: - # st.divider() + # Help Links + st.markdown( + "[Help on Hygiene Issues](https://docs.datakitchen.io/article/dataops-testgen-help/profile-anomalies)" + ) @st.cache_data(show_spinner=False) diff --git a/testgen/ui/views/profiling_results.py b/testgen/ui/views/profiling_results.py index 10ed4ad..d6445e0 100644 --- a/testgen/ui/views/profiling_results.py +++ b/testgen/ui/views/profiling_results.py @@ -4,9 +4,10 @@ import testgen.ui.queries.profiling_queries as profiling_queries import testgen.ui.services.form_service as fm -import testgen.ui.services.toolbar_service as tb from testgen.common import date_service +from testgen.ui.components import widgets as testgen from testgen.ui.navigation.page import Page +from testgen.ui.services import project_service from testgen.ui.session import session from testgen.ui.views.profiling_details import show_profiling_detail @@ -14,174 +15,149 @@ class ProfilingResultsPage(Page): - path = "profiling:results" + path = "profiling-runs:results" can_activate: typing.ClassVar = [ lambda: 
session.authentication_status, + lambda: "run_id" in session.current_page_args or "profiling-runs", ] - def render(self) -> None: - export_container = fm.render_page_header( + def render(self, run_id: str, table_name: str | None = None, column_name: str | None = None, **_kwargs) -> None: + run_date, table_group_id, table_group_name, project_code = profiling_queries.lookup_db_parentage_from_run( + run_id + ) + run_date = date_service.get_timezoned_timestamp(st.session_state, run_date) + project_service.set_current_project(project_code) + + testgen.page_header( "Data Profiling Results", "https://docs.datakitchen.io/article/dataops-testgen-help/investigate-profiling", - lst_breadcrumbs=[ - {"label": "Overview", "path": "overview"}, - {"label": "Data Profiling", "path": "profiling"}, - {"label": "Profiling Results", "path": None}, + breadcrumbs=[ + { "label": "Profiling Runs", "path": "profiling-runs", "params": { "project_code": project_code } }, + { "label": f"{table_group_name} | {run_date}" }, ], ) - - if "project" not in st.session_state: - st.write("Select a Project from the Overview page.") + + table_filter_column, column_filter_column, export_button_column = st.columns([.3, .3, .4], vertical_alignment="bottom") + + with table_filter_column: + # Table Name filter + df = profiling_queries.run_table_lookup_query(table_group_id) + table_name = testgen.toolbar_select( + options=df, + value_column="table_name", + default_value=table_name, + bind_to_query="table_name", + label="Table Name", + ) + + with column_filter_column: + # Column Name filter + df = profiling_queries.run_column_lookup_query(table_group_id, table_name) + column_name = testgen.toolbar_select( + options=df, + value_column="column_name", + default_value=column_name, + bind_to_query="column_name", + label="Column Name", + disabled=not table_name, + ) + + # Use SQL wildcard to match all values + if not table_name: + table_name = "%%" + if not column_name: + column_name = "%%" + + # Display main results 
grid + df = profiling_queries.get_profiling_detail(run_id, table_name, column_name) + show_columns = [ + "schema_name", + "table_name", + "column_name", + "column_type", + "semantic_data_type", + "anomalies", + ] + + # Show CREATE script button + if len(df) > 0 and table_name != "%%": + with st.expander("📜 **Table CREATE script with suggested datatypes**"): + st.code(generate_create_script(df), "sql") + + selected_row = fm.render_grid_select(df, show_columns) + + with export_button_column: + testgen.flex_row_end() + render_export_button(df) + + # Display profiling for selected row + if not selected_row: + st.markdown(":orange[Select a row to see profiling details.]") else: - # Retrieve State Variables - - str_project = st.session_state["project"] - # Look for drill-down from another page - if "drill_profile_run" in st.session_state: - str_profile_run_id = st.session_state["drill_profile_run"] - else: - str_profile_run_id = "" - - # Setup Toolbar - tool_bar = tb.ToolBar(4, 0, 1, None) - - # Retrieve Choices data - if str_profile_run_id: - # Lookup profiling run date and table group name from passed profile run - str_lookfor_run_date, str_lookfor_table_group = profiling_queries.lookup_db_parentage_from_run( - str_profile_run_id - ) - str_lookfor_run_date = date_service.get_timezoned_timestamp(st.session_state, str_lookfor_run_date) - else: - str_lookfor_run_date = "" - str_lookfor_table_group = "" - - with tool_bar.long_slots[0]: - # Prompt for Table Group (with passed default) - df = profiling_queries.run_table_groups_lookup_query(str_project) - str_table_groups_id = fm.render_select( - "Table Group", df, "table_groups_name", "id", True, str_lookfor_table_group, True - ) - - with tool_bar.long_slots[1]: - # Prompt for Profile Run (with passed default) - df = profiling_queries.get_db_profile_run_choices(str_table_groups_id) - date_service.create_timezoned_column_in_dataframe( - st.session_state, df, "profile_run_date_with_timezone", "profile_run_date" - ) - 
str_profile_run_id = fm.render_select( - "Profile Run", df, "profile_run_date_with_timezone", "id", True, str_lookfor_run_date, True - ) - - # Reset passed parameter - # st.session_state["drill_profile_run"] = None - - with tool_bar.long_slots[2]: - # Prompt for Table Name - df = profiling_queries.run_table_lookup_query(str_table_groups_id) - str_table_name = fm.render_select("Table Name", df, "table_name", "table_name", False) - - with tool_bar.long_slots[3]: - # Prompt for Column Name - if str_table_name: - df = profiling_queries.run_column_lookup_query(str_table_groups_id, str_table_name) - str_column_name = fm.render_select("Column Name", df, "column_name", "column_name", False) - if not str_column_name: - # Use SQL wildcard to match all values - str_column_name = "%%" - else: - # Use SQL wildcard to match all values - str_table_name = "%%" - str_column_name = "%%" - - # Display main results grid - if str_profile_run_id: - df = profiling_queries.get_profiling_detail(str_profile_run_id, str_table_name, str_column_name) - show_columns = [ - "schema_name", - "table_name", - "column_name", - "column_type", - "semantic_data_type", - "anomalies", - ] - - # Show CREATE script button - if len(df) > 0 and str_table_name != "%%": - with st.expander("📜 **Table CREATE script with suggested datatypes**"): - st.code(generate_create_script(df), "sql") - - selected_row = fm.render_grid_select(df, show_columns) - - with export_container: - lst_export_columns = [ - "schema_name", - "table_name", - "column_name", - "position", - "column_type", - "general_type", - "semantic_table_type", - "semantic_data_type", - "datatype_suggestion", - "anomalies", - "record_ct", - "value_ct", - "distinct_value_ct", - "top_freq_values", - "null_value_ct", - "min_length", - "max_length", - "avg_length", - "distinct_std_value_ct", - "numeric_ct", - "date_ct", - "dummy_value_ct", - "zero_length_ct", - "lead_space_ct", - "quoted_value_ct", - "includes_digit_ct", - "embedded_space_ct", - 
"avg_embedded_spaces", - "min_text", - "max_text", - "std_pattern_match", - "distinct_pattern_ct", - "top_patterns", - "distinct_value_hash", - "min_value", - "min_value_over_0", - "max_value", - "avg_value", - "stdev_value", - "percentile_25", - "percentile_50", - "percentile_75", - "zero_value_ct", - "fractional_sum", - "min_date", - "max_date", - "before_1yr_date_ct", - "before_5yr_date_ct", - "within_1yr_date_ct", - "within_1mo_date_ct", - "future_date_ct", - "date_days_present", - "date_weeks_present", - "date_months_present", - "boolean_true_ct", - ] - lst_wrap_columns = ["top_freq_values", "top_patterns"] - str_caption = "{TIMESTAMP}" - fm.render_excel_export(df, lst_export_columns, "Profiling Results", str_caption, lst_wrap_columns) - - # Display profiling for selected row - if not selected_row: - st.markdown(":orange[Select a row to see profiling details.]") - else: - show_profiling_detail(selected_row[0], FORM_DATA_WIDTH) - else: - st.markdown(":orange[Select a profiling run.]") + show_profiling_detail(selected_row[0], FORM_DATA_WIDTH) + + +def render_export_button(df): + export_columns = [ + "schema_name", + "table_name", + "column_name", + "position", + "column_type", + "general_type", + "semantic_table_type", + "semantic_data_type", + "datatype_suggestion", + "anomalies", + "record_ct", + "value_ct", + "distinct_value_ct", + "top_freq_values", + "null_value_ct", + "min_length", + "max_length", + "avg_length", + "distinct_std_value_ct", + "numeric_ct", + "date_ct", + "dummy_value_ct", + "zero_length_ct", + "lead_space_ct", + "quoted_value_ct", + "includes_digit_ct", + "embedded_space_ct", + "avg_embedded_spaces", + "min_text", + "max_text", + "std_pattern_match", + "distinct_pattern_ct", + "top_patterns", + "distinct_value_hash", + "min_value", + "min_value_over_0", + "max_value", + "avg_value", + "stdev_value", + "percentile_25", + "percentile_50", + "percentile_75", + "zero_value_ct", + "fractional_sum", + "min_date", + "max_date", + 
"before_1yr_date_ct", + "before_5yr_date_ct", + "within_1yr_date_ct", + "within_1mo_date_ct", + "future_date_ct", + "date_days_present", + "date_weeks_present", + "date_months_present", + "boolean_true_ct", + ] + wrap_columns = ["top_freq_values", "top_patterns"] + caption = "{TIMESTAMP}" + fm.render_excel_export(df, export_columns, "Profiling Results", caption, wrap_columns) def generate_create_script(df): diff --git a/testgen/ui/views/profiling_summary.py b/testgen/ui/views/profiling_summary.py index db1eb0d..6fa7dea 100644 --- a/testgen/ui/views/profiling_summary.py +++ b/testgen/ui/views/profiling_summary.py @@ -6,9 +6,9 @@ import testgen.ui.services.database_service as db import testgen.ui.services.form_service as fm import testgen.ui.services.query_service as dq -import testgen.ui.services.toolbar_service as tb from testgen.commands.run_profiling_bridge import update_profile_run_status from testgen.common import date_service +from testgen.ui.components import widgets as testgen from testgen.ui.navigation.menu import MenuItem from testgen.ui.navigation.page import Page from testgen.ui.session import session @@ -17,52 +17,51 @@ class DataProfilingPage(Page): - path = "profiling" + path = "profiling-runs" can_activate: typing.ClassVar = [ lambda: session.authentication_status, ] menu_item = MenuItem(icon="problem", label="Data Profiling", order=1) - def render(self) -> None: - fm.render_page_header( + def render(self, project_code: str | None = None, table_group_id: str | None = None, **_kwargs) -> None: + project_code = project_code or session.project + + testgen.page_header( "Profiling Runs", "https://docs.datakitchen.io/article/dataops-testgen-help/investigate-profiling", - lst_breadcrumbs=[ - {"label": "Overview", "path": "overview"}, - {"label": "Data Profiling", "path": None}, - ], - boo_show_refresh=True, ) - if "project" not in st.session_state: - st.write("Select a Project from the Overview page.") - else: - str_project = st.session_state["project"] - - 
# Setup Toolbar - tool_bar = tb.ToolBar(3, 2, 0, None) + # Setup Toolbar + group_filter_column, actions_column = st.columns([.3, .7], vertical_alignment="bottom") + testgen.flex_row_end(actions_column) - with tool_bar.long_slots[0]: - # Table Groups selection -- optional criterion - df_tg = get_db_table_group_choices(str_project) - str_table_groups_id = fm.render_select( - "Table Group", df_tg, "table_groups_name", "id", boo_required=False, str_default=None - ) + with group_filter_column: + # Table Groups selection -- optional criterion + df_tg = get_db_table_group_choices(project_code) + table_groups_id = testgen.toolbar_select( + options=df_tg, + value_column="id", + display_column="table_groups_name", + default_value=table_group_id, + bind_to_query="table_group_id", + label="Table Group", + ) - df, show_columns = get_db_profiling_runs(str_project, str_table_groups_id) + df, show_columns = get_db_profiling_runs(project_code, table_groups_id) - time_columns = ["start_time"] - date_service.accommodate_dataframe_to_timezone(df, st.session_state, time_columns) + time_columns = ["start_time"] + date_service.accommodate_dataframe_to_timezone(df, st.session_state, time_columns) - dct_selected_rows = fm.render_grid_select(df, show_columns) + dct_selected_rows = fm.render_grid_select(df, show_columns) - open_drill_downs(dct_selected_rows, tool_bar.short_slots, self.router) + open_drill_downs(dct_selected_rows, actions_column, self.router) + fm.render_refresh_button(actions_column) - if dct_selected_rows: - show_record_detail(dct_selected_rows[0]) - st.markdown(":orange[Click a button to view profiling outcomes for the selected run.]") - else: - st.markdown(":orange[Select a run to see more information.]") + if dct_selected_rows: + show_record_detail(dct_selected_rows[0]) + st.markdown(":orange[Click a button to view profiling outcomes for the selected run.]") + else: + st.markdown(":orange[Select a run to see more information.]") @st.cache_data(show_spinner=False) @@ 
-107,29 +106,24 @@ def get_db_profiling_runs(str_project_code, str_tg=None): return db.retrieve_data(str_sql), show_columns -def open_drill_downs(dct_selected_rows, button_slots, router): +def open_drill_downs(dct_selected_rows, container, router): dct_selected_row = None if dct_selected_rows: dct_selected_row = dct_selected_rows[0] - if button_slots[0].button( + if container.button( f":{'gray' if not dct_selected_rows else 'green'}[Profiling →]", help="Review profiling characteristics for each data column", - use_container_width=True, disabled=not dct_selected_rows, ): - st.session_state["drill_profile_run"] = dct_selected_row["profiling_run_id"] - router.navigate("profiling:results") + router.navigate("profiling-runs:results", { "run_id": dct_selected_row["profiling_run_id"] }) - if button_slots[1].button( + if container.button( f":{'gray' if not dct_selected_rows else 'green'}[Hygiene →]", help="Review potential data problems identified in profiling", - use_container_width=True, disabled=not dct_selected_rows, ): - st.session_state["drill_profile_run"] = dct_selected_row["profiling_run_id"] - st.session_state["drill_profile_tg"] = dct_selected_row["table_groups_id"] - router.navigate("profiling:hygiene") + router.navigate("profiling-runs:hygiene", { "run_id": dct_selected_row["profiling_run_id"] }) def show_record_detail(dct_selected_row): diff --git a/testgen/ui/views/project_settings.py b/testgen/ui/views/project_settings.py index 1aece63..603d104 100644 --- a/testgen/ui/views/project_settings.py +++ b/testgen/ui/views/project_settings.py @@ -3,9 +3,10 @@ import streamlit as st from testgen.commands.run_observability_exporter import test_observability_exporter +from testgen.ui.components import widgets as testgen from testgen.ui.navigation.menu import MenuItem from testgen.ui.navigation.page import Page -from testgen.ui.services import form_service, query_service +from testgen.ui.services import form_service, project_service from testgen.ui.session import 
session from testgen.ui.views.app_log_modal import view_log_file @@ -18,18 +19,15 @@ class ProjectSettingsPage(Page): ] menu_item = MenuItem(icon="settings", label="Settings", order=100) - def render(self) -> None: - form_service.render_page_header( + def render(self, project_code: str | None = None, **_kwargs) -> None: + project = project_service.get_project_by_code(project_code or session.project) + + testgen.page_header( "Settings", "https://docs.datakitchen.io/article/dataops-testgen-help/configuration", - lst_breadcrumbs=[ - {"label": "Overview", "path": "overview"}, - {"label": "Settings", "path": None}, - ], ) - project = get_current_project(session.project) - + testgen.whitespace(1) form_service.render_edit_form( "", project, @@ -59,13 +57,6 @@ def render(self) -> None: view_log_file(col3) -@st.cache_data(show_spinner=False) -def get_current_project(code: str): - if not code: - return None - return query_service.get_project_by_code(session.dbschema, code) - - def set_add_new_project(): session.add_project = True diff --git a/testgen/ui/views/table_groups.py b/testgen/ui/views/table_groups.py index 43bbc0c..8880c57 100644 --- a/testgen/ui/views/table_groups.py +++ b/testgen/ui/views/table_groups.py @@ -8,9 +8,10 @@ import testgen.ui.services.connection_service as connection_service import testgen.ui.services.form_service as fm import testgen.ui.services.table_group_service as table_group_service -import testgen.ui.services.toolbar_service as tb from testgen.commands.run_profiling_bridge import run_profiling_in_background +from testgen.ui.components import widgets as testgen from testgen.ui.navigation.page import Page +from testgen.ui.services import project_service from testgen.ui.services.string_service import empty_if_null from testgen.ui.session import session @@ -20,32 +21,25 @@ class TableGroupsPage(Page): can_activate: typing.ClassVar = [ lambda: authentication_service.current_user_has_admin_role() or "overview", lambda: session.authentication_status, 
+ lambda: "connection_id" in session.current_page_args or "connections", ] - def render(self, connection_id: int | None = None) -> None: - fm.render_page_header( + def render(self, connection_id: str, **_kwargs) -> None: + connection = connection_service.get_by_id(connection_id, hide_passwords=False) + project_code = connection["project_code"] + project_service.set_current_project(project_code) + + testgen.page_header( "Table Groups", "https://docs.datakitchen.io/article/dataops-testgen-help/create-a-table-group", - lst_breadcrumbs=[ - {"label": "Overview", "path": "overview"}, - {"label": "Connections", "path": "connections"}, - {"label": "Table Groups", "path": None}, + breadcrumbs=[ + { "label": "Connections", "path": "connections", "params": { "project_code": project_code } }, + { "label": connection["connection_name"] }, ], ) - # Get page parameters from session - project_code = st.session_state["project"] - connection = ( - connection_service.get_by_id(connection_id, hide_passwords=False) - if connection_id - else st.session_state["connection"] - ) - connection_id = connection["connection_id"] - - tool_bar = tb.ToolBar(1, 5, 0, None) - - with tool_bar.long_slots[0]: - st.selectbox("Connection", [connection["connection_name"]], disabled=True) + _, actions_column = st.columns([.1, .9], vertical_alignment="bottom") + testgen.flex_row_end(actions_column) df = table_group_service.get_by_connection(project_code, connection_id) @@ -71,33 +65,30 @@ def render(self, connection_id: int | None = None) -> None: selected = fm.render_grid_select(df, show_columns, show_column_headers=show_column_headers) - if tool_bar.short_slots[1].button( - "➕ Add", help="Add a new Table Group", use_container_width=True # NOQA RUF001 + if actions_column.button( + ":material/add: Add", help="Add a new Table Group" ): add_table_group_dialog(project_code, connection) disable_buttons = selected is None - if tool_bar.short_slots[2].button( - "🖊️ Edit", help="Edit the selected Table Group", 
disabled=disable_buttons, use_container_width=True + if actions_column.button( + ":material/edit: Edit", help="Edit the selected Table Group", disabled=disable_buttons ): edit_table_group_dialog(project_code, connection, selected) - if tool_bar.short_slots[3].button( - "❌ Delete", help="Delete the selected Table Group", disabled=disable_buttons, use_container_width=True + if actions_column.button( + ":material/delete: Delete", help="Delete the selected Table Group", disabled=disable_buttons ): delete_table_group_dialog(selected) - if tool_bar.short_slots[4].button( + if actions_column.button( f":{'gray' if disable_buttons else 'green'}[Test Suites →]", help="Create or edit Test Suites for the selected Table Group", disabled=disable_buttons, - use_container_width=True, ): - st.session_state["table_group"] = selected[0] - self.router.navigate( - "connections:test-suites", - {"connection_id": connection_id, "table_group_id": selected[0]["id"]}, + "test-suites", + {"table_group_id": selected[0]["id"]}, ) if not selected: diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py index e0f58cd..f88148a 100644 --- a/testgen/ui/views/test_definitions.py +++ b/testgen/ui/views/test_definitions.py @@ -7,12 +7,13 @@ import testgen.ui.services.form_service as fm import testgen.ui.services.query_service as dq +import testgen.ui.services.table_group_service as table_group_service import testgen.ui.services.test_definition_service as test_definition_service -import testgen.ui.services.toolbar_service as tb +import testgen.ui.services.test_suite_service as test_suite_service from testgen.common import date_service -from testgen.ui.navigation.menu import MenuItem +from testgen.ui.components import widgets as testgen from testgen.ui.navigation.page import Page -from testgen.ui.services import authentication_service +from testgen.ui.services import authentication_service, project_service from testgen.ui.services.string_service import empty_if_null, 
snake_case_to_title_case from testgen.ui.session import session from testgen.ui.views.profiling_modal import view_profiling_button @@ -21,138 +22,119 @@ class TestDefinitionsPage(Page): - path = "test-definitions" + path = "test-suites:definitions" can_activate: typing.ClassVar = [ lambda: session.authentication_status, + lambda: "test_suite_id" in session.current_page_args or "test-suites", ] - breadcrumbs: typing.ClassVar = [ - {"label": "Overview", "path": "overview"}, - {"label": "Tests Definitions", "path": None}, - ] - menu_item = MenuItem(icon="list_alt", label="Tests Definitions", order=4) - - def render(self, **_) -> None: - # Get page parameters from session - project_code = st.session_state["project"] - - connection = st.session_state["connection"] if "connection" in st.session_state.keys() else None - table_group = st.session_state["table_group"] if "table_group" in st.session_state.keys() else None - test_suite = st.session_state["test_suite"] if "test_suite" in st.session_state.keys() else None - - str_table_name = st.session_state["table_name"] if "table_name" in st.session_state.keys() else None - str_column_name = None + def render(self, test_suite_id: str, table_name: str | None = None, column_name: str | None = None, **_kwargs) -> None: + test_suite = test_suite_service.get_by_id(test_suite_id) + table_group = table_group_service.get_by_id(test_suite["table_groups_id"]) + project_code = table_group["project_code"] + project_service.set_current_project(project_code) - export_container = fm.render_page_header( + testgen.page_header( "Test Definitions", "https://docs.datakitchen.io/article/dataops-testgen-help/testgen-test-types", - lst_breadcrumbs=self.breadcrumbs, - boo_show_refresh=True, + breadcrumbs=[ + { "label": "Test Suites", "path": "test-suites", "params": { "project_code": project_code } }, + { "label": test_suite["test_suite"] }, + ], ) - tool_bar = tb.ToolBar(5, 6, 4, None, multiline=True) + table_filter_column, column_filter_column, 
table_actions_column = st.columns([.3, .3, .4], vertical_alignment="bottom") + testgen.flex_row_end(table_actions_column) + + actions_column, disposition_column = st.columns([.5, .5]) + testgen.flex_row_start(actions_column) + testgen.flex_row_end(disposition_column) + + with table_filter_column: + table_options = run_table_lookup_query(table_group["id"]) + table_name = testgen.toolbar_select( + options=table_options, + value_column="table_name", + default_value=table_name, + bind_to_query="table_name", + required=True, + label="Table Name", + ) + with column_filter_column: + column_options = get_column_names(table_group["id"], table_name) + column_name = testgen.toolbar_select( + options=column_options, + default_value=column_name, + bind_to_query="column_name", + label="Column Name", + disabled=not table_name, + ) - with tool_bar.long_slots[0]: - str_connection_id, connection = prompt_for_connection(session.project, connection) + with disposition_column: + str_help = "Toggle on to perform actions on multiple test definitions" + do_multi_select = st.toggle("Multi-Select", help=str_help) - # Prompt for Table Group - with tool_bar.long_slots[1]: - str_table_groups_id, str_connection_id, str_schema, table_group = prompt_for_table_group( - session.project, table_group, str_connection_id - ) + if actions_column.button( + ":material/add: Add", help="Add a new Test Definition" + ): + add_test_dialog(project_code, table_group, test_suite, table_name, column_name) - # Prompt for Test Suite - if str_table_groups_id: - with tool_bar.long_slots[2]: - str_test_suite, test_suite = prompt_for_test_suite(str_table_groups_id, test_suite) - with tool_bar.long_slots[3]: - str_table_name = prompt_for_table_name(str_table_groups_id, str_table_name) - if str_table_name: - with tool_bar.long_slots[4]: - str_column_name = prompt_for_column_name(str_table_groups_id, str_table_name) - - if str_test_suite and str_table_name: - with tool_bar.short_slots[5]: - str_help = "Toggle on to perform 
actions on multiple test definitions" - do_multi_select = st.toggle("Multi-Select", help=str_help) - - if tool_bar.short_slots[0].button( - "➕ Add", help="Add a new Test Definition", use_container_width=True # NOQA RUF001 - ): - add_test_dialog(project_code, table_group, test_suite, str_table_name, str_column_name) - - selected = show_test_defs_grid( - session.project, str_test_suite, str_table_name, str_column_name, do_multi_select, export_container, - str_table_groups_id - ) - - # Display buttons - if tool_bar.button_slots[0].button("✓", help="Activate for future runs", disabled=not selected): - fm.reset_post_updates( - update_test_definition(selected, "test_active", True, "Activated"), - as_toast=True, - clear_cache=True, - lst_cached_functions=[], - ) - if tool_bar.button_slots[1].button("✘", help="Inactivate Test for future runs", disabled=not selected): - fm.reset_post_updates( - update_test_definition(selected, "test_active", False, "Inactivated"), - as_toast=True, - clear_cache=True, - lst_cached_functions=[], - ) - if tool_bar.button_slots[2].button( - "🔒", help="Protect from future test generation", disabled=not selected - ): - fm.reset_post_updates( - update_test_definition(selected, "lock_refresh", True, "Locked"), - as_toast=True, - clear_cache=True, - lst_cached_functions=[], - ) - if tool_bar.button_slots[3].button( - "🔐", help="Unlock for future test generation", disabled=not selected - ): - fm.reset_post_updates( - update_test_definition(selected, "lock_refresh", False, "Unlocked"), - as_toast=True, - clear_cache=True, - lst_cached_functions=[], - ) - - if selected: - selected_test_def = selected[0] - - if tool_bar.short_slots[1].button( - "🖊️ Edit", # RUF001 - help="Edit the Test Definition", - use_container_width=True, - disabled=not selected, - ): - edit_test_dialog(project_code, table_group, test_suite, str_table_name, str_column_name, selected_test_def) - - if tool_bar.short_slots[2].button( - "❌ Delete", - help="Delete the selected Test 
Definition", - use_container_width=True, - disabled=not selected, - ): - delete_test_dialog(selected_test_def) + selected = show_test_defs_grid( + session.project, test_suite["test_suite"], table_name, column_name, do_multi_select, table_actions_column, + table_group["id"] + ) + fm.render_refresh_button(table_actions_column) + + # Display buttons + if disposition_column.button("✓", help="Activate for future runs", disabled=not selected): + fm.reset_post_updates( + update_test_definition(selected, "test_active", True, "Activated"), + as_toast=True, + clear_cache=True, + lst_cached_functions=[], + ) + if disposition_column.button("✘", help="Inactivate Test for future runs", disabled=not selected): + fm.reset_post_updates( + update_test_definition(selected, "test_active", False, "Inactivated"), + as_toast=True, + clear_cache=True, + lst_cached_functions=[], + ) + if disposition_column.button( + "🔒", help="Protect from future test generation", disabled=not selected + ): + fm.reset_post_updates( + update_test_definition(selected, "lock_refresh", True, "Locked"), + as_toast=True, + clear_cache=True, + lst_cached_functions=[], + ) + if disposition_column.button( + "🔐", help="Unlock for future test generation", disabled=not selected + ): + fm.reset_post_updates( + update_test_definition(selected, "lock_refresh", False, "Unlocked"), + as_toast=True, + clear_cache=True, + lst_cached_functions=[], + ) - else: - st.markdown(":orange[Select a Test Suite and Table Name to view Test Definition details.]") + if selected: + selected_test_def = selected[0] + if actions_column.button( + ":material/edit: Edit", + help="Edit the Test Definition", + disabled=not selected, + ): + edit_test_dialog(project_code, table_group, test_suite, table_name, column_name, selected_test_def) -class TestDefinitionsPageFromSuite(TestDefinitionsPage): - path = "connections:test-definitions" - breadcrumbs: typing.ClassVar = [ - {"label": "Overview", "path": "overview"}, - {"label": "Connections", "path": 
"connections"}, - {"label": "Table Groups", "path": "connections:table-groups"}, - {"label": "Test Suites", "path": "connections:test-suites"}, - {"label": "Test Definitions", "path": None}, - ] - menu_item = None + if actions_column.button( + ":material/delete: Delete", + help="Delete the selected Test Definition", + disabled=not selected, + ): + delete_test_dialog(selected_test_def) @st.dialog("Delete Test") @@ -200,7 +182,6 @@ def show_test_form_by_id(test_definition_id): selected_test_raw = test_definition_service.get_test_definitions(test_definition_ids=[test_definition_id]) test_definition = selected_test_raw.iloc[0].to_dict() - mode = "edit" project_code = test_definition["project_code"] table_group_id = test_definition["table_groups_id"] test_suite_name = test_definition["test_suite"] @@ -855,12 +836,6 @@ def generate_test_defs_help(str_test_type): return str_help -@st.cache_data(show_spinner=False) -def run_project_lookup_query(): - str_schema = st.session_state["dbschema"] - return dq.run_project_lookup_query(str_schema) - - @st.cache_data(show_spinner=False) def run_test_type_lookup_query(str_test_type=None, boo_show_referential=True, boo_show_table=True, boo_show_column=True, boo_show_custom=True): @@ -869,12 +844,6 @@ def run_test_type_lookup_query(str_test_type=None, boo_show_referential=True, bo boo_show_column, boo_show_custom) -@st.cache_data(show_spinner=False) -def run_connections_lookup_query(str_project_code): - str_schema = st.session_state["dbschema"] - return dq.run_connections_lookup_query(str_schema, str_project_code) - - @st.cache_data(show_spinner=False) def run_table_groups_lookup_query(str_project_code, str_connection_id=None, table_group_id=None): str_schema = st.session_state["dbschema"] @@ -899,99 +868,6 @@ def run_test_suite_lookup_query(str_table_groups_id, test_suite_name=None): return dq.run_test_suite_lookup_by_tgroup_query(str_schema, str_table_groups_id, test_suite_name) -def prompt_for_connection(str_project_code, 
selected_connection): - str_id = None - - df = run_connections_lookup_query(str_project_code) - lst_choices = df["connection_name"].tolist() - - if selected_connection: - connection_name = selected_connection["connection_name"] - selected_connection_index = lst_choices.index(connection_name) - else: - selected_connection_index = 0 - - str_name = st.selectbox("Connection", lst_choices, index=selected_connection_index) - if str_name: - str_id = df.loc[df["connection_name"] == str_name, "id"].iloc[0] - connection = df.loc[df["connection_name"] == str_name].iloc[0] - return str_id, connection - - -def prompt_for_table_group(str_project_code, selected_table_group, str_connection_id): - str_id = None - str_schema = None - table_group = None - - df = run_table_groups_lookup_query(str_project_code, str_connection_id) - lst_choices = df["table_groups_name"].tolist() - - table_group_name = None - if selected_table_group: - table_group_name = selected_table_group["table_groups_name"] - - if table_group_name and table_group_name in lst_choices: - selected_table_group_index = lst_choices.index(table_group_name) - else: - selected_table_group_index = 0 - - str_name = st.selectbox("Table Group", lst_choices, index=selected_table_group_index) - if str_name: - str_id = df.loc[df["table_groups_name"] == str_name, "id"].iloc[0] - str_connection_id = df.loc[df["table_groups_name"] == str_name, "connection_id"].iloc[0] - str_schema = df.loc[df["table_groups_name"] == str_name, "table_group_schema"].iloc[0] - table_group = df.loc[df["table_groups_name"] == str_name].iloc[0] - return str_id, str_connection_id, str_schema, table_group - - -def prompt_for_test_suite(str_table_groups_id, selected_test_suite): - df = run_test_suite_lookup_query(str_table_groups_id) - lst_choices = df["test_suite"].tolist() - - test_suite = None - test_suite_name = None - if selected_test_suite: - test_suite_name = selected_test_suite["test_suite"] - - if test_suite_name and test_suite_name in lst_choices: - 
test_suite_index = lst_choices.index(test_suite_name) - else: - test_suite_index = 0 - - str_name = st.selectbox("Test Suite", lst_choices, index=test_suite_index) - if str_name: - test_suite = df.loc[df["test_suite"] == str_name].iloc[0] - - return str_name, test_suite - - -def prompt_for_table_name(str_table_groups_id, selected_table_name): - df = run_table_lookup_query(str_table_groups_id) - lst_choices = df["table_name"].tolist() - - if selected_table_name and selected_table_name in lst_choices: - table_name_index = lst_choices.index(selected_table_name) + 1 - else: - table_name_index = 0 - - def table_name_callback(): - st.session_state["table_name"] = st.session_state.new_table_name - - str_name = selectbox( - "Table Name", lst_choices, index=table_name_index, key="new_table_name", on_change=table_name_callback - ) - - return str_name - - -def prompt_for_column_name(str_table_groups_id, str_table_name): - lst_choices = get_column_names(str_table_groups_id, str_table_name) - # Using extras selectbox to allow no entry - str_name = selectbox("Column Name", lst_choices, key="column-name-main-drop-down") - - return str_name - - def get_column_names(str_table_groups_id, str_table_name): df = run_column_lookup_query(str_table_groups_id, str_table_name) lst_choices = df["column_name"].tolist() diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py index 8f2e815..833f82f 100644 --- a/testgen/ui/views/test_results.py +++ b/testgen/ui/views/test_results.py @@ -9,10 +9,10 @@ import testgen.ui.services.database_service as db import testgen.ui.services.form_service as fm import testgen.ui.services.query_service as dq -import testgen.ui.services.toolbar_service as tb from testgen.common import ConcatColumnList, date_service from testgen.ui.components import widgets as testgen from testgen.ui.navigation.page import Page +from testgen.ui.services import project_service from testgen.ui.services.string_service import empty_if_null from 
testgen.ui.session import session from testgen.ui.views.profiling_modal import view_profiling_button @@ -25,143 +25,121 @@ class TestResultsPage(Page): path = "test-runs:results" can_activate: typing.ClassVar = [ lambda: session.authentication_status, - lambda: session.project != None or "overview", + lambda: "run_id" in session.current_page_args or "test-runs", ] - def render(self) -> None: - export_container = fm.render_page_header( + def render(self, run_id: str, status: str | None = None, **_kwargs) -> None: + run_date, test_suite_name, project_code = get_drill_test_run(run_id) + run_date = date_service.get_timezoned_timestamp(st.session_state, run_date) + project_service.set_current_project(project_code) + + testgen.page_header( "Test Results", "https://docs.datakitchen.io/article/dataops-testgen-help/test-results", - lst_breadcrumbs=[ - {"label": "Overview", "path": "overview"}, - {"label": "Test Runs", "path": "test-runs"}, - {"label": "Test Results", "path": None}, + breadcrumbs=[ + { "label": "Test Runs", "path": "test-runs", "params": { "project_code": project_code } }, + { "label": f"{test_suite_name} | {run_date}" }, ], ) - str_project = st.session_state["project"] if "project" in st.session_state else None - - # Look for drill-down from another page - if "drill_test_run" in st.session_state: - str_sel_test_run = st.session_state["drill_test_run"] - else: - str_sel_test_run = None - - if not str_project: - st.write("Choose a Project from the menu.") - else: - # Setup Toolbar - tool_bar = tb.ToolBar(3, 1, 4, None) - - # Lookup Test Run - if str_sel_test_run: - df = get_drill_test_run(str_sel_test_run) - if not df.empty: - with tool_bar.long_slots[0]: - time_columns = ["test_date"] - date_service.accommodate_dataframe_to_timezone(df, st.session_state, time_columns) - df["description"] = df["test_date"] + " | " + df["test_suite_description"] - str_sel_test_run = fm.render_select( - "Test Run", df, "description", "test_run_id", boo_required=True, 
boo_disabled=True - ) - - if str_sel_test_run: - with tool_bar.long_slots[1]: - lst_status_options = [ - "Failures and Warnings", - "Failed Tests", - "Tests with Warnings", - "Passed Tests", - ] - str_sel_status = st.selectbox("Result Priority", lst_status_options) - - with tool_bar.short_slots[0]: - str_help = "Toggle on to perform actions on multiple results" - do_multi_select = st.toggle("Multi-Select", help=str_help) - - match str_sel_status: - case "Failures and Warnings": - str_sel_status = "'Failed','Warning'" - case "Failed Tests": - str_sel_status = "'Failed'" - case "Tests with Warnings": - str_sel_status = "'Warning'" - case "Passed Tests": - str_sel_status = "'Passed'" - - # Display main grid and retrieve selection - selected = show_result_detail(str_sel_test_run, str_sel_status, do_multi_select, export_container) - - # Need to render toolbar buttons after grid, so selection status is maintained - disable_dispo = True if not selected or str_sel_status == "'Passed'" else False - if tool_bar.button_slots[0].button( - "✓", help="Confirm this issue as relevant for this run", disabled=disable_dispo - ): - fm.reset_post_updates( - do_disposition_update(selected, "Confirmed"), - as_toast=True, - clear_cache=True, - lst_cached_functions=[get_test_disposition], - ) - if tool_bar.button_slots[1].button( - "✘", help="Dismiss this issue as not relevant for this run", disabled=disable_dispo - ): - fm.reset_post_updates( - do_disposition_update(selected, "Dismissed"), - as_toast=True, - clear_cache=True, - lst_cached_functions=[get_test_disposition], - ) - if tool_bar.button_slots[2].button( - "🔇", help="Mute this test to deactivate it for future runs", disabled=not selected - ): - fm.reset_post_updates( - do_disposition_update(selected, "Inactive"), - as_toast=True, - clear_cache=True, - lst_cached_functions=[get_test_disposition], - ) - if tool_bar.button_slots[3].button("⟲", help="Clear action", disabled=not selected): - fm.reset_post_updates( - 
do_disposition_update(selected, "No Decision"), - as_toast=True, - clear_cache=True, - lst_cached_functions=[get_test_disposition], - ) - - # Help Links - st.markdown( - "[Help on Test Types](https://docs.datakitchen.io/article/dataops-testgen-help/testgen-test-types)" + # Display summary bar + tests_summary = get_test_result_summary(run_id) + testgen.summary_bar(items=tests_summary, key="test_results", height=40, width=800) + + # Setup Toolbar + status_filter_column, actions_column, export_button_column = st.columns([.3, .5, .2], vertical_alignment="bottom") + testgen.flex_row_end(actions_column) + testgen.flex_row_end(export_button_column) + + with status_filter_column: + status_options = [ + "Failures and Warnings", + "Failed Tests", + "Tests with Warnings", + "Passed Tests", + ] + status = testgen.toolbar_select( + options=status_options, + default_value=status, + required=True, + bind_to_query="status", + label="Result Status", ) - # with st.sidebar: - # st.divider() - - -@st.cache_data(show_spinner=ALWAYS_SPIN) -def run_test_suite_lookup_by_project_query(str_project_code): - str_schema = st.session_state["dbschema"] - return dq.run_test_suite_lookup_by_project_query(str_schema, str_project_code) - + with actions_column: + str_help = "Toggle on to perform actions on multiple results" + do_multi_select = st.toggle("Multi-Select", help=str_help) + + match status: + case "Failures and Warnings": + status = "'Failed','Warning'" + case "Failed Tests": + status = "'Failed'" + case "Tests with Warnings": + status = "'Warning'" + case "Passed Tests": + status = "'Passed'" + + # Display main grid and retrieve selection + selected = show_result_detail(run_id, status, do_multi_select, export_button_column) + + # Need to render toolbar buttons after grid, so selection status is maintained + disable_dispo = True if not selected or status == "'Passed'" else False + if actions_column.button( + "✓", help="Confirm this issue as relevant for this run", disabled=disable_dispo + 
): + fm.reset_post_updates( + do_disposition_update(selected, "Confirmed"), + as_toast=True, + clear_cache=True, + lst_cached_functions=[get_test_disposition], + ) + if actions_column.button( + "✘", help="Dismiss this issue as not relevant for this run", disabled=disable_dispo + ): + fm.reset_post_updates( + do_disposition_update(selected, "Dismissed"), + as_toast=True, + clear_cache=True, + lst_cached_functions=[get_test_disposition], + ) + if actions_column.button( + "🔇", help="Mute this test to deactivate it for future runs", disabled=not selected + ): + fm.reset_post_updates( + do_disposition_update(selected, "Inactive"), + as_toast=True, + clear_cache=True, + lst_cached_functions=[get_test_disposition], + ) + if actions_column.button("⟲", help="Clear action", disabled=not selected): + fm.reset_post_updates( + do_disposition_update(selected, "No Decision"), + as_toast=True, + clear_cache=True, + lst_cached_functions=[get_test_disposition], + ) -@st.cache_data(show_spinner=ALWAYS_SPIN) -def run_test_run_lookup_by_date(str_project_code, str_run_date): - str_schema = st.session_state["dbschema"] - return dq.run_test_run_lookup_by_date(str_schema, str_project_code, str_run_date) + # Help Links + st.markdown( + "[Help on Test Types](https://docs.datakitchen.io/article/dataops-testgen-help/testgen-test-types)" + ) @st.cache_data(show_spinner=ALWAYS_SPIN) def get_drill_test_run(str_test_run_id): str_schema = st.session_state["dbschema"] str_sql = f""" - SELECT tr.id::VARCHAR as test_run_id, - tr.test_starttime as test_date, - ts.test_suite as test_suite_description + SELECT tr.test_starttime as test_date, + ts.test_suite, + ts.project_code FROM {str_schema}.test_runs tr INNER JOIN {str_schema}.test_suites ts ON tr.test_suite_id = ts.id WHERE tr.id = '{str_test_run_id}'::UUID; """ - return db.retrieve_data(str_sql) + df = db.retrieve_data(str_sql) + if not df.empty: + return df.at[0, "test_date"], df.at[0, "test_suite"], df.at[0, "project_code"] 
@st.cache_data(show_spinner="Retrieving Results") @@ -574,10 +552,6 @@ def show_test_def_detail(str_test_def_id): def show_result_detail(str_run_id, str_sel_test_status, do_multi_select, export_container): - # Display summary bar - tests_summary = get_test_result_summary(str_run_id) - testgen.summary_bar(items=tests_summary, key="test_results", height=40, width=800) - # Retrieve test results (always cached, action as null) df = get_test_results(str_run_id, str_sel_test_status) # Retrieve disposition action (cache refreshed) diff --git a/testgen/ui/views/test_runs.py b/testgen/ui/views/test_runs.py index bdaa407..3f957d2 100644 --- a/testgen/ui/views/test_runs.py +++ b/testgen/ui/views/test_runs.py @@ -7,8 +7,8 @@ import testgen.ui.services.form_service as fm import testgen.ui.services.query_service as dq import testgen.ui.services.test_run_service as test_run_service -import testgen.ui.services.toolbar_service as tb from testgen.common import date_service +from testgen.ui.components import widgets as testgen from testgen.ui.navigation.menu import MenuItem from testgen.ui.navigation.page import Page from testgen.ui.session import session @@ -22,63 +22,66 @@ class TestRunsPage(Page): ] menu_item = MenuItem(icon="labs", label="Data Quality Testing", order=2) - def render(self) -> None: - fm.render_page_header( + def render(self, project_code: str | None = None, table_group_id: str | None = None, test_suite_id: str | None = None, **_kwargs) -> None: + project_code = project_code or st.session_state["project"] + + testgen.page_header( "Test Runs", "https://docs.datakitchen.io/article/dataops-testgen-help/test-results", - lst_breadcrumbs=[ - {"label": "Overview", "path": "overview"}, - {"label": "Test Runs", "path": None}, - ], - boo_show_refresh=True, ) - if "project" not in st.session_state: - st.write("You must select a Project in the Home Page.") + # Setup Toolbar + group_filter_column, suite_filter_column, actions_column = st.columns([.3, .3, .4], 
vertical_alignment="bottom") + testgen.flex_row_end(actions_column) + + with group_filter_column: + # Table Groups selection -- optional criterion + df_tg = get_db_table_group_choices(project_code) + table_groups_id = testgen.toolbar_select( + options=df_tg, + value_column="id", + display_column="table_groups_name", + default_value=table_group_id, + bind_to_query="table_group_id", + label="Table Group", + ) + + with suite_filter_column: + # Table Groups selection -- optional criterion + df_ts = get_db_test_suite_choices(project_code, table_groups_id) + test_suite_id = testgen.toolbar_select( + options=df_ts, + value_column="id", + display_column="test_suite", + default_value=test_suite_id, + bind_to_query="test_suite_id", + label="Test Suite", + ) + + df, show_columns = get_db_test_runs(project_code, table_groups_id, test_suite_id) + + time_columns = ["run_date"] + date_service.accommodate_dataframe_to_timezone(df, st.session_state, time_columns) + + dct_selected_rows = fm.render_grid_select(df, show_columns) + dct_selected_row = dct_selected_rows[0] if dct_selected_rows else None + + if actions_column.button( + f":{'gray' if not dct_selected_row else 'green'}[Test Results →]", + help="Review test results for the selected run", + disabled=not dct_selected_row, + ): + self.router.navigate("test-runs:results", { "run_id": dct_selected_row["test_run_id"] }) + + fm.render_refresh_button(actions_column) + + if dct_selected_rows: + open_record_detail( + dct_selected_rows[0], + ) + st.markdown(":orange[Click button to access test results for selected run.]") else: - str_project = st.session_state["project"] - - # Setup Toolbar - tool_bar = tb.ToolBar(4, 1, 0, None) - - with tool_bar.long_slots[0]: - # Table Groups selection -- optional criterion - df_tg = get_db_table_group_choices(str_project) - str_table_groups_id = fm.render_select( - "Table Group", df_tg, "table_groups_name", "id", boo_required=False, str_default=None - ) - - with tool_bar.long_slots[1]: - # Table 
Groups selection -- optional criterion - df_ts = get_db_test_suite_choices(str_project, str_table_groups_id) - str_test_suite_id = fm.render_select( - "Test Suite", df_ts, "test_suite_description", "id", boo_required=False, str_default=None - ) - - df, show_columns = get_db_test_runs(str_project, str_table_groups_id, str_test_suite_id) - - time_columns = ["run_date"] - date_service.accommodate_dataframe_to_timezone(df, st.session_state, time_columns) - - dct_selected_rows = fm.render_grid_select(df, show_columns) - dct_selected_row = dct_selected_rows[0] if dct_selected_rows else None - - if tool_bar.short_slots[0].button( - f":{'gray' if not dct_selected_row else 'green'}[Test Results →]", - help="Review test results for the selected run", - use_container_width=True, - disabled=not dct_selected_row, - ): - st.session_state["drill_test_run"] = dct_selected_row["test_run_id"] - self.router.navigate("test-runs:results") - - if dct_selected_rows: - open_record_detail( - dct_selected_rows[0], - ) - st.markdown(":orange[Click button to access test results for selected run.]") - else: - st.markdown(":orange[Select a run to access test results.]") + st.markdown(":orange[Select a run to access test results.]") @st.cache_data(show_spinner=False) diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py index 8663b7c..7915909 100644 --- a/testgen/ui/views/test_suites.py +++ b/testgen/ui/views/test_suites.py @@ -7,69 +7,60 @@ import testgen.ui.services.authentication_service as authentication_service import testgen.ui.services.form_service as fm +import testgen.ui.services.query_service as dq import testgen.ui.services.test_suite_service as test_suite_service from testgen.commands.run_execute_tests import run_execution_steps_in_background from testgen.commands.run_generate_tests import run_test_gen_queries from testgen.commands.run_observability_exporter import export_test_results from testgen.ui.components import widgets as testgen +from 
testgen.ui.navigation.menu import MenuItem from testgen.ui.navigation.page import Page -from testgen.ui.services import connection_service, table_group_service from testgen.ui.services.string_service import empty_if_null from testgen.ui.session import session class TestSuitesPage(Page): - path = "connections:test-suites" + path = "test-suites" can_activate: typing.ClassVar = [ lambda: authentication_service.current_user_has_admin_role() or "overview", lambda: session.authentication_status, ] + menu_item = MenuItem(icon="list_alt", label="Test Suites", order=4) - def render(self, connection_id: str | None = None, table_group_id: str | None = None) -> None: - fm.render_page_header( + def render(self, project_code: str | None = None, table_group_id: str | None = None, **_kwargs) -> None: + project_code = st.session_state["project"] + + testgen.page_header( "Test Suites", "https://docs.datakitchen.io/article/dataops-testgen-help/create-a-test-suite", - lst_breadcrumbs=[ - {"label": "Overview", "path": "overview"}, - {"label": "Connections", "path": "connections"}, - {"label": "Table Groups", "path": "connections:table-groups"}, - {"label": "Test Suites", "path": None}, - ], ) - # Get page parameters from session - project_code = st.session_state["project"] - connection = connection_service.get_by_id(connection_id) if connection_id else st.session_state["connection"] - - table_group = st.session_state.get("table_group") - if table_group_id: - table_group = table_group_service.get_by_id(table_group_id) - table_group = table_group.iloc[0] - - connection_id = connection["connection_id"] - table_group_id = table_group["id"] - - tool_bar = st.columns([.2, .2, .4, .2], vertical_alignment="bottom") - - with tool_bar[0]: - st.selectbox("Connection", [connection["connection_name"]], disabled=True) + group_filter_column, actions_column = st.columns([.2, .8], vertical_alignment="bottom") + testgen.flex_row_end(actions_column) + + with group_filter_column: + df_tg = 
get_db_table_group_choices(project_code) + table_group_id = testgen.toolbar_select( + options=df_tg, + value_column="id", + display_column="table_groups_name", + default_value=table_group_id, + label="Table Group", + bind_to_query="table_group_id", + ) - with tool_bar[1]: - st.selectbox("Table Group", [table_group["table_groups_name"]], disabled=True) + df = test_suite_service.get_by_project(project_code, table_group_id) - with tool_bar[3]: + with actions_column: st.button( ":material/add: Add Test Suite", key="test_suite:keys:add", help="Add a new test suite", - use_container_width=True, - on_click=lambda: add_test_suite_dialog(project_code, connection, table_group), + on_click=lambda: add_test_suite_dialog(project_code, df_tg), ) - df = test_suite_service.get_by_table_group(project_code, table_group_id) - for _, test_suite in df.iterrows(): - subtitle = f"{connection['connection_name']} > {table_group['table_groups_name']}" + subtitle = f"{test_suite['connection_name']} > {test_suite['table_groups_name']}" with testgen.card(title=test_suite["test_suite"], subtitle=subtitle) as test_suite_card: with test_suite_card.actions: testgen.button( @@ -85,7 +76,7 @@ def render(self, connection_id: str | None = None, table_group_id: str | None = icon="edit", tooltip="Edit test suite", tooltip_position="right", - on_click=partial(edit_test_suite_dialog, project_code, connection, table_group, test_suite), + on_click=partial(edit_test_suite_dialog, project_code, df_tg, test_suite), key=f"test_suite:keys:edit:{test_suite['id']}", ) testgen.button( @@ -102,7 +93,8 @@ def render(self, connection_id: str | None = None, table_group_id: str | None = with main_section: testgen.link( label=f"{test_suite['test_ct']} tests definitions", - href="test-definitions", + href="test-suites:definitions", + params={ "test_suite_id": test_suite["id"] }, right_icon="chevron_right", key=f"test_suite:keys:go-to-definitions:{test_suite['id']}", ) @@ -116,11 +108,12 @@ def render(self, connection_id: 
str | None = None, table_group_id: str | None = if (latest_run_start := test_suite["latest_run_start"]) and not pd.isnull(latest_run_start): with latest_run_section: - st.html('') + testgen.no_flex_gap() st.html('
Latest Run
') testgen.link( label=latest_run_start.strftime("%B %d, %H:%M %p"), - href="test-runs", + href="test-runs:results", + params={ "run_id": str(test_suite["latest_run_id"]) }, right_icon="chevron_right", style="margin-bottom: 8px;", height=29, @@ -153,19 +146,23 @@ def render(self, connection_id: str | None = None, table_group_id: str | None = ) +@st.cache_data(show_spinner=False) +def get_db_table_group_choices(project_code): + schema = st.session_state["dbschema"] + return dq.run_table_groups_lookup_query(schema, project_code) + + @st.dialog(title="Add Test Suite") -def add_test_suite_dialog(project_code, connection, table_group): - show_test_suite("add", project_code, connection, table_group) +def add_test_suite_dialog(project_code, table_groups_df): + show_test_suite("add", project_code, table_groups_df) @st.dialog(title="Edit Test Suite") -def edit_test_suite_dialog(project_code, connection, table_group, selected): - show_test_suite("edit", project_code, connection, table_group, selected) +def edit_test_suite_dialog(project_code, table_groups_df, selected): + show_test_suite("edit", project_code, table_groups_df, selected) -def show_test_suite(mode, project_code, connection, table_group, selected=None): - connection_id = connection["connection_id"] - table_group_id = table_group["id"] +def show_test_suite(mode, project_code, table_groups_df, selected=None): severity_options = ["Inherit", "Failed", "Warning"] selected_test_suite = selected if mode == "edit" else None @@ -175,8 +172,8 @@ def show_test_suite(mode, project_code, connection, table_group, selected=None): # establish default values test_suite_id = selected_test_suite["id"] if mode == "edit" else None test_suite = empty_if_null(selected_test_suite["test_suite"]) if mode == "edit" else "" - connection_id = selected_test_suite["connection_id"] if mode == "edit" else connection_id - table_groups_id = selected_test_suite["table_groups_id"] if mode == "edit" else table_group_id + connection_id = 
selected_test_suite["connection_id"] if mode == "edit" else None + table_groups_id = selected_test_suite["table_groups_id"] if mode == "edit" else None test_suite_description = empty_if_null(selected_test_suite["test_suite_description"]) if mode == "edit" else "" test_action = empty_if_null(selected_test_suite["test_action"]) if mode == "edit" else "" severity_index = severity_options.index(selected_test_suite["severity"]) if mode == "edit" else 0 @@ -200,6 +197,12 @@ def show_test_suite(mode, project_code, connection, table_group, selected=None): ), "connection_id": connection_id, "table_groups_id": table_groups_id, + "table_groups_name": right_column.selectbox( + label="Table Group", + options=table_groups_df["table_groups_name"], + index=int(table_groups_df[table_groups_df["id"] == table_groups_id].index[0]) if table_groups_id else 0, + disabled=(mode != "add"), + ), "test_suite_description": left_column.text_input( label="Test Suite Description", max_chars=40, value=test_suite_description ), @@ -253,6 +256,10 @@ def show_test_suite(mode, project_code, connection, table_group, selected=None): if mode == "edit": test_suite_service.edit(entity) else: + selected_table_group_name = entity["table_groups_name"] + selected_table_group = table_groups_df[table_groups_df["table_groups_name"] == selected_table_group_name].iloc[0] + entity["connection_id"] = selected_table_group["connection_id"] + entity["table_groups_id"] = selected_table_group["id"] test_suite_service.add(entity) success_message = ( "Changes have been saved successfully. 
" From 4951576092cb0c500e6ae9ab8387e36d5ab10139 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Thu, 5 Sep 2024 17:48:04 -0400 Subject: [PATCH 36/78] fix: fix bugs in query param handling and test suite query --- testgen/ui/components/widgets/page.py | 10 +++++-- testgen/ui/queries/test_suite_queries.py | 37 +++++++++++++----------- testgen/ui/views/test_suites.py | 2 +- 3 files changed, 28 insertions(+), 21 deletions(-) diff --git a/testgen/ui/components/widgets/page.py b/testgen/ui/components/widgets/page.py index 2715aff..6f429b8 100644 --- a/testgen/ui/components/widgets/page.py +++ b/testgen/ui/components/widgets/page.py @@ -54,15 +54,19 @@ def toolbar_select( else: kwargs["options"] = options if default_value in options: - kwargs["index"] = options.index(default_value) + kwargs["index"] = options.index(default_value) + (0 if required else 1) if bind_to_query: kwargs["key"] = kwargs.get("key", f"toolbar_select_{bind_to_query}") + if default_value is not None and kwargs.get("index") is None: + Router().set_query_params({ bind_to_query: None }) # Unset the query params if the current value is not valid def update_query_params(): query_value = st.session_state[kwargs["key"]] - if isinstance(options, pd.DataFrame): - query_value = options.loc[options[display_column] == query_value, value_column].iloc[0] if query_value != "---" else None + if not required and query_value == "---": + query_value = None + elif isinstance(options, pd.DataFrame): + query_value = options.loc[options[display_column] == query_value, value_column].iloc[0] Router().set_query_params({ bind_to_query: query_value }) kwargs["on_change"] = update_query_params diff --git a/testgen/ui/queries/test_suite_queries.py b/testgen/ui/queries/test_suite_queries.py index 57ffa16..48cd8b2 100644 --- a/testgen/ui/queries/test_suite_queries.py +++ b/testgen/ui/queries/test_suite_queries.py @@ -7,6 +7,10 @@ @st.cache_data(show_spinner=False) def get_by_project(schema, project_code, table_group_id=None): 
sql = f""" + WITH last_run_date + AS (SELECT test_suite_id, MAX(test_starttime) as test_starttime + FROM testgen.test_runs + GROUP BY test_suite_id) SELECT suites.id::VARCHAR(50), suites.project_code, @@ -23,23 +27,23 @@ def get_by_project(schema, project_code, table_group_id=None): suites.component_key, suites.component_type, suites.component_name, - COUNT(definitions.id) as test_ct, - last_run.id as latest_run_id, - MAX(last_run.test_starttime) as latest_run_start, - MAX(last_run.passed_ct) as last_run_passed_ct, - MAX(last_run.warning_ct) as last_run_warning_ct, - MAX(last_run.failed_ct) as last_run_failed_ct, - MAX(last_run.error_ct) as last_run_error_ct + last_run.id as latest_run_id, + last_run.test_starttime as latest_run_start, + last_run.passed_ct + last_run.warning_ct + last_run.failed_ct + last_run.error_ct as last_run_test_ct, + last_run.passed_ct as last_run_passed_ct, + last_run.warning_ct as last_run_warning_ct, + last_run.failed_ct as last_run_failed_ct, + last_run.error_ct as last_run_error_ct FROM {schema}.test_suites as suites - LEFT OUTER JOIN ( - SELECT * FROM {schema}.test_runs ORDER BY test_starttime DESC LIMIT 1 - ) AS last_run ON (last_run.test_suite_id = suites.id) - LEFT OUTER JOIN {schema}.test_definitions AS definitions - ON (definitions.test_suite_id = suites.id) - LEFT OUTER JOIN {schema}.connections AS connections - ON (connections.connection_id = suites.connection_id) - LEFT OUTER JOIN {schema}.table_groups as groups - ON (groups.id = suites.table_groups_id) + LEFT JOIN last_run_date lrd + ON (suites.id = lrd.test_suite_id) + LEFT JOIN {schema}.test_runs last_run + ON (lrd.test_suite_id = last_run.test_suite_id + AND lrd.test_starttime = last_run.test_starttime) + LEFT JOIN {schema}.connections AS connections + ON (connections.connection_id = suites.connection_id) + LEFT JOIN {schema}.table_groups as groups + ON (groups.id = suites.table_groups_id) WHERE suites.project_code = '{project_code}' """ @@ -49,7 +53,6 @@ def 
get_by_project(schema, project_code, table_group_id=None): """ sql += """ - GROUP BY suites.id, groups.table_groups_name, connections.connection_id, last_run.id ORDER BY suites.test_suite; """ diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py index 7915909..caa497e 100644 --- a/testgen/ui/views/test_suites.py +++ b/testgen/ui/views/test_suites.py @@ -92,7 +92,7 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N with main_section: testgen.link( - label=f"{test_suite['test_ct']} tests definitions", + label=f"{test_suite['last_run_test_ct']} tests definitions", href="test-suites:definitions", params={ "test_suite_id": test_suite["id"] }, right_icon="chevron_right", From 85436f79ce6f48fc36f1cb2d34f0d5bcdbc0129a Mon Sep 17 00:00:00 2001 From: Luis Trinidad Date: Thu, 5 Sep 2024 15:53:59 -0400 Subject: [PATCH 37/78] fix: fetch latest version from docker or pypi with proper configuration, both the UI and CLI can now fetch the latest version from dockerhub or pypi, and, optionally, authenticate the request sent to dockerhub. 
--- Dockerfile | 1 + testgen/__main__.py | 3 +- testgen/common/docker_service.py | 48 --------- testgen/common/version_service.py | 79 +++++++++++++++ testgen/settings.py | 42 +++++++- testgen/ui/bootstrap.py | 28 ++---- testgen/ui/session.py | 6 +- tests/unit/test_version_service.py | 150 +++++++++++++++++++++++++++++ 8 files changed, 281 insertions(+), 76 deletions(-) create mode 100644 testgen/common/version_service.py create mode 100644 tests/unit/test_version_service.py diff --git a/Dockerfile b/Dockerfile index 9c6f0ef..e436ca4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -30,6 +30,7 @@ ENV PATH="$PATH:/dk/bin:/opt/mssql-tools/bin/" ARG TESTGEN_VERSION ENV TESTGEN_VERSION=v$TESTGEN_VERSION +ENV TG_RELEASE_CHECK=docker ENV STREAMLIT_SERVER_MAX_UPLOAD_SIZE=200 diff --git a/testgen/__main__.py b/testgen/__main__.py index 234428a..285e949 100644 --- a/testgen/__main__.py +++ b/testgen/__main__.py @@ -43,6 +43,7 @@ get_tg_host, get_tg_schema, logs, + version_service, ) from testgen.utils import plugins @@ -60,7 +61,7 @@ class Configuration: @tui() @click.group( - help=f"This version: {settings.VERSION} \n\nLatest version: {docker_service.check_for_new_docker_release()} \n\nSchema revision: {get_schema_revision()}" + help=f"This version: {settings.VERSION} \n\nLatest version: {version_service.get_latest_version()} \n\nSchema revision: {get_schema_revision()}" ) @click.option( "-v", diff --git a/testgen/common/docker_service.py b/testgen/common/docker_service.py index 7b4fcfa..4d864d1 100644 --- a/testgen/common/docker_service.py +++ b/testgen/common/docker_service.py @@ -1,58 +1,10 @@ import logging -import requests - -from testgen import settings from testgen.common import get_tg_db, get_tg_host, get_tg_password, get_tg_schema, get_tg_username LOG = logging.getLogger("testgen") - -def check_for_new_docker_release() -> str: - if not settings.CHECK_FOR_LATEST_VERSION: - return "unknown" - - try: - tags = get_docker_tags() - - if len(tags) == 0: - 
LOG.debug("docker_service: No tags to parse, skipping check.") - return "unknown" - - ordered_tags = sorted(tags, key=lambda item: item[1], reverse=True) - latest_tag = ordered_tags[0][0] - - if latest_tag != settings.VERSION: - LOG.warning( - f"A new TestGen upgrade is available. Please update to version {latest_tag} for new features and improvements." - ) - - return latest_tag # noqa: TRY300 - except Exception: - LOG.warning("Unable to check for latest release", exc_info=True, stack_info=True) - - -def get_docker_tags(url: str = "https://hub.docker.com/v2/repositories/datakitchen/dataops-testgen/tags/"): - params = {"page_size": 25, "page": 1, "ordering": "last_updated"} - response = requests.get(url, params=params, timeout=3) - - tags_to_return = [] - if not response.status_code == 200: - LOG.warning(f"docker_service: Failed to fetch docker tags. Status code: {response.status_code}") - return tags_to_return - - tags_data = response.json() - results = tags_data.get("results", []) - for result in results: - tag_name = result["name"] - last_pushed = result["tag_last_pushed"] - if tag_name.count(".") >= 2 and "experimental" not in tag_name: - tags_to_return.append((tag_name, last_pushed)) - - return tags_to_return - - def check_basic_configuration(): ret = True message = "" diff --git a/testgen/common/version_service.py b/testgen/common/version_service.py new file mode 100644 index 0000000..c2317b1 --- /dev/null +++ b/testgen/common/version_service.py @@ -0,0 +1,79 @@ +import logging + +import requests + +from testgen import settings + +LOG = logging.getLogger("testgen") + + +def get_latest_version() -> str: + try: + return { + "pypi": _get_last_pypi_release, + "docker": _get_last_docker_release, + "yes": _get_last_docker_release, # NOTE: kept for retrocompatibility + }.get(settings.CHECK_FOR_LATEST_VERSION, lambda: "unknown")() + except: + return "unknown" + + +def _get_last_pypi_release() -> str: + response = 
requests.get("https://pypi.org/pypi/dataops-testgen/json", timeout=3) + if response.status_code != 200: + LOG.warning(f"version_service: Failed to fetch PyPi releases. Status code: {response.status_code}") + return "unknown" + + package_data = response.json() + package_releases = list((package_data.get("releases") or {}).keys()) + + return _sorted_tags(package_releases)[0] + + +def _get_last_docker_release() -> str: + headers = {} + if settings.DOCKER_HUB_USERNAME and settings.DOCKER_HUB_PASSWORD: + auth_response = requests.post( + "https://hub.docker.com/v2/users/login", + json={"username": settings.DOCKER_HUB_USERNAME, "password": settings.DOCKER_HUB_PASSWORD}, + timeout=5, + ) + if auth_response.status_code != 200: + LOG.warning( + "version_service: unable to login against https://hub.docker.com." + f" Status code: {auth_response.status_code}" + ) + return "unknown" + headers["Authorization"] = f"Bearer {auth_response.json()['token']}" + + response = requests.get( + f"https://hub.docker.com/v2/repositories/{settings.DOCKER_HUB_REPOSITORY}/tags", + headers=headers, + params={"page_size": 25, "page": 1, "ordering": "last_updated"}, + timeout=3, + ) + + if response.status_code != 200: + LOG.warning(f"version_service: Failed to fetch docker tags. 
Status code: {response.status_code}") + return "unknown" + + tags_to_return = [] + tags_data = response.json() + results = tags_data.get("results", []) + for result in results: + tag_name = result["name"] + if tag_name.count(".") >= 2 and "experimental" not in tag_name: + tags_to_return.append(tag_name) + + if len(tags_to_return) <= 0: + return "unknown" + + return _sorted_tags(tags_to_return)[0] + + +def _sorted_tags(tags: list[str]) -> list[str]: + sorted_tags_as_tuples = sorted( + [tuple([ int(i) for i in tag.replace("v", "").split(".") ]) for tag in tags], + reverse=True, + ) + return [".".join([str(i) for i in tag_tuple]) for tag_tuple in sorted_tags_as_tuples] diff --git a/testgen/settings.py b/testgen/settings.py index 627ae61..595e402 100644 --- a/testgen/settings.py +++ b/testgen/settings.py @@ -1,4 +1,5 @@ import os +import typing IS_DEBUG_LOG_LEVEL: bool = os.getenv("TESTGEN_DEBUG_LOG_LEVEL", "no").lower() == "yes" """ @@ -415,13 +416,48 @@ defaults to: `default` """ -CHECK_FOR_LATEST_VERSION: bool = os.getenv("TG_DOCKER_RELEASE_CHECK_ENABLED", "yes").lower() == "yes" +CHECK_FOR_LATEST_VERSION: typing.Literal["pypi", "docker", "no"] = typing.cast( + typing.Literal["pypi", "docker", "no"], + os.getenv("TG_RELEASE_CHECK", os.getenv("TG_DOCKER_RELEASE_CHECK_ENABLED", "pypi")).lower(), +) """ -When True, enables calling Docker Hub API to fetch the latest released +When set to `docker`, enables calling Docker Hub API to fetch the latest released image tag. The fetched tag is displayed in the UI menu. from env variable: `TG_DOCKER_RELEASE_CHECK_ENABLED` -defaults to: `True` +choices: `pypi`, `docker`, `no` +defaults to: `pypi` +""" + +DOCKER_HUB_REPOSITORY: str = os.getenv( + "TESTGEN_DOCKER_HUB_REPO", + "datakitchen/dataops-testgen", +) +""" +Name of the Docker Hub repository containing the dataops testgen image. +Used to check for new releases when `CHECK_FOR_LATEST_VERSION` is set to +`docker`. 
+ +from env variable: `TESTGEN_DOCKER_HUB_REPO` +defaults to: datakitchen/dataops-testgen +""" + +DOCKER_HUB_USERNAME: str | None = os.getenv("TESTGEN_DOCKER_HUB_USERNAME", None) +""" +Username to authenticate against Docker Hub API before fetching the list +of tags. Required if `DOCKER_HUB_REPOSITORY` is a private repository. + +from env variable: `TESTGEN_DOCKER_HUB_USERNAME` +defaults to: None +""" + +DOCKER_HUB_PASSWORD: str | None = os.getenv("TESTGEN_DOCKER_HUB_PASSWORD", None) +""" +Password to authenticate against Docker Hub API before fetching the list +of tags. Required if `DOCKER_HUB_REPOSITORY` is a private repository. + +from env variable: `TESTGEN_DOCKER_HUB_PASSWORD` +defaults to: None """ VERSION: str = os.getenv("TESTGEN_VERSION", "unknown") diff --git a/testgen/ui/bootstrap.py b/testgen/ui/bootstrap.py index 03a95f5..05b943f 100644 --- a/testgen/ui/bootstrap.py +++ b/testgen/ui/bootstrap.py @@ -3,11 +3,9 @@ import inspect import logging -import streamlit - from testgen import settings from testgen.commands.run_upgrade_db_config import get_schema_revision -from testgen.common import configure_logging, docker_service +from testgen.common import configure_logging, version_service from testgen.ui.navigation.menu import Menu, Version from testgen.ui.navigation.page import Page from testgen.ui.navigation.router import Router @@ -51,10 +49,14 @@ def __init__(self, router: Router, menu: Menu, logger: logging.Logger) -> None: self.logger = logger def get_version(self) -> Version: + latest_version = self.menu.version.latest + if not session.latest_version: + latest_version = version_service.get_latest_version() + return Version( current=settings.VERSION, - latest=check_for_upgrade(), - schema=_get_schema_rev(), + latest=latest_version, + schema=get_schema_revision(), ) @@ -86,22 +88,8 @@ def run(log_level: int = logging.INFO) -> Application: version=Version( current=settings.VERSION, latest="...", - schema=_get_schema_rev(), + schema=get_schema_revision(), 
), ), logger=LOG, ) - - -@streamlit.cache_resource(show_spinner=False) -def _get_schema_rev() -> str: - revision = session.sb_schema_rev - if not revision: - revision = session.sb_schema_rev = get_schema_revision() - return revision - - -@streamlit.cache_resource(show_spinner=False) -def check_for_upgrade(): - return docker_service.check_for_new_docker_release() - diff --git a/testgen/ui/session.py b/testgen/ui/session.py index 2aaeba9..b10e251 100644 --- a/testgen/ui/session.py +++ b/testgen/ui/session.py @@ -27,9 +27,7 @@ class TestgenSession(Singleton): project: str add_project: bool - - sb_latest_rel: str - sb_schema_rev: str + latest_version: str | None def __init__(self, state: SessionStateProxy) -> None: super().__setattr__("_state", state) @@ -49,4 +47,4 @@ def __delattr__(self, key: str) -> None: del state[key] -session = TestgenSession(st.session_state) +session: TestgenSession = TestgenSession(st.session_state) diff --git a/tests/unit/test_version_service.py b/tests/unit/test_version_service.py new file mode 100644 index 0000000..b97890c --- /dev/null +++ b/tests/unit/test_version_service.py @@ -0,0 +1,150 @@ +from unittest import mock + +import pytest + +from testgen.common.version_service import get_latest_version + + +@pytest.mark.unit +@mock.patch("testgen.common.version_service.settings") +@mock.patch("testgen.common.version_service.requests") +def test_calls_pypi_api(requests: mock.Mock, settings: mock.Mock): + settings.CHECK_FOR_LATEST_VERSION = "pypi" + get_latest_version() + requests.get.assert_called_with("https://pypi.org/pypi/dataops-testgen/json", timeout=3) + + +@pytest.mark.unit +@mock.patch("testgen.common.version_service.settings") +@mock.patch("testgen.common.version_service.requests") +def test_return_unknown_when_pypi_request_fails(requests: mock.Mock, settings: mock.Mock): + response = mock.Mock() + response.status_code = 400 + requests.get.return_value = response + settings.CHECK_FOR_LATEST_VERSION = "pypi" + + assert 
get_latest_version() == "unknown" + + +@pytest.mark.unit +@mock.patch("testgen.common.version_service.settings") +@mock.patch("testgen.common.version_service.requests") +def test_get_the_latest_version_from_pypi(requests: mock.Mock, settings: mock.Mock): + response = mock.Mock() + response.status_code = 200 + requests.get.return_value = response + response.json.return_value = { + "releases": { + "0.0.1": "", + "0.1.0": "", + "1.0.0": "", + "1.1.0": "", + "v1.2.3": "", + "v1.2.0": "", + } + } + settings.CHECK_FOR_LATEST_VERSION = "pypi" + + assert get_latest_version() == "1.2.3" + + +@pytest.mark.unit +@mock.patch("testgen.common.version_service.settings") +@mock.patch("testgen.common.version_service.requests") +def test_calls_docker_tags_api(requests: mock.Mock, settings: mock.Mock): + settings.DOCKER_HUB_USERNAME = None + settings.DOCKER_HUB_PASSWORD = None + settings.DOCKER_HUB_REPOSITORY = "datakitchen/testgen-a" + settings.CHECK_FOR_LATEST_VERSION = "docker" + get_latest_version() + + requests.get.assert_called_with( + "https://hub.docker.com/v2/repositories/datakitchen/testgen-a/tags", + headers={}, + params={"page_size": 25, "page": 1, "ordering": "last_updated"}, + timeout=3, + ) + + +@pytest.mark.unit +@mock.patch("testgen.common.version_service.settings") +@mock.patch("testgen.common.version_service.requests") +def test_return_unknown_when_docker_request_fails(requests: mock.Mock, settings: mock.Mock): + response = mock.Mock() + response.status_code = 400 + requests.get.return_value = response + settings.DOCKER_HUB_USERNAME = None + settings.DOCKER_HUB_PASSWORD = None + settings.CHECK_FOR_LATEST_VERSION = "docker" + + assert get_latest_version() == "unknown" + + +@pytest.mark.unit +@mock.patch("testgen.common.version_service.settings") +@mock.patch("testgen.common.version_service.requests") +def test_get_the_latest_version_from_dockerhub(requests: mock.Mock, settings: mock.Mock): + settings.DOCKER_HUB_USERNAME = None + settings.DOCKER_HUB_PASSWORD = None + 
settings.CHECK_FOR_LATEST_VERSION = "docker" + + response = mock.Mock() + response.status_code = 200 + requests.get.return_value = response + response.json.return_value = { + "results": [ + {"name": "v0.0.1"}, + {"name": "v0.1.0"}, + {"name": "v1.0.0"}, + {"name": "v1.1.0"}, + {"name": "v1.2.0"}, + {"name": "v1.2.3-experimental"}, + ], + } + + assert get_latest_version() == "1.2.0" + +@pytest.mark.unit +@mock.patch("testgen.common.version_service.settings") +@mock.patch("testgen.common.version_service.requests") +def test_authenticates_docker_request(requests: mock.Mock, settings: mock.Mock): + username = settings.DOCKER_HUB_USERNAME = "docker-username" + password = settings.DOCKER_HUB_PASSWORD = "docker-password" # noqa: S105 + docker_auth_token = "docker-auth-token" # noqa: S105 + settings.CHECK_FOR_LATEST_VERSION = "docker" + settings.DOCKER_HUB_REPOSITORY = "datakitchen/testgen-b" + + response = mock.Mock() + response.status_code = 200 + response.json.return_value = {"token": docker_auth_token} + requests.post.return_value = response + + get_latest_version() + + requests.post.assert_called_with( + "https://hub.docker.com/v2/users/login", + json={"username": username, "password": password}, + timeout=5, + ) + requests.get.assert_called_with( + "https://hub.docker.com/v2/repositories/datakitchen/testgen-b/tags", + headers={"Authorization": f"Bearer {docker_auth_token}"}, + params={"page_size": 25, "page": 1, "ordering": "last_updated"}, + timeout=3, + ) + + +@pytest.mark.unit +@mock.patch("testgen.common.version_service.settings") +@mock.patch("testgen.common.version_service.requests") +def test_return_unknown_when_docker_auth_request_fails(requests: mock.Mock, settings: mock.Mock): + settings.DOCKER_HUB_USERNAME = "docker-username" + settings.DOCKER_HUB_PASSWORD = "docker-password" # noqa: S105 + settings.CHECK_FOR_LATEST_VERSION = "docker" + settings.DOCKER_HUB_REPOSITORY = "datakitchen/testgen-b" + + response = mock.Mock() + response.status_code = 400 + 
requests.post.return_value = response + + assert get_latest_version() == "unknown" From 89d1db158c84ff3967d16f23ab8df60ed5e228de Mon Sep 17 00:00:00 2001 From: Astor Date: Wed, 11 Sep 2024 10:37:07 -0300 Subject: [PATCH 38/78] astor/TG-770 --- testgen/ui/views/test_definitions.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py index f88148a..ee25183 100644 --- a/testgen/ui/views/test_definitions.py +++ b/testgen/ui/views/test_definitions.py @@ -510,7 +510,8 @@ def show_test_form( if i >= dynamic_attributes_half_length: current_column = mid_right_column - value = empty_if_null(selected_test_def[dynamic_attribute]) if mode == "edit" else "" + default_value = "" if dynamic_attribute != "threshold_value" else 0 + value = empty_if_null(selected_test_def[dynamic_attribute]) if mode == "edit" else default_value actual_dynamic_attributes_labels = ( dynamic_attributes_labels[i] From 322a0f3155ed1e507c31d9dd885dd95d7c9ff534 Mon Sep 17 00:00:00 2001 From: Ricardo Boni Date: Thu, 5 Sep 2024 11:47:11 -0400 Subject: [PATCH 39/78] feat(ui): Adding sort selector component --- .../js/components/sorting_selector.js | 216 ++++++++++++++++++ testgen/ui/components/frontend/js/main.js | 4 +- testgen/ui/components/widgets/__init__.py | 1 + .../ui/components/widgets/sorting_selector.py | 39 ++++ testgen/ui/views/profiling_summary.py | 2 +- 5 files changed, 260 insertions(+), 2 deletions(-) create mode 100644 testgen/ui/components/frontend/js/components/sorting_selector.js create mode 100644 testgen/ui/components/widgets/sorting_selector.py diff --git a/testgen/ui/components/frontend/js/components/sorting_selector.js b/testgen/ui/components/frontend/js/components/sorting_selector.js new file mode 100644 index 0000000..6564335 --- /dev/null +++ b/testgen/ui/components/frontend/js/components/sorting_selector.js @@ -0,0 +1,216 @@ +import {Streamlit} from "../streamlit.js"; +import van from 
'../van.min.js'; + +/** + * + * @typedef Properties + * @type {object} + * @property {Array} columns + * @property {Array} default + */ +const { a, hr, button, div, i, img, label, option, select, span } = van.tags; + +const SortingSelector = (/** @type {Properties} */ props) => { + + let defaultDirection = "ASC"; + + if (!window.testgen.loadedStylesheets.sortingSelector) { + document.adoptedStyleSheets.push(stylesheet); + window.testgen.loadedStylesheets.sortSelector = true; + } + + const columns = props.columns.val; + const prevComponentState = props.state.val || []; + + const columnLabel = columns.reduce((acc, [colLabel, colId]) => ({ ...acc, [colId]: colLabel}), {}); + + Streamlit.setFrameHeight(130 + 30 * columns.length); + + const componentState = columns.reduce( + (state, [colLabel, colId]) => ( + { ...state, [colId]: van.state(prevComponentState[colId] || { direction: "ASC", order: null })} + ), + {} + ); + + const selectedDiv = div( + { + class: 'tg-sort-selector--column-list', + style: `flex-grow: 1`, + }, + ); + + const directionIcons = { + ASC: `arrow_downward`, + DESC: `arrow_upward`, + } + + const activeColumnItem = (colId) => { + const state = componentState[colId]; + const directionIcon = van.derive(() => directionIcons[state.val.direction]); + return button( + { + onclick: () => { + state.val = { ...state.val, direction: state.val.direction === "DESC" ? 
"ASC" : "DESC" }; + }, + }, + i( + { class: `material-symbols-rounded` }, + directionIcon, + ), + span(columnLabel[colId]), + ) + } + + const selectColumn = (colId, direction) => { + componentState[colId].val = { direction: direction, order: selectedDiv.childElementCount } + van.add(selectedDiv, activeColumnItem(colId)); + } + + prevComponentState.forEach(([colId, direction]) => selectColumn(colId, direction)); + + const reset = () => { + columns.map(([colLabel, colId]) => (componentState[colId].val = { direction: defaultDirection, order: null })); + selectedDiv.innerHTML = ``; + } + + const apply = () => { + Streamlit.sendData( + Object.entries(componentState).filter( + ([colId, colState]) => colState.val.order !== null + ).sort( + ([colIdA, colStateA], [colIdB, colStateB]) => colStateA.val.order - colStateB.val.order + ).map( + ([colId, colState]) => [colId, colState.val.direction] + ) + ); + } + + const columnItem = (colId) => { + const state = componentState[colId]; + return button( + { + onclick: () => selectColumn(colId, defaultDirection), + hidden: state.val.order !== null, + }, + i( + { + class: `material-symbols-rounded`, + style: `color: var(--disabled-text-color);`, + }, + `expand_all` + ), + span(columnLabel[colId]), + ) + } + + const optionsDiv = div( + { + class: 'tg-sort-selector--column-list', + }, + columns.map(([colLabel, colId]) => van.derive(() => columnItem(colId))), + ) + + return div( + { class: 'tg-sort-selector' }, + div( + { + class: `tg-sort-selector--header`, + }, + span("Selected columns") + ), + selectedDiv, + div( + { class: `tg-sort-selector--header` }, + span("Available columns") + ), + optionsDiv, + div( + { class: `tg-sort-selector--footer` }, + button( + { onclick: reset }, + span(`Reset`), + ), + button( + { onclick: apply }, + span(`Apply`), + ) + ) + ); +}; + + +const stylesheet = new CSSStyleSheet(); +stylesheet.replace(` + +.tg-sort-selector { + height: 100vh; + display: flex; + flex-direction: column; + align-content: 
flex-end; + justify-content: space-between; +} + +.tg-sort-selector--column-list { + display: flex; + flex-direction: column; +} + +.tg-sort-selector--column-list button { + margin: 0; + border: 0; + padding: 5px 0; + text-align: left; + background: transparent; + color: var(--button-text-color); +} + +.tg-sort-selector--column-list button:hover { + background: #00000010; +} + +.tg-sort-selector--column-list button * { + vertical-align: middle; +} + +.tg-sort-selector--column-list button i { + font-size: 20px; +} + + +.tg-sort-selector--column-list { + border-bottom: 3px dotted var(--disabled-text-color); + padding-bottom: 16px; + margin-bottom: 8px; +} + +.tg-sort-selector--header { + text-align: right; + text-transform: uppercase; + font-size: 70%; +} + +.tg-sort-selector--footer { + display: flex; + flex-direction: row; + justify-content: space-between; + margin-top: 8px; +} + +.tg-sort-selector--footer button { + background-color: var(--button-stroked-background); + color: var(--button-stroked-text-color); + border: var(--button-stroked-border); + padding: 5px 20px; + border-radius: 5px; +} + +@media (prefers-color-scheme: dark) { + .tg-sort-selector--column-list button:hover { + background: #FFFFFF20; + } +} + +`); + +export { SortingSelector }; diff --git a/testgen/ui/components/frontend/js/main.js b/testgen/ui/components/frontend/js/main.js index 31f2db7..3c56cdd 100644 --- a/testgen/ui/components/frontend/js/main.js +++ b/testgen/ui/components/frontend/js/main.js @@ -3,7 +3,7 @@ * @type {object} * @property {string} id - id of the specific component to be rendered * @property {string} key - user key of the specific component to be rendered - * @property {object} props - object with the props to pass to the rendered component + * @property {object} props - object with the props to pass to the rendered component */ import van from './van.min.js'; import { Streamlit } from './streamlit.js'; @@ -13,6 +13,7 @@ import { ExpanderToggle } from 
'./components/expander_toggle.js'; import { Link } from './components/link.js'; import { Select } from './components/select.js' import { SummaryBar } from './components/summary_bar.js'; +import { SortingSelector } from './components/sorting_selector.js'; let currentWindowVan = van; let topWindowVan = window.top.van; @@ -24,6 +25,7 @@ const TestGenComponent = (/** @type {string} */ id, /** @type {object} */ props) expander_toggle: ExpanderToggle, link: Link, select: Select, + sorting_selector: SortingSelector, sidebar: window.top.testgen.components.Sidebar, summary_bar: SummaryBar, }; diff --git a/testgen/ui/components/widgets/__init__.py b/testgen/ui/components/widgets/__init__.py index 7c25862..653d5e0 100644 --- a/testgen/ui/components/widgets/__init__.py +++ b/testgen/ui/components/widgets/__init__.py @@ -14,4 +14,5 @@ whitespace, ) from testgen.ui.components.widgets.sidebar import sidebar +from testgen.ui.components.widgets.sorting_selector import sorting_selector from testgen.ui.components.widgets.summary_bar import summary_bar diff --git a/testgen/ui/components/widgets/sorting_selector.py b/testgen/ui/components/widgets/sorting_selector.py new file mode 100644 index 0000000..1c024f8 --- /dev/null +++ b/testgen/ui/components/widgets/sorting_selector.py @@ -0,0 +1,39 @@ +from collections.abc import Iterable + +import streamlit as st +from streamlit.runtime.scriptrunner import get_script_run_ctx + +from testgen.ui.components.utils.component import component + + +def sorting_selector( + columns: Iterable[tuple[str, str]], + default: Iterable[tuple[str, str]] = (), + popover_label: str = "Sort", + key: str = "testgen:sorting_selector", +) -> list[tuple[str, str]]: + """ + Renders a pop over that, when clicked, shows a list of database columns to be selected for sorting. 
+ + # Parameters + :param columns: Iterable of 2-tuples, being: (, ) + :param default: Iterable of 2-tuples, being: (, ) + :param key: unique key to give the component a persisting state + + # Return value + Returns a list of 2-tuples, being: (, ) + """ + + ctx = get_script_run_ctx() + try: + state = ctx.session_state[key] + except KeyError: + state = default + + with st.popover(popover_label): + return component( + id_="sorting_selector", + key=key, + default=default, + props={"columns": columns, "state": state}, + ) diff --git a/testgen/ui/views/profiling_summary.py b/testgen/ui/views/profiling_summary.py index 6fa7dea..e81fa15 100644 --- a/testgen/ui/views/profiling_summary.py +++ b/testgen/ui/views/profiling_summary.py @@ -25,7 +25,7 @@ class DataProfilingPage(Page): def render(self, project_code: str | None = None, table_group_id: str | None = None, **_kwargs) -> None: project_code = project_code or session.project - + testgen.page_header( "Profiling Runs", "https://docs.datakitchen.io/article/dataops-testgen-help/investigate-profiling", From 01d35477201dbb94ac39c0c22743d970812019bc Mon Sep 17 00:00:00 2001 From: Ricardo Boni Date: Tue, 10 Sep 2024 17:37:46 -0400 Subject: [PATCH 40/78] feat(ui): Tracking the sort selector state with the query parameters --- .../ui/components/widgets/sorting_selector.py | 62 ++++++++++++++++++- 1 file changed, 59 insertions(+), 3 deletions(-) diff --git a/testgen/ui/components/widgets/sorting_selector.py b/testgen/ui/components/widgets/sorting_selector.py index 1c024f8..539e1ce 100644 --- a/testgen/ui/components/widgets/sorting_selector.py +++ b/testgen/ui/components/widgets/sorting_selector.py @@ -1,15 +1,50 @@ -from collections.abc import Iterable +import itertools +import re +from collections.abc import Callable, Iterable +from typing import Any import streamlit as st from streamlit.runtime.scriptrunner import get_script_run_ctx from testgen.ui.components.utils.component import component +from testgen.ui.navigation.router 
import Router + + +def _slugfy(text) -> str: + return re.sub(r"[^a-z]+", "-", text.lower()) + + +def _state_to_str(columns, state): + state_parts = [] + state_dict = dict(state) + try: + for col_label, col_id in columns: + if col_id in state_dict: + state_parts.append(".".join((_slugfy(col_label), state_dict[col_id].lower()))) + return "-".join(state_parts) or "-" + except Exception: + return None + + +def _state_from_str(columns, state_str): + col_slug_to_id = {_slugfy(col_label): col_id for col_label, col_id in columns} + state_part_re = re.compile("".join(("(", "|".join(col_slug_to_id.keys()), r")\.(asc|desc)"))) + state = [] + try: + for state_part in state_str.split("-"): + if match := state_part_re.match(state_part): + state.append([col_slug_to_id[match.group(1)], match.group(2).upper()]) + except Exception as e: + return None + return state def sorting_selector( columns: Iterable[tuple[str, str]], default: Iterable[tuple[str, str]] = (), + on_change: Callable[[], Any] | None = None, popover_label: str = "Sort", + query_param: str | None = "sort", key: str = "testgen:sorting_selector", ) -> list[tuple[str, str]]: """ @@ -24,16 +59,37 @@ def sorting_selector( Returns a list of 2-tuples, being: (, ) """ + state = None + ctx = get_script_run_ctx() try: state = ctx.session_state[key] except KeyError: + pass + + if state is None and query_param and (state_str := st.query_params.get(query_param)): + state = _state_from_str(columns, state_str) + + if state is None: state = default with st.popover(popover_label): - return component( + new_state = component( id_="sorting_selector", key=key, - default=default, + default=state, + on_change=on_change, props={"columns": columns, "state": state}, ) + + # For some unknown reason, sometimes, streamlit returns None as the component status + new_state = [] if new_state is None else new_state + + if query_param: + if tuple(itertools.chain(*default)) == tuple(itertools.chain(*new_state)): + value = None + else: + value = 
_state_to_str(columns, new_state) + Router().set_query_params({query_param: value}) + + return new_state From 50a44b33c4ed56102891d015a9c82985f7a4ea56 Mon Sep 17 00:00:00 2001 From: Ricardo Boni Date: Wed, 11 Sep 2024 14:21:13 -0400 Subject: [PATCH 41/78] feat(ui): Improving sorting and filtering of hygiene issues --- testgen/ui/navigation/router.py | 15 +++-- testgen/ui/views/profiling_anomalies.py | 75 ++++++++++++++++++++----- 2 files changed, 69 insertions(+), 21 deletions(-) diff --git a/testgen/ui/navigation/router.py b/testgen/ui/navigation/router.py index 8480ec5..0c58484 100644 --- a/testgen/ui/navigation/router.py +++ b/testgen/ui/navigation/router.py @@ -17,9 +17,9 @@ class Router(Singleton): def __init__( self, /, - routes: list[type[testgen.ui.navigation.page.Page]], + routes: list[type[testgen.ui.navigation.page.Page]] | None = None, ) -> None: - self._routes = {route.path: route(self) for route in routes} + self._routes = {route.path: route(self) for route in routes} if routes else {} def run(self, hide_sidebar=False) -> None: streamlit_pages = [route.streamlit_page for route in self._routes.values()] @@ -29,7 +29,7 @@ def run(self, hide_sidebar=False) -> None: # Otherwise anything custom in the sidebar randomly flickers on page navigation current_page = st.navigation(streamlit_pages, position="hidden" if hide_sidebar else "sidebar") session.current_page_args = st.query_params - + # This hack is needed because the auth cookie is not retrieved on the first run # We have to store the page and wait for the second run @@ -39,7 +39,7 @@ def run(self, hide_sidebar=False) -> None: else: current_page = session.page_pending_cookies or current_page session.page_pending_cookies = None - + if session.page_args_pending_router is not None: session.current_page_args = session.page_args_pending_router st.query_params.from_dict(session.page_args_pending_router) @@ -47,8 +47,8 @@ def run(self, hide_sidebar=False) -> None: session.current_page = current_page.url_path 
current_page.run() - - + + def navigate(self, /, to: str, with_args: dict = {}) -> None: # noqa: B006 try: if to != session.current_page: @@ -66,8 +66,7 @@ def navigate(self, /, to: str, with_args: dict = {}) -> None: # noqa: B006 st.error(error_message) LOG.exception(error_message) - - def set_query_params(self, with_args: dict = {}) -> None: # noqa: B006 + def set_query_params(self, with_args: dict) -> None: params = st.query_params params.update(with_args) params = {k: v for k, v in params.items() if v not in [None, "None", ""]} diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py index fe8e5c4..9a7176a 100644 --- a/testgen/ui/views/profiling_anomalies.py +++ b/testgen/ui/views/profiling_anomalies.py @@ -22,7 +22,7 @@ class ProfilingAnomaliesPage(Page): lambda: "run_id" in session.current_page_args or "profiling-runs", ] - def render(self, run_id: str, issue_class: str | None = None, **_kwargs) -> None: + def render(self, run_id: str, issue_class: str | None = None, issue_type: str | None = None, **_kwargs) -> None: run_date, _table_group_id, table_group_name, project_code = profiling_queries.lookup_db_parentage_from_run( run_id ) @@ -39,7 +39,9 @@ def render(self, run_id: str, issue_class: str | None = None, **_kwargs) -> None ) others_summary_column, pii_summary_column, _ = st.columns([.3, .3, .4]) - liklihood_filter_column, actions_column, export_button_column = st.columns([.3, .5, .2], vertical_alignment="bottom") + (liklihood_filter_column, issue_type_filter_column, actions_column, export_button_column) = ( + st.columns([.16, .34, .32, .18], vertical_alignment="bottom") + ) testgen.flex_row_end(actions_column) testgen.flex_row_end(export_button_column) @@ -54,12 +56,35 @@ def render(self, run_id: str, issue_class: str | None = None, **_kwargs) -> None label="Issue Class", ) + with issue_type_filter_column: + # Issue filter (optional) + issue_type_options = get_issue_types() + issue_type = testgen.toolbar_select( + 
options=["All Issue Types", *issue_type_options["anomaly_name"]], + default_value=issue_type, + required=True, + bind_to_query="issue_type", + label="Issue Type", + ) + issue_type_id = dict(zip(issue_type_options["anomaly_name"], issue_type_options["id"], strict=False)).get(issue_type) + with actions_column: str_help = "Toggle on to perform actions on multiple Hygiene Issues" do_multi_select = st.toggle("Multi-Select", help=str_help) + with export_button_column: + sortable_columns = ( + ("Table", "r.table_name"), + ("Column", "r.column_name"), + ("Anomaly", "t.anomaly_name"), + ("Likelihood", "likelihood_order"), + ("Action", "r.disposition"), + ) + default = (("r.table_name", "ASC"), ("r.column_name", "ASC")) + sorting_columns = testgen.sorting_selector(sortable_columns, default) + # Get hygiene issue list - df_pa = get_profiling_anomalies(run_id, issue_class) + df_pa = get_profiling_anomalies(run_id, issue_class, issue_type_id, sorting_columns) # Retrieve disposition action (cache refreshed) df_action = get_anomaly_disposition(run_id) @@ -90,7 +115,7 @@ def render(self, run_id: str, issue_class: str | None = None, **_kwargs) -> None width=400, ) # write_frequency_graph(df_pa) - + lst_show_columns = [ "table_name", "column_name", @@ -162,6 +187,11 @@ def render(self, run_id: str, issue_class: str | None = None, **_kwargs) -> None ): source_data_dialog(selected_row) + cached_functions = [get_anomaly_disposition, get_profiling_anomaly_summary] + # Clear the list cache if the list is sorted by disposition/action + if "r.disposition" in dict(sorting_columns): + cached_functions.append(get_profiling_anomalies) + # Need to render toolbar buttons after grid, so selection status is maintained if actions_column.button( "✓", help="Confirm this issue as relevant for this run", disabled=not selected @@ -170,7 +200,7 @@ def render(self, run_id: str, issue_class: str | None = None, **_kwargs) -> None do_disposition_update(selected, "Confirmed"), as_toast=True, clear_cache=True, 
- lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary], + lst_cached_functions=cached_functions, ) if actions_column.button( "✘", help="Dismiss this issue as not relevant for this run", disabled=not selected @@ -179,7 +209,7 @@ def render(self, run_id: str, issue_class: str | None = None, **_kwargs) -> None do_disposition_update(selected, "Dismissed"), as_toast=True, clear_cache=True, - lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary], + lst_cached_functions=cached_functions, ) if actions_column.button( "🔇", help="Mute this test to deactivate it for future runs", disabled=not selected @@ -188,14 +218,14 @@ def render(self, run_id: str, issue_class: str | None = None, **_kwargs) -> None do_disposition_update(selected, "Inactive"), as_toast=True, clear_cache=True, - lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary], + lst_cached_functions=cached_functions, ) if actions_column.button("↩︎", help="Clear action", disabled=not selected): fm.reset_post_updates( do_disposition_update(selected, "No Decision"), as_toast=True, clear_cache=True, - lst_cached_functions=[get_anomaly_disposition, get_profiling_anomaly_summary], + lst_cached_functions=cached_functions, ) else: st.markdown(":green[**No Hygiene Issues Found**]") @@ -213,12 +243,18 @@ def get_db_table_group_choices(str_project_code): @st.cache_data(show_spinner="Retrieving Data") -def get_profiling_anomalies(str_profile_run_id, str_likelihood): +def get_profiling_anomalies(str_profile_run_id, str_likelihood, issue_type_id, sorting_columns): str_schema = st.session_state["dbschema"] if str_likelihood == "All Likelihoods": str_criteria = " AND t.issue_likelihood <> 'Potential PII'" else: str_criteria = f" AND t.issue_likelihood = '{str_likelihood}'" + if sorting_columns: + str_order_by = "ORDER BY " + (", ".join(" ".join(col) for col in sorting_columns)) + else: + str_order_by = "" + if issue_type_id: + str_criteria += f" AND t.id = 
'{issue_type_id}'" # Define the query -- first visible column must be first, because will hold the multi-select box str_sql = f""" SELECT r.table_name, r.column_name, r.schema_name, @@ -228,9 +264,15 @@ def get_profiling_anomalies(str_profile_run_id, str_likelihood): WHEN t.issue_likelihood = 'Possible' THEN 'Possible: speculative test that often identifies problems' WHEN t.issue_likelihood = 'Likely' THEN 'Likely: typically indicates a data problem' WHEN t.issue_likelihood = 'Definite' THEN 'Definite: indicates a highly-likely data problem' - WHEN t.issue_likelihood = 'Potential PII' + WHEN t.issue_likelihood = 'Potential PII' THEN 'Potential PII: may require privacy policies, standards and procedures for access, storage and transmission.' - END as likelihood_explanation, + END AS likelihood_explanation, + CASE + WHEN t.issue_likelihood = 'Potential PII' THEN 1 + WHEN t.issue_likelihood = 'Possible' THEN 2 + WHEN t.issue_likelihood = 'Likely' THEN 3 + WHEN t.issue_likelihood = 'Definite' THEN 4 + END AS likelihood_order, t.anomaly_description, r.detail, t.suggested_action, r.anomaly_id, r.table_groups_id::VARCHAR, r.id::VARCHAR, p.profiling_starttime FROM {str_schema}.profile_anomaly_results r @@ -240,7 +282,7 @@ def get_profiling_anomalies(str_profile_run_id, str_likelihood): ON r.profile_run_id = p.id WHERE r.profile_run_id = '{str_profile_run_id}' {str_criteria} - ORDER BY r.schema_name, r.table_name, r.column_name; + {str_order_by} """ # Retrieve data as df df = db.retrieve_data(str_sql) @@ -267,6 +309,13 @@ def get_anomaly_disposition(str_profile_run_id): return df[["id", "action"]] +@st.cache_data(show_spinner="Retrieving Status") +def get_issue_types(): + schema = st.session_state["dbschema"] + df = db.retrieve_data(f"SELECT id, anomaly_name FROM {schema}.profile_anomaly_types") + return df + + @st.cache_data(show_spinner=False) def get_profiling_anomaly_summary(str_profile_run_id): str_schema = st.session_state["dbschema"] @@ -314,7 +363,7 @@ def 
get_bad_data(selected_row): str_sql = f""" SELECT t.lookup_query, tg.table_group_schema, c.project_qc_schema, c.sql_flavor, c.project_host, c.project_port, c.project_db, c.project_user, c.project_pw_encrypted, - c.url, c.connect_by_url, c.connect_by_key, c.private_key, c.private_key_passphrase + c.url, c.connect_by_url, c.connect_by_key, c.private_key, c.private_key_passphrase FROM {str_schema}.target_data_lookups t INNER JOIN {str_schema}.table_groups tg ON ('{selected_row["table_groups_id"]}'::UUID = tg.id) From ba02ba380ecbb153f94bfe9eae4c665798005b37 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Thu, 12 Sep 2024 16:05:03 -0400 Subject: [PATCH 42/78] docs: update readme for pip installation --- README.md | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index 467b954..0266eb4 100644 --- a/README.md +++ b/README.md @@ -86,7 +86,7 @@ Create and activate a virtual environment with a TestGen-compatible version of P _On Linux/Mac_ ```shell -python3.10 -m venv venv +python3 -m venv venv source venv/bin/activate ``` @@ -108,28 +108,32 @@ testgen --help ### Set up the application database in PostgresSQL -Set appropriate values for the following environment variables (use `export variable=value` for Linux/Mac and `set variable=value` for Windows). Refer to the [TestGen Configuration](configuration.md) document for more details, defaults, and other supported configuration. - +Create a `local.env` file with the following environment variables, replacing the `` placeholders with appropriate values. Refer to the [TestGen Configuration](docs/configuration.md) document for more details, defaults, and other supported configuration. 
```shell # Connection parameters for the PostgreSQL server -TG_METADATA_DB_HOST -TG_METADATA_DB_PORT +export TG_METADATA_DB_HOST= +export TG_METADATA_DB_PORT= + +# Connection credentials for the PostgreSQL server +# This role must have privileges to create roles, users, database and schema so that the application database can be initialized +export TG_METADATA_DB_USER= +export TG_METADATA_DB_PASSWORD= -# PostgreSQL admin role with privileges to create roles, users, database and schema -# This role will be used by the next step to initialize the application database -DATABASE_ADMIN_USER -DATABASE_ADMIN_PASSWORD +# Set a password and arbitrary string (the "salt") to be used for encrypting secrets in the application database +export TG_DECRYPT_PASSWORD= +export TG_DECRYPT_SALT= -# Credentials to be used for encrypting secrets in application database -TG_DECRYPT_SALT -TG_DECRYPT_PASSWORD +# Set credentials for the default admin user to be created for TestGen +export TESTGEN_USERNAME= +export TESTGEN_PASSWORD= -# Default admin user to be created for TestGen -TESTGEN_USERNAME -TESTGEN_PASSWORD +# Set an accessible path for storing application logs +export TESTGEN_LOG_FILE_PATH= +``` -# Accessible path for storing application logs -TESTGEN_LOG_FILE_PATH +Source the file to apply the environment variables. For the Windows equivalent, refer to [this guide](https://bennett4.medium.com/windows-alternative-to-source-env-for-setting-environment-variables-606be2a6d3e1). +```shell +source local.env ``` Make sure the PostgreSQL database server is up and running. Initialize the application database for TestGen. 
From c81e996f71507841d75968a714526c707c8695e4 Mon Sep 17 00:00:00 2001 From: Ricardo Boni Date: Wed, 11 Sep 2024 15:28:05 -0400 Subject: [PATCH 43/78] misc: Self-review fixes --- .../js/components/sorting_selector.js | 19 ++++++--- .../ui/components/widgets/sorting_selector.py | 6 ++- testgen/ui/views/profiling_anomalies.py | 40 +++++++++---------- 3 files changed, 39 insertions(+), 26 deletions(-) diff --git a/testgen/ui/components/frontend/js/components/sorting_selector.js b/testgen/ui/components/frontend/js/components/sorting_selector.js index 6564335..64c1d04 100644 --- a/testgen/ui/components/frontend/js/components/sorting_selector.js +++ b/testgen/ui/components/frontend/js/components/sorting_selector.js @@ -2,13 +2,18 @@ import {Streamlit} from "../streamlit.js"; import van from '../van.min.js'; /** + * @typedef ColDef + * @type {Array.} * - * @typedef Properties + * @typedef StateItem + * @type {Array.} + * + * @typedef Properties * @type {object} - * @property {Array} columns - * @property {Array} default + * @property {Array.} columns + * @property {Array.} state */ -const { a, hr, button, div, i, img, label, option, select, span } = van.tags; +const { button, div, i, span } = van.tags; const SortingSelector = (/** @type {Properties} */ props) => { @@ -70,7 +75,11 @@ const SortingSelector = (/** @type {Properties} */ props) => { prevComponentState.forEach(([colId, direction]) => selectColumn(colId, direction)); const reset = () => { - columns.map(([colLabel, colId]) => (componentState[colId].val = { direction: defaultDirection, order: null })); + columns.map( + ([colLabel, colId]) => ( + componentState[colId].val = { direction: defaultDirection, order: null } + ) + ); selectedDiv.innerHTML = ``; } diff --git a/testgen/ui/components/widgets/sorting_selector.py b/testgen/ui/components/widgets/sorting_selector.py index 539e1ce..17ffa11 100644 --- a/testgen/ui/components/widgets/sorting_selector.py +++ b/testgen/ui/components/widgets/sorting_selector.py @@ 
-53,6 +53,10 @@ def sorting_selector( # Parameters :param columns: Iterable of 2-tuples, being: (, ) :param default: Iterable of 2-tuples, being: (, ) + :param on_change: Callable that will be called when the component state is updated + :param popover_label: Label to be applied to the pop-over button. Default: 'Sort' + :param query_param: Name of the query parameter that will store the component state. Can be disabled by setting + to None. Default: 'sort'. :param key: unique key to give the component a persisting state # Return value @@ -82,7 +86,7 @@ def sorting_selector( props={"columns": columns, "state": state}, ) - # For some unknown reason, sometimes, streamlit returns None as the component status + # For some unknown reason, sometimes, streamlit returns None as the component state new_state = [] if new_state is None else new_state if query_param: diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py index 9a7176a..4372902 100644 --- a/testgen/ui/views/profiling_anomalies.py +++ b/testgen/ui/views/profiling_anomalies.py @@ -39,40 +39,35 @@ def render(self, run_id: str, issue_class: str | None = None, issue_type: str | ) others_summary_column, pii_summary_column, _ = st.columns([.3, .3, .4]) - (liklihood_filter_column, issue_type_filter_column, actions_column, export_button_column) = ( - st.columns([.16, .34, .32, .18], vertical_alignment="bottom") + (liklihood_filter_column, issue_type_filter_column, sort_column, actions_column, export_button_column) = ( + st.columns([.16, .34, .08, .32, .1], vertical_alignment="bottom") ) testgen.flex_row_end(actions_column) testgen.flex_row_end(export_button_column) with liklihood_filter_column: - # Likelihood selection - optional filter - status_options = ["All Likelihoods", "Definite", "Likely", "Possible", "Potential PII"] issue_class = testgen.toolbar_select( - options=status_options, + options=["Definite", "Likely", "Possible", "Potential PII"], default_value=issue_class, - 
required=True, + required=False, bind_to_query="issue_class", label="Issue Class", ) with issue_type_filter_column: - # Issue filter (optional) issue_type_options = get_issue_types() - issue_type = testgen.toolbar_select( - options=["All Issue Types", *issue_type_options["anomaly_name"]], - default_value=issue_type, - required=True, + issue_type_id = testgen.toolbar_select( + options=issue_type_options, + default_value=None if issue_class == "Potential PII" else issue_type, + value_column="id", + display_column="anomaly_name", + required=False, bind_to_query="issue_type", label="Issue Type", + disabled=issue_class == "Potential PII", ) - issue_type_id = dict(zip(issue_type_options["anomaly_name"], issue_type_options["id"], strict=False)).get(issue_type) - with actions_column: - str_help = "Toggle on to perform actions on multiple Hygiene Issues" - do_multi_select = st.toggle("Multi-Select", help=str_help) - - with export_button_column: + with sort_column: sortable_columns = ( ("Table", "r.table_name"), ("Column", "r.column_name"), @@ -80,9 +75,14 @@ def render(self, run_id: str, issue_class: str | None = None, issue_type: str | ("Likelihood", "likelihood_order"), ("Action", "r.disposition"), ) - default = (("r.table_name", "ASC"), ("r.column_name", "ASC")) + default = [(sortable_columns[i][1], "ASC") for i in (0, 1)] sorting_columns = testgen.sorting_selector(sortable_columns, default) + with actions_column: + str_help = "Toggle on to perform actions on multiple Hygiene Issues" + do_multi_select = st.toggle("Multi-Select", help=str_help) + + # Get hygiene issue list df_pa = get_profiling_anomalies(run_id, issue_class, issue_type_id, sorting_columns) @@ -245,7 +245,7 @@ def get_db_table_group_choices(str_project_code): @st.cache_data(show_spinner="Retrieving Data") def get_profiling_anomalies(str_profile_run_id, str_likelihood, issue_type_id, sorting_columns): str_schema = st.session_state["dbschema"] - if str_likelihood == "All Likelihoods": + if str_likelihood is 
None: str_criteria = " AND t.issue_likelihood <> 'Potential PII'" else: str_criteria = f" AND t.issue_likelihood = '{str_likelihood}'" @@ -309,7 +309,7 @@ def get_anomaly_disposition(str_profile_run_id): return df[["id", "action"]] -@st.cache_data(show_spinner="Retrieving Status") +@st.cache_data(show_spinner=False) def get_issue_types(): schema = st.session_state["dbschema"] df = db.retrieve_data(f"SELECT id, anomaly_name FROM {schema}.profile_anomaly_types") From 19c694ce4f85758f224e47e4cf76eb0c6e1ed97b Mon Sep 17 00:00:00 2001 From: Ricardo Boni Date: Thu, 12 Sep 2024 12:23:25 -0400 Subject: [PATCH 44/78] feat(ui): improved sorting and filtering of test results and profilling results --- testgen/ui/queries/profiling_queries.py | 11 +++- testgen/ui/views/profiling_results.py | 28 ++++++--- testgen/ui/views/test_results.py | 79 +++++++++++++++++++------ 3 files changed, 91 insertions(+), 27 deletions(-) diff --git a/testgen/ui/queries/profiling_queries.py b/testgen/ui/queries/profiling_queries.py index 8f6c089..f831bbc 100644 --- a/testgen/ui/queries/profiling_queries.py +++ b/testgen/ui/queries/profiling_queries.py @@ -79,8 +79,13 @@ def lookup_db_parentage_from_run(str_profile_run_id): @st.cache_data(show_spinner="Retrieving Data") -def get_profiling_detail(str_profile_run_id, str_table_name, str_column_name): +def get_profiling_detail(str_profile_run_id, str_table_name, str_column_name, sorting_columns = None): str_schema = st.session_state["dbschema"] + sorting_columns_str = ( + "p.schema_name, p.table_name, position" + if sorting_columns is None + else ", ".join(" ".join(col) for col in sorting_columns) + ) str_sql = f""" SELECT -- Identifiers id::VARCHAR, dk_id, @@ -98,7 +103,7 @@ def get_profiling_detail(str_profile_run_id, str_table_name, str_column_name): WHEN 'B' THEN 'Boolean' ELSE 'N/A' END as general_type, - functional_table_type as semantic_table_type, + functional_table_type as semantic_table_type, functional_data_type as semantic_data_type, 
datatype_suggestion, CASE WHEN s.column_name IS NOT NULL THEN 'Yes' END as anomalies, @@ -142,7 +147,7 @@ def get_profiling_detail(str_profile_run_id, str_table_name, str_column_name): WHERE p.profile_run_id = '{str_profile_run_id}'::UUID AND p.table_name ILIKE '{str_table_name}' AND p.column_name ILIKE '{str_column_name}' - ORDER BY p.schema_name, p.table_name, position; + ORDER BY {sorting_columns_str}; """ return db.retrieve_data(str_sql) diff --git a/testgen/ui/views/profiling_results.py b/testgen/ui/views/profiling_results.py index d6445e0..896631b 100644 --- a/testgen/ui/views/profiling_results.py +++ b/testgen/ui/views/profiling_results.py @@ -36,25 +36,27 @@ def render(self, run_id: str, table_name: str | None = None, column_name: str | { "label": f"{table_group_name} | {run_date}" }, ], ) - - table_filter_column, column_filter_column, export_button_column = st.columns([.3, .3, .4], vertical_alignment="bottom") + + table_filter_column, column_filter_column, sort_column, export_button_column = st.columns( + [.3, .3, .08, .32], vertical_alignment="bottom" + ) with table_filter_column: # Table Name filter df = profiling_queries.run_table_lookup_query(table_group_id) table_name = testgen.toolbar_select( - options=df, + options=df, value_column="table_name", default_value=table_name, bind_to_query="table_name", label="Table Name", ) - + with column_filter_column: # Column Name filter df = profiling_queries.run_column_lookup_query(table_group_id, table_name) column_name = testgen.toolbar_select( - options=df, + options=df, value_column="column_name", default_value=column_name, bind_to_query="column_name", @@ -62,14 +64,26 @@ def render(self, run_id: str, table_name: str | None = None, column_name: str | disabled=not table_name, ) + with sort_column: + sortable_columns = ( + ("Schema Name", "p.schema_name"), + ("Table Name", "p.table_name"), + ("Column Name", "p.column_name"), + ("Column Type", "p.column_type"), + ("Semantic Data Type", "semantic_data_type"), + 
("Anomalies", "anomalies"), + ) + default_sorting = [(sortable_columns[i][1], "ASC") for i in (0, 1, 2)] + sorting_columns = testgen.sorting_selector(sortable_columns, default_sorting) + # Use SQL wildcard to match all values - if not table_name: + if not table_name: table_name = "%%" if not column_name: column_name = "%%" # Display main results grid - df = profiling_queries.get_profiling_detail(run_id, table_name, column_name) + df = profiling_queries.get_profiling_detail(run_id, table_name, column_name, sorting_columns) show_columns = [ "schema_name", "table_name", diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py index 833f82f..5fa415b 100644 --- a/testgen/ui/views/test_results.py +++ b/testgen/ui/views/test_results.py @@ -28,7 +28,7 @@ class TestResultsPage(Page): lambda: "run_id" in session.current_page_args or "test-runs", ] - def render(self, run_id: str, status: str | None = None, **_kwargs) -> None: + def render(self, run_id: str, status: str | None = None, test_type: str | None = None, **_kwargs) -> None: run_date, test_suite_name, project_code = get_drill_test_run(run_id) run_date = date_service.get_timezoned_timestamp(st.session_state, run_date) project_service.set_current_project(project_code) @@ -47,7 +47,9 @@ def render(self, run_id: str, status: str | None = None, **_kwargs) -> None: testgen.summary_bar(items=tests_summary, key="test_results", height=40, width=800) # Setup Toolbar - status_filter_column, actions_column, export_button_column = st.columns([.3, .5, .2], vertical_alignment="bottom") + status_filter_column, test_type_filter_column, sort_column, actions_column, export_button_column = st.columns( + [.2, .2, .08, .4, .12], vertical_alignment="bottom" + ) testgen.flex_row_end(actions_column) testgen.flex_row_end(export_button_column) @@ -60,12 +62,36 @@ def render(self, run_id: str, status: str | None = None, **_kwargs) -> None: ] status = testgen.toolbar_select( options=status_options, - default_value=status, 
- required=True, + default_value=status or "Failures and Warnings", + required=False, bind_to_query="status", label="Result Status", ) + with test_type_filter_column: + test_type = testgen.toolbar_select( + options=get_test_types(), + value_column="test_type", + display_column="test_name_short", + default_value=test_type, + required=False, + bind_to_query="test_type", + label="Test Type", + ) + + with sort_column: + sortable_columns = ( + ("Table Name", "r.table_name"), + ("Columns/Focus", "r.column_names"), + ("Test Type", "r.test_type"), + ("UOM", "tt.measure_uom"), + ("Result Measure", "result_measure"), + ("Status", "result_status"), + ("Action", "r.disposition"), + ) + default = [(sortable_columns[i][1], "ASC") for i in (0, 1, 2)] + sorting_columns = testgen.sorting_selector(sortable_columns, default) + with actions_column: str_help = "Toggle on to perform actions on multiple results" do_multi_select = st.toggle("Multi-Select", help=str_help) @@ -81,10 +107,17 @@ def render(self, run_id: str, status: str | None = None, **_kwargs) -> None: status = "'Passed'" # Display main grid and retrieve selection - selected = show_result_detail(run_id, status, do_multi_select, export_button_column) + selected = show_result_detail( + run_id, status, test_type, sorting_columns, do_multi_select, export_button_column + ) # Need to render toolbar buttons after grid, so selection status is maintained disable_dispo = True if not selected or status == "'Passed'" else False + + affected_cached_functions = [get_test_disposition] + if "r.disposition" in dict(sorting_columns): + affected_cached_functions.append(get_test_results) + if actions_column.button( "✓", help="Confirm this issue as relevant for this run", disabled=disable_dispo ): @@ -92,7 +125,7 @@ def render(self, run_id: str, status: str | None = None, **_kwargs) -> None: do_disposition_update(selected, "Confirmed"), as_toast=True, clear_cache=True, - lst_cached_functions=[get_test_disposition], + 
lst_cached_functions=affected_cached_functions, ) if actions_column.button( "✘", help="Dismiss this issue as not relevant for this run", disabled=disable_dispo @@ -101,7 +134,7 @@ def render(self, run_id: str, status: str | None = None, **_kwargs) -> None: do_disposition_update(selected, "Dismissed"), as_toast=True, clear_cache=True, - lst_cached_functions=[get_test_disposition], + lst_cached_functions=affected_cached_functions, ) if actions_column.button( "🔇", help="Mute this test to deactivate it for future runs", disabled=not selected @@ -110,14 +143,14 @@ def render(self, run_id: str, status: str | None = None, **_kwargs) -> None: do_disposition_update(selected, "Inactive"), as_toast=True, clear_cache=True, - lst_cached_functions=[get_test_disposition], + lst_cached_functions=affected_cached_functions, ) if actions_column.button("⟲", help="Clear action", disabled=not selected): fm.reset_post_updates( do_disposition_update(selected, "No Decision"), as_toast=True, clear_cache=True, - lst_cached_functions=[get_test_disposition], + lst_cached_functions=affected_cached_functions, ) # Help Links @@ -142,20 +175,32 @@ def get_drill_test_run(str_test_run_id): return df.at[0, "test_date"], df.at[0, "test_suite"], df.at[0, "project_code"] +@st.cache_data(show_spinner=False) +def get_test_types(): + schema = st.session_state["dbschema"] + df = db.retrieve_data(f"SELECT test_type, test_name_short FROM {schema}.test_types") + return df + + @st.cache_data(show_spinner="Retrieving Results") -def get_test_results(str_run_id, str_sel_test_status): +def get_test_results(str_run_id, str_sel_test_status, test_type_id, sorting_columns): schema = st.session_state["dbschema"] - return get_test_results_uncached(schema, str_run_id, str_sel_test_status) + return get_test_results_uncached(schema, str_run_id, str_sel_test_status, test_type_id, sorting_columns) -def get_test_results_uncached(str_schema, str_run_id, str_sel_test_status): +def get_test_results_uncached(str_schema, 
str_run_id, str_sel_test_status, test_type_id, sorting_columns): # First visible row first, so multi-select checkbox will render + str_order_by = "ORDER BY " + (", ".join(" ".join(col) for col in sorting_columns)) if sorting_columns else "" + test_type_clause = f"AND r.test_type = '{test_type_id}'" if test_type_id else "" + status_clause = f" AND r.result_status IN ({str_sel_test_status})" if str_sel_test_status else "" str_sql = f""" WITH run_results AS (SELECT * FROM {str_schema}.test_results r - WHERE r.test_run_id = '{str_run_id}' - AND r.result_status IN ({str_sel_test_status}) + WHERE + r.test_run_id = '{str_run_id}' + {status_clause} + {test_type_clause} ) SELECT r.table_name, p.project_name, ts.test_suite, tg.table_groups_name, cn.connection_name, cn.project_host, cn.sql_flavor, @@ -214,7 +259,7 @@ def get_test_results_uncached(str_schema, str_run_id, str_sel_test_status): LEFT JOIN {str_schema}.cat_test_conditions c ON (cn.sql_flavor = c.sql_flavor AND r.test_type = c.test_type) - ORDER BY schema_name, table_name, column_names, test_type; + {str_order_by} ; """ df = db.retrieve_data(str_sql) @@ -551,9 +596,9 @@ def show_test_def_detail(str_test_def_id): ) -def show_result_detail(str_run_id, str_sel_test_status, do_multi_select, export_container): +def show_result_detail(str_run_id, str_sel_test_status, test_type_id, sorting_columns, do_multi_select, export_container): # Retrieve test results (always cached, action as null) - df = get_test_results(str_run_id, str_sel_test_status) + df = get_test_results(str_run_id, str_sel_test_status, test_type_id, sorting_columns) # Retrieve disposition action (cache refreshed) df_action = get_test_disposition(str_run_id) # Update action from disposition df From 3b231e9c05aa28b5eb91b40e2326815917ad4b34 Mon Sep 17 00:00:00 2001 From: Ricardo Boni Date: Mon, 16 Sep 2024 19:03:57 -0400 Subject: [PATCH 45/78] misc(ui): Addressing code review feedback --- .../js/components/sorting_selector.js | 42 +++++++++++++------ 
.../ui/components/widgets/sorting_selector.py | 4 +- 2 files changed, 30 insertions(+), 16 deletions(-) diff --git a/testgen/ui/components/frontend/js/components/sorting_selector.js b/testgen/ui/components/frontend/js/components/sorting_selector.js index 64c1d04..926a173 100644 --- a/testgen/ui/components/frontend/js/components/sorting_selector.js +++ b/testgen/ui/components/frontend/js/components/sorting_selector.js @@ -29,7 +29,7 @@ const SortingSelector = (/** @type {Properties} */ props) => { const columnLabel = columns.reduce((acc, [colLabel, colId]) => ({ ...acc, [colId]: colLabel}), {}); - Streamlit.setFrameHeight(130 + 30 * columns.length); + Streamlit.setFrameHeight(100 + 30 * columns.length); const componentState = columns.reduce( (state, [colLabel, colId]) => ( @@ -83,16 +83,16 @@ const SortingSelector = (/** @type {Properties} */ props) => { selectedDiv.innerHTML = ``; } + const externalComponentState = () => Object.entries(componentState).filter( + ([colId, colState]) => colState.val.order !== null + ).sort( + ([colIdA, colStateA], [colIdB, colStateB]) => colStateA.val.order - colStateB.val.order + ).map( + ([colId, colState]) => [colId, colState.val.direction] + ) + const apply = () => { - Streamlit.sendData( - Object.entries(componentState).filter( - ([colId, colState]) => colState.val.order !== null - ).sort( - ([colIdA, colStateA], [colIdB, colStateB]) => colStateA.val.order - colStateB.val.order - ).map( - ([colId, colState]) => [colId, colState.val.direction] - ) - ); + Streamlit.sendData(externalComponentState()); } const columnItem = (colId) => { @@ -120,6 +120,12 @@ const SortingSelector = (/** @type {Properties} */ props) => { columns.map(([colLabel, colId]) => van.derive(() => columnItem(colId))), ) + const resetDisabled = () => Object.entries(componentState).filter( + ([colId, colState]) => colState.val.order != null + ).length === 0; + + const applyDisabled = () => externalComponentState().toString() === (props.state.val || []).toString(); 
+ return div( { class: 'tg-sort-selector' }, div( @@ -137,11 +143,15 @@ const SortingSelector = (/** @type {Properties} */ props) => { div( { class: `tg-sort-selector--footer` }, button( - { onclick: reset }, + { + onclick: reset, + style: `color: var(--button-text-color);`, + disabled: van.derive(resetDisabled), + }, span(`Reset`), ), button( - { onclick: apply }, + { onclick: apply, disabled: van.derive(applyDisabled) }, span(`Apply`), ) ) @@ -189,7 +199,7 @@ stylesheet.replace(` .tg-sort-selector--column-list { border-bottom: 3px dotted var(--disabled-text-color); - padding-bottom: 16px; + padding-bottom: 8px; margin-bottom: 8px; } @@ -197,6 +207,7 @@ stylesheet.replace(` text-align: right; text-transform: uppercase; font-size: 70%; + color: var(--secondary-text-color); } .tg-sort-selector--footer { @@ -214,6 +225,11 @@ stylesheet.replace(` border-radius: 5px; } +.tg-sort-selector--footer button[disabled] { + color: var(--disabled-text-color) !important; +} + + @media (prefers-color-scheme: dark) { .tg-sort-selector--column-list button:hover { background: #FFFFFF20; diff --git a/testgen/ui/components/widgets/sorting_selector.py b/testgen/ui/components/widgets/sorting_selector.py index 17ffa11..b81f932 100644 --- a/testgen/ui/components/widgets/sorting_selector.py +++ b/testgen/ui/components/widgets/sorting_selector.py @@ -4,7 +4,6 @@ from typing import Any import streamlit as st -from streamlit.runtime.scriptrunner import get_script_run_ctx from testgen.ui.components.utils.component import component from testgen.ui.navigation.router import Router @@ -65,9 +64,8 @@ def sorting_selector( state = None - ctx = get_script_run_ctx() try: - state = ctx.session_state[key] + state = st.session_state[key] except KeyError: pass From 1d8e310f69aa545cd65672faccc4f02fef9cb025 Mon Sep 17 00:00:00 2001 From: Ricardo Boni Date: Mon, 16 Sep 2024 20:43:51 -0400 Subject: [PATCH 46/78] misc: Addressing code review feedback --- testgen/ui/components/widgets/sorting_selector.py | 12 
+++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/testgen/ui/components/widgets/sorting_selector.py b/testgen/ui/components/widgets/sorting_selector.py index b81f932..8b168f1 100644 --- a/testgen/ui/components/widgets/sorting_selector.py +++ b/testgen/ui/components/widgets/sorting_selector.py @@ -28,13 +28,11 @@ def _state_to_str(columns, state): def _state_from_str(columns, state_str): col_slug_to_id = {_slugfy(col_label): col_id for col_label, col_id in columns} state_part_re = re.compile("".join(("(", "|".join(col_slug_to_id.keys()), r")\.(asc|desc)"))) - state = [] - try: - for state_part in state_str.split("-"): - if match := state_part_re.match(state_part): - state.append([col_slug_to_id[match.group(1)], match.group(2).upper()]) - except Exception as e: - return None + state = [ + [col_slug_to_id[col_slug], direction.upper()] + for col_slug, direction + in state_part_re.findall(state_str) + ] return state From f27b085473feef3092a9f0127b9e09f5b9cfeea1 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Fri, 13 Sep 2024 12:07:21 -0400 Subject: [PATCH 47/78] feat(ui): add project and table groups summary to overview page --- testgen/ui/assets/style.css | 10 +- testgen/ui/views/overview.py | 298 +++++++++++++++++++++++++++++++++-- 2 files changed, 296 insertions(+), 12 deletions(-) diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css index c3a39c5..3a7b50a 100644 --- a/testgen/ui/assets/style.css +++ b/testgen/ui/assets/style.css @@ -1,6 +1,7 @@ body { --primary-color: #06a04a; --link-color: #1976d2; + --error-color: #EF5350; --primary-text-color: #000000de; --secondary-text-color: #0000008a; @@ -126,7 +127,6 @@ button[title="Show password text"] { .element-container:has(iframe[height="0"][title="testgen.ui.components.utils.component.testgen"]) { display: none !important; } -/* ... 
*/ /* Cards Component */ [data-testid="stVerticalBlockBorderWrapper"]:has(> div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.testgen_card) { @@ -172,6 +172,14 @@ button[title="Show password text"] { gap: unset; } +/* Stylistic equivalent of st.caption("text") for customization +Use as st.html('

text

') */ +.caption { + color: var(--caption-text-color); + font-size: 14px; + margin-bottom: 0; +} + /* Tooltips */ [data-tooltip] { position: relative; diff --git a/testgen/ui/views/overview.py b/testgen/ui/views/overview.py index 901fa7f..b500826 100644 --- a/testgen/ui/views/overview.py +++ b/testgen/ui/views/overview.py @@ -1,14 +1,18 @@ -import logging import typing +from datetime import datetime +import pandas as pd import streamlit as st +import testgen.ui.services.database_service as db +from testgen.common import date_service from testgen.ui.components import widgets as testgen from testgen.ui.navigation.menu import MenuItem from testgen.ui.navigation.page import Page +from testgen.ui.services import test_suite_service from testgen.ui.session import session -LOG = logging.getLogger("testgen") +STALE_PROFILE_DAYS = 60 class OverviewPage(Page): @@ -18,17 +22,289 @@ class OverviewPage(Page): ] menu_item = MenuItem(icon="home", label="Overview", order=0) - def render(self, **_kwargs): + def render(self, project_code: str | None = None, **_kwargs): + project_code = project_code or session.project + table_groups_df: pd.DataFrame = get_table_groups_summary(project_code) + testgen.page_header( - "Welcome to DataOps TestGen", + "Project Overview", "https://docs.datakitchen.io/article/dataops-testgen-help/introduction-to-dataops-testgen", - ) + ) + + render_project_summary(table_groups_df) + + st.html(f'
Table Groups ({len(table_groups_df.index)})
') + for index, table_group in table_groups_df.iterrows(): + render_table_group_card(table_group, project_code, index) + + +def render_project_summary(table_groups: pd.DataFrame) -> None: + project_column, _ = st.columns([.5, .5]) + with project_column: + with testgen.card(): + summary_column, _ = st.columns([.8, .2]) + # score_column, summary_column = st.columns([.5, .5]) + + # with score_column: + # st.caption("Project HIT score") + # st.metric( + # "Project HIT score", + # value=project_score, + # delta=project_score_delta or 0, + # label_visibility="collapsed", + # ) + + with summary_column: + st.caption("Project Summary") + st.html(f"""{len(table_groups.index)} table groups +
{table_groups['latest_tests_suite_ct'].sum()} test suites +
{table_groups['latest_tests_ct'].sum()} test definitions + """) + + +@st.fragment +def render_table_group_card(table_group: pd.Series, project_code: str, key: int) -> None: + with testgen.card(title=table_group["table_groups_name"]) as test_suite_card: + + # Don't remove this + # For some reason, st.columns do not get completely removed from DOM when used conditionally within a fragment + # Without this CSS, the "hidden" elements in the expanded state take up space + testgen.no_flex_gap() + + with test_suite_card.actions: + expand_toggle = testgen.expander_toggle(key=f"toggle_{key}") + + profile_column, tests_column = st.columns([.5, .5]) + # score_column, profile_column, tests_column = st.columns([.2, .35, .45]) + + # with score_column: + # st.caption("HIT score") + # st.metric( + # "HIT score", + # value=table_group["score"], + # delta=table_group["score_delta"] or 0, + # label_visibility="collapsed", + # ) + + with profile_column: + testgen.no_flex_gap() + is_stale = (datetime.utcnow() - table_group["latest_profile_start"]).days > STALE_PROFILE_DAYS + st.html(f"""

Latest profile {'(stale)' if is_stale else ""}

""") - st.session_state["app_title"] = "TestGen Dashboard" + if (latest_profile_id := table_group["latest_profile_id"]) and not pd.isnull(latest_profile_id): + testgen.link( + label=date_service.get_timezoned_timestamp(st.session_state, table_group["latest_profile_start"]), + href="profiling-runs:results", + params={ "run_id": str(latest_profile_id) }, + key=f"overview:keys:go-to-profile:{latest_profile_id}", + ) + + st.html(f""" + {table_group["latest_anomalies_ct"]} anomalies in {table_group["latest_profile_table_ct"]} tables + """) - st.markdown( - "###### The easiest way possible to institute comprehensive, agile data quality testing.\n\n" - " - Start measuring immediately. \n" - " - Derive actionable information quickly. \n" - " - Then iterate, using tests and results to refine as you go." + testgen.summary_bar( + items=[ + { "label": "Definite", "value": int(table_group["latest_anomalies_definite_ct"]), "color": "red" }, + { "label": "Likely", "value": int(table_group["latest_anomalies_likely_ct"]), "color": "orange" }, + { "label": "Possible", "value": int(table_group["latest_anomalies_possible_ct"]), "color": "yellow" }, + { "label": "Dismissed", "value": int(table_group["latest_anomalies_dismissed_ct"]), "color": "grey" }, + ], + key=f"anomalies_{key}", + height=12, + width=200, + ) + else: + st.markdown("--") + + with tests_column: + testgen.no_flex_gap() + st.caption("Latest test results") + total_tests = int(table_group["latest_tests_ct"]) + if total_tests: + passed_tests = int(table_group["latest_tests_passed_ct"]) + tests_summary = [ + { "label": "Passed", "value": passed_tests, "color": "green" }, + { "label": "Warnings", "value": int(table_group["latest_tests_warning_ct"]), "color": "yellow" }, + { "label": "Failed", "value": int(table_group["latest_tests_failed_ct"]), "color": "red" }, + { "label": "Errors", "value": int(table_group["latest_tests_error_ct"]), "color": "grey" }, + ] + + st.html(f""" +

{round(passed_tests * 100 / total_tests)}% passed

+ {total_tests} tests in {table_group["latest_tests_suite_ct"]} test suites + """) + + testgen.summary_bar( + items=tests_summary, + key=f"tests_{key}", + height=12, + width=260, + ) + else: + st.markdown("--") + + if expand_toggle: + render_table_group_expanded(table_group["id"], project_code, key) + + +def render_table_group_expanded(table_group_id: str, project_code: str, key: int) -> None: + st.html('
') + + column_spec = [0.25, 0.15, 0.6] + suite_column, run_column, results_column = st.columns(column_spec) + suite_column.caption("Test Suite") + # generation_column.caption("Latest Generation") + run_column.caption("Latest Run") + results_column.caption("Latest Results") + testgen.whitespace(1) + + test_suites_df: pd.DataFrame = test_suite_service.get_by_project(project_code, table_group_id) + + for index, suite in test_suites_df.iterrows(): + render_test_suite_item(suite, column_spec, f"{key}_{index}") + + +def render_test_suite_item(test_suite: pd.Series, column_spec: list[int], key: int) -> None: + suite_column, run_column, results_column = st.columns(column_spec) + with suite_column: + testgen.no_flex_gap() + testgen.link( + label=test_suite["test_suite"], + href="test-suites:definitions", + params={ "test_suite_id": str(test_suite["id"]) }, + key=f"overview:keys:go-to-definitions:{test_suite['id']}", ) + st.html(f'

{test_suite["last_run_test_ct"]} tests

') + + # if (latest_generation := test_suite["latest_generation"]) and not pd.isnull(latest_generation): + # generation_column.markdown(date_service.get_timezoned_timestamp(st.session_state, latest_generation)) + # else: + # generation_column.markdown("--") + + latest_run_id = test_suite["latest_run_id"] + if latest_run_id and not pd.isnull(latest_run_id): + with run_column: + testgen.link( + label=date_service.get_timezoned_timestamp(st.session_state, test_suite["latest_run_start"]), + href="test-runs:results", + params={ "run_id": str(latest_run_id) }, + key=f"overview:keys:go-to-run:{latest_run_id}", + ) + + with results_column: + testgen.summary_bar( + items=[ + { "label": "Passed", "value": int(test_suite["last_run_passed_ct"]), "color": "green" }, + { "label": "Warnings", "value": int(test_suite["last_run_warning_ct"]), "color": "yellow" }, + { "label": "Failed", "value": int(test_suite["last_run_failed_ct"]), "color": "red" }, + { "label": "Errors", "value": int(test_suite["last_run_error_ct"]), "color": "grey" }, + ], + key=f"tests_{key}", + height=8, + width=120, + ) + else: + run_column.markdown("--") + results_column.html("--

") + + +def get_table_groups_summary(project_code: str) -> pd.DataFrame: + schema = st.session_state["dbschema"] + sql = f""" + WITH latest_profile_dates AS ( + SELECT table_groups_id, + MAX(profiling_starttime) as profiling_starttime + FROM {schema}.profiling_runs + GROUP BY table_groups_id + ), + latest_profile AS ( + SELECT latest_run.table_groups_id, + latest_run.id, + latest_run.profiling_starttime, + latest_run.table_ct, + latest_run.anomaly_ct, + SUM( + CASE + WHEN COALESCE(latest_anomalies.disposition, 'Confirmed') = 'Confirmed' + AND anomaly_types.issue_likelihood = 'Definite' THEN 1 + ELSE 0 + END + ) as definite_ct, + SUM( + CASE + WHEN COALESCE(latest_anomalies.disposition, 'Confirmed') = 'Confirmed' + AND anomaly_types.issue_likelihood = 'Likely' THEN 1 + ELSE 0 + END + ) as likely_ct, + SUM( + CASE + WHEN COALESCE(latest_anomalies.disposition, 'Confirmed') = 'Confirmed' + AND anomaly_types.issue_likelihood = 'Possible' THEN 1 + ELSE 0 + END + ) as possible_ct, + SUM( + CASE + WHEN COALESCE(latest_anomalies.disposition, 'Confirmed') IN ('Dismissed', 'Inactive') + AND anomaly_types.issue_likelihood <> 'Potential PII' THEN 1 + ELSE 0 + END + ) as dismissed_ct + FROM latest_profile_dates lpd + LEFT JOIN {schema}.profiling_runs latest_run ON ( + lpd.table_groups_id = latest_run.table_groups_id + AND lpd.profiling_starttime = latest_run.profiling_starttime + ) + LEFT JOIN {schema}.profile_anomaly_results latest_anomalies ON ( + latest_run.id = latest_anomalies.profile_run_id + ) + LEFT JOIN {schema}.profile_anomaly_types anomaly_types ON ( + anomaly_types.id = latest_anomalies.anomaly_id + ) + GROUP BY latest_run.id + ), + latest_run_dates AS ( + SELECT test_suite_id, + MAX(test_starttime) as test_starttime + FROM {schema}.test_runs + GROUP BY test_suite_id + ), + latest_tests AS ( + SELECT suites.table_groups_id, + COUNT(*) as test_suite_ct, + SUM(passed_ct) as passed_ct, + SUM(warning_ct) as warning_ct, + SUM(failed_ct) as failed_ct, + SUM(error_ct) as 
error_ct + FROM latest_run_dates lrd + LEFT JOIN {schema}.test_runs latest_run ON ( + lrd.test_suite_id = latest_run.test_suite_id + AND lrd.test_starttime = latest_run.test_starttime + ) + LEFT JOIN {schema}.test_suites as suites ON (suites.id = lrd.test_suite_id) + GROUP BY suites.table_groups_id + ) + SELECT groups.id::VARCHAR(50), + groups.table_groups_name, + latest_profile.id as latest_profile_id, + latest_profile.profiling_starttime as latest_profile_start, + latest_profile.table_ct as latest_profile_table_ct, + latest_profile.anomaly_ct as latest_anomalies_ct, + latest_profile.definite_ct as latest_anomalies_definite_ct, + latest_profile.likely_ct as latest_anomalies_likely_ct, + latest_profile.possible_ct as latest_anomalies_possible_ct, + latest_profile.dismissed_ct as latest_anomalies_dismissed_ct, + latest_tests.test_suite_ct as latest_tests_suite_ct, + latest_tests.passed_ct + latest_tests.warning_ct + latest_tests.failed_ct + latest_tests.error_ct as latest_tests_ct, + latest_tests.passed_ct as latest_tests_passed_ct, + latest_tests.warning_ct as latest_tests_warning_ct, + latest_tests.failed_ct as latest_tests_failed_ct, + latest_tests.error_ct as latest_tests_error_ct + FROM {schema}.table_groups as groups + LEFT JOIN latest_profile ON (groups.id = latest_profile.table_groups_id) + LEFT JOIN latest_tests ON (groups.id = latest_tests.table_groups_id) + WHERE groups.project_code = '{project_code}'; + """ + return db.retrieve_data(sql) From e4a6c79bf72142e7b61f6165b7acf657bcf2b443 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Mon, 16 Sep 2024 21:50:58 -0400 Subject: [PATCH 48/78] fix(ui): handle disposition in test results summary add last generation time, fix empty state bug --- testgen/ui/assets/style.css | 8 ++ testgen/ui/queries/test_suite_queries.py | 80 ++++++++++++++--- testgen/ui/views/overview.py | 106 +++++++++++++++-------- testgen/ui/views/test_results.py | 56 +++++++++--- testgen/ui/views/test_suites.py | 7 +- 5 files changed, 198 
insertions(+), 59 deletions(-) diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css index 3a7b50a..ed5b605 100644 --- a/testgen/ui/assets/style.css +++ b/testgen/ui/assets/style.css @@ -180,6 +180,14 @@ Use as st.html('

text

') */ margin-bottom: 0; } +/* Stylistic equivalent of testgen.link() to match font style of links +Use as st.html('

text

') */ +.line { + font-size: 14px; + font-family: 'Roboto', 'Helvetica Neue', sans-serif; + line-height: 16.5px; +} + /* Tooltips */ [data-tooltip] { position: relative; diff --git a/testgen/ui/queries/test_suite_queries.py b/testgen/ui/queries/test_suite_queries.py index 48cd8b2..61982ca 100644 --- a/testgen/ui/queries/test_suite_queries.py +++ b/testgen/ui/queries/test_suite_queries.py @@ -7,10 +7,67 @@ @st.cache_data(show_spinner=False) def get_by_project(schema, project_code, table_group_id=None): sql = f""" - WITH last_run_date - AS (SELECT test_suite_id, MAX(test_starttime) as test_starttime - FROM testgen.test_runs - GROUP BY test_suite_id) + WITH last_gen_date AS ( + SELECT test_suite_id, + MAX(last_auto_gen_date) as auto_gen_date + FROM {schema}.test_definitions + GROUP BY test_suite_id + ), + last_run_date AS ( + SELECT test_suite_id, + MAX(test_starttime) as test_starttime + FROM {schema}.test_runs + GROUP BY test_suite_id + ), + last_run AS ( + SELECT test_runs.test_suite_id, + test_runs.id, + test_runs.test_starttime, + COUNT(*) as test_ct, + SUM( + CASE + WHEN COALESCE(test_results.disposition, 'Confirmed') = 'Confirmed' + AND test_results.result_status = 'Passed' THEN 1 + ELSE 0 + END + ) as passed_ct, + SUM( + CASE + WHEN COALESCE(test_results.disposition, 'Confirmed') = 'Confirmed' + AND test_results.result_status = 'Warning' THEN 1 + ELSE 0 + END + ) as warning_ct, + SUM( + CASE + WHEN COALESCE(test_results.disposition, 'Confirmed') = 'Confirmed' + AND test_results.result_status = 'Failed' THEN 1 + ELSE 0 + END + ) as failed_ct, + SUM( + CASE + WHEN COALESCE(test_results.disposition, 'Confirmed') = 'Confirmed' + AND test_results.result_status = 'Error' THEN 1 + ELSE 0 + END + ) as error_ct, + SUM( + CASE + WHEN COALESCE(test_results.disposition, 'Confirmed') IN ('Dismissed', 'Inactive') THEN 1 + ELSE 0 + END + ) as dismissed_ct + FROM last_run_date lrd + LEFT JOIN {schema}.test_runs ON ( + lrd.test_suite_id = test_runs.test_suite_id + AND 
lrd.test_starttime = test_runs.test_starttime + ) + LEFT JOIN {schema}.test_results ON ( + test_runs.id = test_results.test_run_id + ) + GROUP BY test_runs.id + ) SELECT suites.id::VARCHAR(50), suites.project_code, @@ -27,19 +84,20 @@ def get_by_project(schema, project_code, table_group_id=None): suites.component_key, suites.component_type, suites.component_name, + last_gen_date.auto_gen_date as latest_auto_gen_date, last_run.id as latest_run_id, last_run.test_starttime as latest_run_start, - last_run.passed_ct + last_run.warning_ct + last_run.failed_ct + last_run.error_ct as last_run_test_ct, + last_run.test_ct as last_run_test_ct, last_run.passed_ct as last_run_passed_ct, last_run.warning_ct as last_run_warning_ct, last_run.failed_ct as last_run_failed_ct, - last_run.error_ct as last_run_error_ct + last_run.error_ct as last_run_error_ct, + last_run.dismissed_ct as last_run_dismissed_ct FROM {schema}.test_suites as suites - LEFT JOIN last_run_date lrd - ON (suites.id = lrd.test_suite_id) - LEFT JOIN {schema}.test_runs last_run - ON (lrd.test_suite_id = last_run.test_suite_id - AND lrd.test_starttime = last_run.test_starttime) + LEFT JOIN last_gen_date + ON (suites.id = last_gen_date.test_suite_id) + LEFT JOIN last_run + ON (suites.id = last_run.test_suite_id) LEFT JOIN {schema}.connections AS connections ON (connections.connection_id = suites.connection_id) LEFT JOIN {schema}.table_groups as groups diff --git a/testgen/ui/views/overview.py b/testgen/ui/views/overview.py index b500826..4c35890 100644 --- a/testgen/ui/views/overview.py +++ b/testgen/ui/views/overview.py @@ -12,7 +12,7 @@ from testgen.ui.services import test_suite_service from testgen.ui.session import session -STALE_PROFILE_DAYS = 60 +STALE_PROFILE_DAYS = 30 class OverviewPage(Page): @@ -43,6 +43,7 @@ def render_project_summary(table_groups: pd.DataFrame) -> None: with project_column: with testgen.card(): summary_column, _ = st.columns([.8, .2]) + # TODO: Uncomment and replace with below section 
when adding the score # score_column, summary_column = st.columns([.5, .5]) # with score_column: @@ -57,8 +58,8 @@ def render_project_summary(table_groups: pd.DataFrame) -> None: with summary_column: st.caption("Project Summary") st.html(f"""{len(table_groups.index)} table groups -
{table_groups['latest_tests_suite_ct'].sum()} test suites -
{table_groups['latest_tests_ct'].sum()} test definitions +
{int(table_groups['latest_tests_suite_ct'].sum())} test suites +
{int(table_groups['latest_tests_ct'].sum())} test definitions """) @@ -75,6 +76,7 @@ def render_table_group_card(table_group: pd.Series, project_code: str, key: int) expand_toggle = testgen.expander_toggle(key=f"toggle_{key}") profile_column, tests_column = st.columns([.5, .5]) + # TODO: Uncomment and replace with below section when adding the score # score_column, profile_column, tests_column = st.columns([.2, .35, .45]) # with score_column: @@ -88,8 +90,8 @@ def render_table_group_card(table_group: pd.Series, project_code: str, key: int) with profile_column: testgen.no_flex_gap() - is_stale = (datetime.utcnow() - table_group["latest_profile_start"]).days > STALE_PROFILE_DAYS - st.html(f"""

Latest profile {'(stale)' if is_stale else ""}

""") + profile_days_ago = (datetime.utcnow() - table_group["latest_profile_start"]).days + st.html(f"""

Latest profile {f'({profile_days_ago} days ago)' if profile_days_ago > STALE_PROFILE_DAYS else ""}

""") if (latest_profile_id := table_group["latest_profile_id"]) and not pd.isnull(latest_profile_id): testgen.link( @@ -100,7 +102,7 @@ def render_table_group_card(table_group: pd.Series, project_code: str, key: int) ) st.html(f""" - {table_group["latest_anomalies_ct"]} anomalies in {table_group["latest_profile_table_ct"]} tables + {int(table_group["latest_anomalies_ct"])} anomalies in {int(table_group["latest_profile_table_ct"])} tables """) testgen.summary_bar( @@ -112,7 +114,7 @@ def render_table_group_card(table_group: pd.Series, project_code: str, key: int) ], key=f"anomalies_{key}", height=12, - width=200, + width=280, ) else: st.markdown("--") @@ -120,26 +122,26 @@ def render_table_group_card(table_group: pd.Series, project_code: str, key: int) with tests_column: testgen.no_flex_gap() st.caption("Latest test results") - total_tests = int(table_group["latest_tests_ct"]) + total_tests = int(table_group["latest_tests_ct"]) if not pd.isnull(table_group["latest_tests_ct"]) else 0 if total_tests: passed_tests = int(table_group["latest_tests_passed_ct"]) - tests_summary = [ - { "label": "Passed", "value": passed_tests, "color": "green" }, - { "label": "Warnings", "value": int(table_group["latest_tests_warning_ct"]), "color": "yellow" }, - { "label": "Failed", "value": int(table_group["latest_tests_failed_ct"]), "color": "red" }, - { "label": "Errors", "value": int(table_group["latest_tests_error_ct"]), "color": "grey" }, - ] st.html(f"""

{round(passed_tests * 100 / total_tests)}% passed

- {total_tests} tests in {table_group["latest_tests_suite_ct"]} test suites + {total_tests} tests in {int(table_group["latest_tests_suite_ct"])} test suites """) testgen.summary_bar( - items=tests_summary, + items=[ + { "label": "Passed", "value": passed_tests, "color": "green" }, + { "label": "Warnings", "value": int(table_group["latest_tests_warning_ct"]), "color": "yellow" }, + { "label": "Failed", "value": int(table_group["latest_tests_failed_ct"]), "color": "red" }, + { "label": "Errors", "value": int(table_group["latest_tests_error_ct"]), "color": "brown" }, + { "label": "Dismissed", "value": int(table_group["latest_tests_dismissed_ct"]), "color": "grey" }, + ], key=f"tests_{key}", height=12, - width=260, + width=350, ) else: st.markdown("--") @@ -151,10 +153,10 @@ def render_table_group_card(table_group: pd.Series, project_code: str, key: int) def render_table_group_expanded(table_group_id: str, project_code: str, key: int) -> None: st.html('
') - column_spec = [0.25, 0.15, 0.6] - suite_column, run_column, results_column = st.columns(column_spec) + column_spec = [0.25, 0.15, 0.15, 0.5] + suite_column, generation_column, run_column, results_column = st.columns(column_spec) suite_column.caption("Test Suite") - # generation_column.caption("Latest Generation") + generation_column.caption("Latest Generation") run_column.caption("Latest Run") results_column.caption("Latest Results") testgen.whitespace(1) @@ -166,7 +168,7 @@ def render_table_group_expanded(table_group_id: str, project_code: str, key: int def render_test_suite_item(test_suite: pd.Series, column_spec: list[int], key: int) -> None: - suite_column, run_column, results_column = st.columns(column_spec) + suite_column, generation_column, run_column, results_column = st.columns(column_spec) with suite_column: testgen.no_flex_gap() testgen.link( @@ -177,10 +179,10 @@ def render_test_suite_item(test_suite: pd.Series, column_spec: list[int], key: i ) st.html(f'

{test_suite["last_run_test_ct"]} tests

') - # if (latest_generation := test_suite["latest_generation"]) and not pd.isnull(latest_generation): - # generation_column.markdown(date_service.get_timezoned_timestamp(st.session_state, latest_generation)) - # else: - # generation_column.markdown("--") + if (latest_generation := test_suite["latest_auto_gen_date"]) and not pd.isnull(latest_generation): + generation_column.html(f'

{date_service.get_timezoned_timestamp(st.session_state, latest_generation)}

') + else: + generation_column.markdown("--") latest_run_id = test_suite["latest_run_id"] if latest_run_id and not pd.isnull(latest_run_id): @@ -198,11 +200,12 @@ def render_test_suite_item(test_suite: pd.Series, column_spec: list[int], key: i { "label": "Passed", "value": int(test_suite["last_run_passed_ct"]), "color": "green" }, { "label": "Warnings", "value": int(test_suite["last_run_warning_ct"]), "color": "yellow" }, { "label": "Failed", "value": int(test_suite["last_run_failed_ct"]), "color": "red" }, - { "label": "Errors", "value": int(test_suite["last_run_error_ct"]), "color": "grey" }, + { "label": "Errors", "value": int(test_suite["last_run_error_ct"]), "color": "brown" }, + { "label": "Dismissed", "value": int(test_suite["last_run_dismissed_ct"]), "color": "grey" }, ], key=f"tests_{key}", height=8, - width=120, + width=200, ) else: run_column.markdown("--") @@ -273,16 +276,50 @@ def get_table_groups_summary(project_code: str) -> pd.DataFrame: ), latest_tests AS ( SELECT suites.table_groups_id, - COUNT(*) as test_suite_ct, - SUM(passed_ct) as passed_ct, - SUM(warning_ct) as warning_ct, - SUM(failed_ct) as failed_ct, - SUM(error_ct) as error_ct + COUNT(DISTINCT latest_run.test_suite_id) as test_suite_ct, + COUNT(*) as test_ct, + SUM( + CASE + WHEN COALESCE(latest_results.disposition, 'Confirmed') = 'Confirmed' + AND latest_results.result_status = 'Passed' THEN 1 + ELSE 0 + END + ) as passed_ct, + SUM( + CASE + WHEN COALESCE(latest_results.disposition, 'Confirmed') = 'Confirmed' + AND latest_results.result_status = 'Warning' THEN 1 + ELSE 0 + END + ) as warning_ct, + SUM( + CASE + WHEN COALESCE(latest_results.disposition, 'Confirmed') = 'Confirmed' + AND latest_results.result_status = 'Failed' THEN 1 + ELSE 0 + END + ) as failed_ct, + SUM( + CASE + WHEN COALESCE(latest_results.disposition, 'Confirmed') = 'Confirmed' + AND latest_results.result_status = 'Error' THEN 1 + ELSE 0 + END + ) as error_ct, + SUM( + CASE + WHEN COALESCE(latest_results.disposition, 
'Confirmed') IN ('Dismissed', 'Inactive') THEN 1 + ELSE 0 + END + ) as dismissed_ct FROM latest_run_dates lrd LEFT JOIN {schema}.test_runs latest_run ON ( lrd.test_suite_id = latest_run.test_suite_id AND lrd.test_starttime = latest_run.test_starttime ) + LEFT JOIN {schema}.test_results latest_results ON ( + latest_run.id = latest_results.test_run_id + ) LEFT JOIN {schema}.test_suites as suites ON (suites.id = lrd.test_suite_id) GROUP BY suites.table_groups_id ) @@ -297,11 +334,12 @@ def get_table_groups_summary(project_code: str) -> pd.DataFrame: latest_profile.possible_ct as latest_anomalies_possible_ct, latest_profile.dismissed_ct as latest_anomalies_dismissed_ct, latest_tests.test_suite_ct as latest_tests_suite_ct, - latest_tests.passed_ct + latest_tests.warning_ct + latest_tests.failed_ct + latest_tests.error_ct as latest_tests_ct, + latest_tests.test_ct as latest_tests_ct, latest_tests.passed_ct as latest_tests_passed_ct, latest_tests.warning_ct as latest_tests_warning_ct, latest_tests.failed_ct as latest_tests_failed_ct, - latest_tests.error_ct as latest_tests_error_ct + latest_tests.error_ct as latest_tests_error_ct, + latest_tests.dismissed_ct as latest_tests_dismissed_ct FROM {schema}.table_groups as groups LEFT JOIN latest_profile ON (groups.id = latest_profile.table_groups_id) LEFT JOIN latest_tests ON (groups.id = latest_tests.table_groups_id) diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py index 5fa415b..9c932f3 100644 --- a/testgen/ui/views/test_results.py +++ b/testgen/ui/views/test_results.py @@ -287,23 +287,57 @@ def get_test_disposition(str_run_id): @st.cache_data(show_spinner=ALWAYS_SPIN) -def get_test_result_summary(str_run_id): - str_schema = st.session_state["dbschema"] - str_sql = f""" - SELECT passed_ct, - warning_ct, - failed_ct, - COALESCE(error_ct, 0) as error_ct - FROM {str_schema}.test_runs - WHERE id = '{str_run_id}'::UUID; +def get_test_result_summary(run_id): + schema = st.session_state["dbschema"] + 
sql = f""" + SELECT SUM( + CASE + WHEN COALESCE(test_results.disposition, 'Confirmed') = 'Confirmed' + AND test_results.result_status = 'Passed' THEN 1 + ELSE 0 + END + ) as passed_ct, + SUM( + CASE + WHEN COALESCE(test_results.disposition, 'Confirmed') = 'Confirmed' + AND test_results.result_status = 'Warning' THEN 1 + ELSE 0 + END + ) as warning_ct, + SUM( + CASE + WHEN COALESCE(test_results.disposition, 'Confirmed') = 'Confirmed' + AND test_results.result_status = 'Failed' THEN 1 + ELSE 0 + END + ) as failed_ct, + SUM( + CASE + WHEN COALESCE(test_results.disposition, 'Confirmed') = 'Confirmed' + AND test_results.result_status = 'Error' THEN 1 + ELSE 0 + END + ) as error_ct, + SUM( + CASE + WHEN COALESCE(test_results.disposition, 'Confirmed') IN ('Dismissed', 'Inactive') THEN 1 + ELSE 0 + END + ) as dismissed_ct + FROM {schema}.test_runs + LEFT JOIN {schema}.test_results ON ( + test_runs.id = test_results.test_run_id + ) + WHERE test_runs.id = '{run_id}'::UUID; """ - df = db.retrieve_data(str_sql) + df = db.retrieve_data(sql) return [ { "label": "Passed", "value": int(df.at[0, "passed_ct"]), "color": "green" }, { "label": "Warnings", "value": int(df.at[0, "warning_ct"]), "color": "yellow" }, { "label": "Failed", "value": int(df.at[0, "failed_ct"]), "color": "red" }, - { "label": "Errors", "value": int(df.at[0, "error_ct"]), "color": "grey" }, + { "label": "Errors", "value": int(df.at[0, "error_ct"]), "color": "brown" }, + { "label": "Dismissed", "value": int(df.at[0, "dismissed_ct"]), "color": "grey" }, ] diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py index caa497e..84da279 100644 --- a/testgen/ui/views/test_suites.py +++ b/testgen/ui/views/test_suites.py @@ -124,10 +124,11 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N { "label": "Passed", "value": int(test_suite["last_run_passed_ct"]), "color": "green" }, { "label": "Warnings", "value": int(test_suite["last_run_warning_ct"]), "color": "yellow" 
}, { "label": "Failed", "value": int(test_suite["last_run_failed_ct"]), "color": "red" }, - { "label": "Errors", "value": int(test_suite["last_run_error_ct"]), "color": "grey" }, + { "label": "Errors", "value": int(test_suite["last_run_error_ct"]), "color": "brown" }, + { "label": "Dismissed", "value": int(test_suite["last_run_dismissed_ct"]), "color": "grey" }, ], - height=30, - width=100, + height=20, + width=350, key=f"test_suite:keys:run-rummary:{test_suite['id']}", ) From 772a512daea940ccc7fdcd20d6288331631cfd7f Mon Sep 17 00:00:00 2001 From: Ricardo Boni Date: Tue, 17 Sep 2024 13:13:46 -0400 Subject: [PATCH 49/78] fix(test results): sorting and filtering should be optional for the CLI --- testgen/ui/views/test_results.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py index 9c932f3..cc8962c 100644 --- a/testgen/ui/views/test_results.py +++ b/testgen/ui/views/test_results.py @@ -188,7 +188,7 @@ def get_test_results(str_run_id, str_sel_test_status, test_type_id, sorting_colu return get_test_results_uncached(schema, str_run_id, str_sel_test_status, test_type_id, sorting_columns) -def get_test_results_uncached(str_schema, str_run_id, str_sel_test_status, test_type_id, sorting_columns): +def get_test_results_uncached(str_schema, str_run_id, str_sel_test_status, test_type_id=None, sorting_columns=None): # First visible row first, so multi-select checkbox will render str_order_by = "ORDER BY " + (", ".join(" ".join(col) for col in sorting_columns)) if sorting_columns else "" test_type_clause = f"AND r.test_type = '{test_type_id}'" if test_type_id else "" From 2d73a57ba5173e9df1633e2e047d106cfdb08af6 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Tue, 17 Sep 2024 00:38:29 -0400 Subject: [PATCH 50/78] feat(ui): bind grid selection to query params on result pages --- testgen/ui/services/form_service.py | 28 ++++++++++++++++++++++--- testgen/ui/views/profiling_anomalies.py | 8 
+++++-- testgen/ui/views/profiling_results.py | 6 +++++- testgen/ui/views/test_results.py | 9 ++++++-- 4 files changed, 43 insertions(+), 8 deletions(-) diff --git a/testgen/ui/services/form_service.py b/testgen/ui/services/form_service.py index a1a56de..e1810f3 100644 --- a/testgen/ui/services/form_service.py +++ b/testgen/ui/services/form_service.py @@ -19,6 +19,7 @@ import testgen.common.date_service as date_service import testgen.ui.services.authentication_service as authentication_service import testgen.ui.services.database_service as db +from testgen.ui.navigation.router import Router """ Shared rendering of UI elements @@ -766,8 +767,10 @@ def render_grid_select( str_prompt=None, int_height=400, do_multi_select=False, + bind_to_query=None, show_column_headers=None, render_highlights=True, + key="aggrid", ): show_prompt(str_prompt) @@ -841,7 +844,18 @@ def render_grid_select( gb = GridOptionsBuilder.from_dataframe(df) selection_mode = "multiple" if do_multi_select else "single" - gb.configure_selection(selection_mode=selection_mode, use_checkbox=do_multi_select) + + pre_selected_rows = None + if bind_to_query: + query_value = st.query_params.get(bind_to_query) + # Workaround for this open issue: https://github.com/PablocFonseca/streamlit-aggrid/issues/207#issuecomment-1793039564 + pre_selected_rows = { query_value: True } if isinstance(query_value, str) and query_value.isdigit() else None + + gb.configure_selection( + selection_mode=selection_mode, + use_checkbox=do_multi_select, + pre_selected_rows=pre_selected_rows, + ) all_columns = list(df.columns) @@ -896,10 +910,18 @@ def render_grid_select( "padding-bottom": "0px !important", } }, + # Key is needed for query binding to work + # Changing selection mode does not work if same key is used for both modes + key=f"{key}_{selection_mode}", ) - if len(grid_data["selected_rows"]): - return grid_data["selected_rows"] + selected_rows = grid_data["selected_rows"] + if bind_to_query: + Router().set_query_params({ + 
bind_to_query: selected_rows[0].get("_selectedRowNodeInfo", {}).get("nodeRowIndex") if len(selected_rows) else None, + }) + if len(selected_rows): + return selected_rows def render_logo(logo_path: str = logo_file): diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py index 4372902..ab1bfcc 100644 --- a/testgen/ui/views/profiling_anomalies.py +++ b/testgen/ui/views/profiling_anomalies.py @@ -127,7 +127,11 @@ def render(self, run_id: str, issue_class: str | None = None, issue_type: str | # Show main grid and retrieve selections selected = fm.render_grid_select( - df_pa, lst_show_columns, int_height=400, do_multi_select=do_multi_select + df_pa, + lst_show_columns, + int_height=400, + do_multi_select=do_multi_select, + bind_to_query="selected", ) with export_button_column: @@ -149,7 +153,7 @@ def render(self, run_id: str, issue_class: str | None = None, issue_type: str | if selected: # Always show details for last selected row - selected_row = selected[len(selected) - 1] + selected_row = selected[0] else: selected_row = None diff --git a/testgen/ui/views/profiling_results.py b/testgen/ui/views/profiling_results.py index 896631b..bd134d9 100644 --- a/testgen/ui/views/profiling_results.py +++ b/testgen/ui/views/profiling_results.py @@ -98,7 +98,11 @@ def render(self, run_id: str, table_name: str | None = None, column_name: str | with st.expander("📜 **Table CREATE script with suggested datatypes**"): st.code(generate_create_script(df), "sql") - selected_row = fm.render_grid_select(df, show_columns) + selected_row = fm.render_grid_select( + df, + show_columns, + bind_to_query="selected", + ) with export_button_column: testgen.flex_row_end() diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py index cc8962c..67e49a5 100644 --- a/testgen/ui/views/test_results.py +++ b/testgen/ui/views/test_results.py @@ -660,7 +660,12 @@ def show_result_detail(str_run_id, str_sel_test_status, test_type_id, sorting_co ] 
selected_rows = fm.render_grid_select( - df, lst_show_columns, do_multi_select=do_multi_select, show_column_headers=lst_show_headers + df, + lst_show_columns, + do_multi_select=do_multi_select, + show_column_headers=lst_show_headers, + key="grid:test-results", + bind_to_query="selected", ) with export_container: @@ -705,7 +710,7 @@ def show_result_detail(str_run_id, str_sel_test_status, test_type_id, sorting_co if not selected_rows: st.markdown(":orange[Select a record to see more information.]") else: - selected_row = selected_rows[len(selected_rows) - 1] + selected_row = selected_rows[0] dfh = get_test_result_history( selected_row["test_type"], selected_row["test_suite_id"], From cd837623de640cacd83cb7118ff143c1f5658ed2 Mon Sep 17 00:00:00 2001 From: Luis Trinidad Date: Wed, 18 Sep 2024 07:22:39 -0400 Subject: [PATCH 51/78] fix(test definitions): guard against null dynamic attribute labels --- testgen/ui/views/test_definitions.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py index ee25183..b43e1e8 100644 --- a/testgen/ui/views/test_definitions.py +++ b/testgen/ui/views/test_definitions.py @@ -312,7 +312,9 @@ def show_test_form( dynamic_attributes = dynamic_attributes_raw.split(",") dynamic_attributes_labels_raw = selected_test_type_row["default_parm_prompts"] - dynamic_attributes_labels = dynamic_attributes_labels_raw.split(",") + dynamic_attributes_labels = "" + if dynamic_attributes_labels_raw: + dynamic_attributes_labels = dynamic_attributes_labels_raw.split(",") dynamic_attributes_help_raw = selected_test_type_row["default_parm_help"] if not dynamic_attributes_help_raw: From 87bb2499c14eff5b9b5e87fe78f2a8bd75f790d6 Mon Sep 17 00:00:00 2001 From: Luis Trinidad Date: Wed, 18 Sep 2024 07:23:43 -0400 Subject: [PATCH 52/78] fix(test definitions): remove extra empty option from add dialog dropdown --- testgen/ui/views/test_definitions.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py index b43e1e8..463d8df 100644 --- a/testgen/ui/views/test_definitions.py +++ b/testgen/ui/views/test_definitions.py @@ -642,7 +642,7 @@ def prompt_for_test_type(): df = run_test_type_lookup_query(str_test_type=None, boo_show_referential=boo_show_referential, boo_show_table=boo_show_table, boo_show_column=boo_show_column, boo_show_custom=boo_show_custom) - lst_choices = ["(Select a Test Type)", *df["select_name"].tolist()] + lst_choices = df["select_name"].tolist() str_selected = selectbox("Test Type", lst_choices) if str_selected: From 7c03eaed4184aa913ee40907aa40e2ec633394e9 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Wed, 18 Sep 2024 13:35:07 -0400 Subject: [PATCH 53/78] feat(ui): add paginator component --- .../frontend/js/components/paginator.js | 111 ++++++++++++++++++ testgen/ui/components/frontend/js/main.js | 2 + .../ui/components/widgets/expander_toggle.py | 5 - testgen/ui/components/widgets/paginator.py | 25 ++++ 4 files changed, 138 insertions(+), 5 deletions(-) create mode 100644 testgen/ui/components/frontend/js/components/paginator.js create mode 100644 testgen/ui/components/widgets/paginator.py diff --git a/testgen/ui/components/frontend/js/components/paginator.js b/testgen/ui/components/frontend/js/components/paginator.js new file mode 100644 index 0000000..7c839a2 --- /dev/null +++ b/testgen/ui/components/frontend/js/components/paginator.js @@ -0,0 +1,111 @@ +/** + * @typedef Properties + * @type {object} + * @property {number} count + * @property {number} pageSize + * @property {number} pageIndex + */ +import van from '../van.min.js'; +import { Streamlit } from '../streamlit.js'; + +const { div, span, i, button } = van.tags; + +const Paginator = (/** @type Properties */ props) => { + const count = props.count.val; + const pageSize = props.pageSize.val; + + Streamlit.setFrameHeight(32); + + if 
(!window.testgen.loadedStylesheets.expanderToggle) { + document.adoptedStyleSheets.push(stylesheet); + window.testgen.loadedStylesheets.expanderToggle = true; + } + + const pageIndexState = van.state(props.pageIndex.val || 0); + + return div( + { class: 'tg-paginator' }, + span( + { class: 'tg-paginator--label' }, + () => { + const pageIndex = pageIndexState.val; + return `${pageSize * pageIndex + 1} - ${Math.min(count, pageSize * (pageIndex + 1))} of ${count}` + }, + ), + button( + { + class: 'tg-paginator--button', + onclick: () => { + pageIndexState.val = 0; + Streamlit.sendData(pageIndexState.val); + }, + disabled: () => pageIndexState.val === 0, + }, + i({class: 'material-symbols-rounded'}, 'first_page') + ), + button( + { + class: 'tg-paginator--button', + onclick: () => { + pageIndexState.val--; + Streamlit.sendData(pageIndexState.val); + }, + disabled: () => pageIndexState.val === 0, + }, + i({class: 'material-symbols-rounded'}, 'chevron_left') + ), + button( + { + class: 'tg-paginator--button', + onclick: () => { + pageIndexState.val++; + Streamlit.sendData(pageIndexState.val); + }, + disabled: () => pageIndexState.val === Math.ceil(count / pageSize) - 1, + }, + i({class: 'material-symbols-rounded'}, 'chevron_right') + ), + button( + { + class: 'tg-paginator--button', + onclick: () => { + pageIndexState.val = Math.ceil(count / pageSize) - 1; + Streamlit.sendData(pageIndexState.val); + }, + disabled: () => pageIndexState.val === Math.ceil(count / pageSize) - 1, + }, + i({class: 'material-symbols-rounded'}, 'last_page') + ), + ); +}; + +const stylesheet = new CSSStyleSheet(); +stylesheet.replace(` +.tg-paginator { + display: flex; + flex-direction: row; + align-items: center; + justify-content: flex-end; +} + +.tg-paginator--label { + margin-right: 20px; + color: var(--secondary-text-color); +} + +.tg-paginator--button { + background-color: transparent; + border: none; + height: 32px; + padding: 4px; + color: var(--secondary-text-color); + cursor: pointer; 
+} + +.tg-paginator--button[disabled] { + color: var(--disabled-text-color); + cursor: default; +} +`); + +export { Paginator }; diff --git a/testgen/ui/components/frontend/js/main.js b/testgen/ui/components/frontend/js/main.js index 3c56cdd..bf8bc4b 100644 --- a/testgen/ui/components/frontend/js/main.js +++ b/testgen/ui/components/frontend/js/main.js @@ -11,6 +11,7 @@ import { Button } from './components/button.js' import { Breadcrumbs } from './components/breadcrumbs.js' import { ExpanderToggle } from './components/expander_toggle.js'; import { Link } from './components/link.js'; +import { Paginator } from './components/paginator.js'; import { Select } from './components/select.js' import { SummaryBar } from './components/summary_bar.js'; import { SortingSelector } from './components/sorting_selector.js'; @@ -24,6 +25,7 @@ const TestGenComponent = (/** @type {string} */ id, /** @type {object} */ props) button: Button, expander_toggle: ExpanderToggle, link: Link, + paginator: Paginator, select: Select, sorting_selector: SortingSelector, sidebar: window.top.testgen.components.Sidebar, diff --git a/testgen/ui/components/widgets/expander_toggle.py b/testgen/ui/components/widgets/expander_toggle.py index 16e1bf3..21f6dcb 100644 --- a/testgen/ui/components/widgets/expander_toggle.py +++ b/testgen/ui/components/widgets/expander_toggle.py @@ -1,11 +1,7 @@ -import logging - import streamlit as st from testgen.ui.components.utils.component import component -LOG = logging.getLogger("testgen") - def expander_toggle( default: bool = False, @@ -22,7 +18,6 @@ def expander_toggle( :param collapse_label: label for expanded state, default="Collapse" :param key: unique key to give the component a persisting state """ - LOG.debug(key) if key in st.session_state: default = st.session_state[key] diff --git a/testgen/ui/components/widgets/paginator.py b/testgen/ui/components/widgets/paginator.py new file mode 100644 index 0000000..8c1e4c7 --- /dev/null +++ 
b/testgen/ui/components/widgets/paginator.py @@ -0,0 +1,25 @@ +from testgen.ui.components.utils.component import component + + +def paginator( + count: int, + page_size: int, + page_index: int = 0, + key: str = "testgen:paginator", +) -> bool: + """ + Testgen component to display pagination arrows. + + # Parameters + :param count: total number of items being paginated + :param page_size: number of items displayed per page + :param page_index: index of initial page displayed, default=0 (first page) + :param key: unique key to give the component a persisting state + """ + + return component( + id_="paginator", + key=key, + default=page_index, + props={"count": count, "pageSize": page_size, "pageIndex": page_index}, + ) From 858419f83fbc1e145e9ab68f4f7d021e85c2b229 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Wed, 18 Sep 2024 13:36:12 -0400 Subject: [PATCH 54/78] misc(ui): updates to components and widgets --- testgen/ui/assets/style.css | 26 +++++++++++++------ testgen/ui/components/frontend/css/shared.css | 13 +++++++++- .../frontend/js/components/button.js | 2 ++ testgen/ui/components/widgets/__init__.py | 5 ++++ testgen/ui/components/widgets/button.py | 4 +++ testgen/ui/components/widgets/card.py | 2 +- testgen/ui/components/widgets/page.py | 26 +++++++++++++++---- 7 files changed, 63 insertions(+), 15 deletions(-) diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css index ed5b605..25b829e 100644 --- a/testgen/ui/assets/style.css +++ b/testgen/ui/assets/style.css @@ -3,6 +3,15 @@ body { --link-color: #1976d2; --error-color: #EF5350; + --red: #EF5350; + --orange: #FF9800; + --yellow: #FDD835; + --green: #9CCC65; + --purple: #AB47BC; + --blue: #42A5F5; + --brown: #8D6E63; + --grey: #BDBDBD; + --primary-text-color: #000000de; --secondary-text-color: #0000008a; --disabled-text-color: #00000042; @@ -129,10 +138,6 @@ button[title="Show password text"] { } /* Cards Component */ -[data-testid="stVerticalBlockBorderWrapper"]:has(> div > 
div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.testgen_card) { - background-color: var(--dk-card-background); -} - [data-testid="stVerticalBlockBorderWrapper"]:has(> div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.testgen_card) .testgen_card-header > .testgen_card-title { margin: unset; padding: unset; @@ -149,6 +154,10 @@ button[title="Show password text"] { } /* ... */ +[data-testid="stVerticalBlockBorderWrapper"]:has(> div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.bg-white) { + background-color: var(--dk-card-background); +} + [data-testid="column"]:has(> div[data-testid="stVerticalBlockBorderWrapper"] > div > div[data-testid="stVerticalBlock"] > div.element-container > div.stHtml > i.flex-row) [data-testid="stVerticalBlock"] { width: 100%; flex-direction: row; @@ -173,19 +182,20 @@ button[title="Show password text"] { } /* Stylistic equivalent of st.caption("text") for customization -Use as st.html('

text

') */ +Use as testgen.caption("text", "extra_styles") */ .caption { color: var(--caption-text-color); font-size: 14px; margin-bottom: 0; } -/* Stylistic equivalent of testgen.link() to match font style of links -Use as st.html('

text

') */ -.line { +/* Stylistic equivalent of testgen.link() to match font size/style of links +Use as testgen.text("text", "extra_styles") */ +.text { font-size: 14px; font-family: 'Roboto', 'Helvetica Neue', sans-serif; line-height: 16.5px; + margin-bottom: 0; } /* Tooltips */ diff --git a/testgen/ui/components/frontend/css/shared.css b/testgen/ui/components/frontend/css/shared.css index 42419d5..fcf0fa1 100644 --- a/testgen/ui/components/frontend/css/shared.css +++ b/testgen/ui/components/frontend/css/shared.css @@ -9,11 +9,22 @@ body { body { --primary-color: #06a04a; + --link-color: #1976d2; + --error-color: #EF5350; + + --red: #EF5350; + --orange: #FF9800; + --yellow: #FDD835; + --green: #9CCC65; + --purple: #AB47BC; + --blue: #42A5F5; + --brown: #8D6E63; + --grey: #BDBDBD; + --primary-text-color: #000000de; --secondary-text-color: #0000008a; --disabled-text-color: #00000042; --caption-text-color: rgba(49, 51, 63, 0.6); /* Match Streamlit's caption color */ - --link-color: #1976d2; --sidebar-background-color: white; --sidebar-item-hover-color: #f5f5f5; diff --git a/testgen/ui/components/frontend/js/components/button.js b/testgen/ui/components/frontend/js/components/button.js index 02eedac..a5ce8e8 100644 --- a/testgen/ui/components/frontend/js/components/button.js +++ b/testgen/ui/components/frontend/js/components/button.js @@ -7,6 +7,7 @@ * @property {(string|null)} tooltip * @property {(string|null)} tooltipPosition * @property {(Function|null)} onclick + * @property {string?} style */ import { enforceElementWidth } from '../utils.js'; import van from '../van.min.js'; @@ -42,6 +43,7 @@ const Button = (/** @type Properties */ props) => { return button( { class: `tg-button tg-${props.type.val}-button ${props.type.val !== 'icon' && isIconOnly ? 
'tg-icon-button' : ''}`, + style: props.style?.val, onclick: onClickHandler, }, span({class: 'tg-button-focus-state-indicator'}, ''), diff --git a/testgen/ui/components/widgets/__init__.py b/testgen/ui/components/widgets/__init__.py index 653d5e0..eba62b7 100644 --- a/testgen/ui/components/widgets/__init__.py +++ b/testgen/ui/components/widgets/__init__.py @@ -6,13 +6,18 @@ from testgen.ui.components.widgets.expander_toggle import expander_toggle from testgen.ui.components.widgets.link import link from testgen.ui.components.widgets.page import ( + caption, + css_class, + divider, flex_row_end, flex_row_start, no_flex_gap, page_header, + text, toolbar_select, whitespace, ) +from testgen.ui.components.widgets.paginator import paginator from testgen.ui.components.widgets.sidebar import sidebar from testgen.ui.components.widgets.sorting_selector import sorting_selector from testgen.ui.components.widgets.summary_bar import summary_bar diff --git a/testgen/ui/components/widgets/button.py b/testgen/ui/components/widgets/button.py index c248981..dba2fe5 100644 --- a/testgen/ui/components/widgets/button.py +++ b/testgen/ui/components/widgets/button.py @@ -13,6 +13,7 @@ def button( tooltip: str | None = None, tooltip_position: TooltipPosition = "left", on_click: typing.Callable[..., None] | None = None, + style: str | None = None, key: str | None = None, ) -> None: """ @@ -36,4 +37,7 @@ def button( if tooltip: props.update({"tooltip": tooltip, "tooltipPosition": tooltip_position}) + if style: + props.update({"style": style}) + component(id_="button", key=key, props=props, on_change=on_click) diff --git a/testgen/ui/components/widgets/card.py b/testgen/ui/components/widgets/card.py index 97677c8..1b4ddad 100644 --- a/testgen/ui/components/widgets/card.py +++ b/testgen/ui/components/widgets/card.py @@ -20,7 +20,7 @@ def card( extra_css_class: str = "", ) -> typing.Generator["CardContext", None, None]: with st.container(border=border): - st.html(f'') + st.html(f'') 
title_column, actions_column = st.columns([.5, .5], vertical_alignment="center") if title or subtitle: diff --git a/testgen/ui/components/widgets/page.py b/testgen/ui/components/widgets/page.py index 6f429b8..cb3b495 100644 --- a/testgen/ui/components/widgets/page.py +++ b/testgen/ui/components/widgets/page.py @@ -21,7 +21,7 @@ def page_header( st.page_link(help_link, label=" ", icon=":material/help:") if breadcrumbs: - tg_breadcrumbs(breadcrumbs=breadcrumbs) + tg_breadcrumbs(breadcrumbs=breadcrumbs) st.write( '
', container) +def divider(margin_top: int = 0, margin_bottom: int = 0, container: DeltaGenerator | None = None): + _apply_html(f'
', container) + + +def text(text: str, styles: str = "", container: DeltaGenerator | None = None): + _apply_html(f'

{text}

', container) + + +def caption(text: str, styles: str = "", container: DeltaGenerator | None = None): + _apply_html(f'

{text}

', container) + + +def css_class(css_classes: str, container: DeltaGenerator | None = None): + _apply_html(f'', container) + + def flex_row_start(container: DeltaGenerator | None = None): _apply_html('', container) @@ -93,7 +109,7 @@ def flex_row_end(container: DeltaGenerator | None = None): def no_flex_gap(container: DeltaGenerator | None = None): _apply_html('', container) - + def _apply_html(html: str, container: DeltaGenerator | None = None): if container: From f0ae57c4df63fe81efdd4b33aacd4daea5e5e2b3 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Wed, 18 Sep 2024 13:36:39 -0400 Subject: [PATCH 55/78] misc(ui): add utility functions for formatting display --- testgen/common/date_service.py | 14 ++++++++++++++ testgen/utils/__init__.py | 7 +++++++ 2 files changed, 21 insertions(+) diff --git a/testgen/common/date_service.py b/testgen/common/date_service.py index e0e37f2..28e4b06 100644 --- a/testgen/common/date_service.py +++ b/testgen/common/date_service.py @@ -66,3 +66,17 @@ def get_timezoned_timestamp(streamlit_session, value, dateformat="%b %-d, %-I:%M def get_timezoned_now(streamlit_session): value = datetime.utcnow() return get_timezoned_timestamp(streamlit_session, value) + + +def get_formatted_duration(duration: str) -> str: + hour, minute, second = duration.split(":") + formatted = "" + if int(hour): + formatted += f"{int(hour)!s}h" + if int(minute): + formatted += f" {int(minute)!s}m" + if int(second): + formatted += f" {int(second)!s}s" + + formatted = formatted.strip() or "< 1s" + return formatted diff --git a/testgen/utils/__init__.py b/testgen/utils/__init__.py index e69de29..d7475d5 100644 --- a/testgen/utils/__init__.py +++ b/testgen/utils/__init__.py @@ -0,0 +1,7 @@ +import pandas as pd + + +def to_int(value: float | int) -> int: + if pd.notnull(value): + return int(value) + return 0 From 9b703deb8072da2a340396f7a7379322c116607f Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Wed, 18 Sep 2024 13:37:21 -0400 Subject: [PATCH 56/78] 
refactor(ui): replace grid with list in test runs page --- testgen/ui/views/test_runs.py | 316 +++++++++++++++++++++------------- 1 file changed, 192 insertions(+), 124 deletions(-) diff --git a/testgen/ui/views/test_runs.py b/testgen/ui/views/test_runs.py index 3f957d2..eb972dd 100644 --- a/testgen/ui/views/test_runs.py +++ b/testgen/ui/views/test_runs.py @@ -1,5 +1,7 @@ import typing +from functools import partial +import pandas as pd import streamlit as st import testgen.common.process_service as process_service @@ -12,6 +14,9 @@ from testgen.ui.navigation.menu import MenuItem from testgen.ui.navigation.page import Page from testgen.ui.session import session +from testgen.utils import to_int + +PAGE_SIZE = 50 class TestRunsPage(Page): @@ -24,21 +29,18 @@ class TestRunsPage(Page): def render(self, project_code: str | None = None, table_group_id: str | None = None, test_suite_id: str | None = None, **_kwargs) -> None: project_code = project_code or st.session_state["project"] - + testgen.page_header( "Test Runs", "https://docs.datakitchen.io/article/dataops-testgen-help/test-results", ) - # Setup Toolbar group_filter_column, suite_filter_column, actions_column = st.columns([.3, .3, .4], vertical_alignment="bottom") - testgen.flex_row_end(actions_column) with group_filter_column: - # Table Groups selection -- optional criterion - df_tg = get_db_table_group_choices(project_code) + table_groups_df = get_db_table_group_choices(project_code) table_groups_id = testgen.toolbar_select( - options=df_tg, + options=table_groups_df, value_column="id", display_column="table_groups_name", default_value=table_group_id, @@ -47,10 +49,9 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N ) with suite_filter_column: - # Table Groups selection -- optional criterion - df_ts = get_db_test_suite_choices(project_code, table_groups_id) + test_suites_df = get_db_test_suite_choices(project_code, table_groups_id) test_suite_id = testgen.toolbar_select( - 
options=df_ts, + options=test_suites_df, value_column="id", display_column="test_suite", default_value=test_suite_id, @@ -58,140 +59,207 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N label="Test Suite", ) - df, show_columns = get_db_test_runs(project_code, table_groups_id, test_suite_id) + testgen.flex_row_end(actions_column) + fm.render_refresh_button(actions_column) - time_columns = ["run_date"] - date_service.accommodate_dataframe_to_timezone(df, st.session_state, time_columns) + testgen.whitespace(0.5) + list_container = st.container(border=True) - dct_selected_rows = fm.render_grid_select(df, show_columns) - dct_selected_row = dct_selected_rows[0] if dct_selected_rows else None + test_runs_df = get_db_test_runs(project_code, table_groups_id, test_suite_id) - if actions_column.button( - f":{'gray' if not dct_selected_row else 'green'}[Test Results →]", - help="Review test results for the selected run", - disabled=not dct_selected_row, - ): - self.router.navigate("test-runs:results", { "run_id": dct_selected_row["test_run_id"] }) + run_count = len(test_runs_df) + page_index = testgen.paginator(count=run_count, page_size=PAGE_SIZE) - fm.render_refresh_button(actions_column) + with list_container: + testgen.css_class("bg-white") + column_spec = [.3, .2, .5] + + run_column, status_column, results_column = st.columns(column_spec, vertical_alignment="top") + header_styles = "font-size: 12px; text-transform: uppercase; margin-bottom: 8px;" + testgen.caption("Start Time | Table Group | Test Suite", header_styles, run_column) + testgen.caption("Status | Duration", header_styles, status_column) + testgen.caption("Results Summary", header_styles, results_column) + testgen.divider(-8) + + paginated_df = test_runs_df[PAGE_SIZE * page_index : PAGE_SIZE * (page_index + 1)] + for index, test_run in paginated_df.iterrows(): + with st.container(): + render_test_run_row(test_run, column_spec) + + if (index + 1) % PAGE_SIZE and index != 
run_count - 1: + testgen.divider(-4, 4) + + +def render_test_run_row(test_run: pd.Series, column_spec: list[int]) -> None: + test_run_id = test_run["test_run_id"] + status = test_run["status"] + + run_column, status_column, results_column = st.columns(column_spec, vertical_alignment="top") + + with run_column: + start_time = date_service.get_timezoned_timestamp(st.session_state, test_run["test_starttime"]) if pd.notnull(test_run["test_starttime"]) else "--" + testgen.no_flex_gap() + testgen.link( + label=start_time, + href="test-runs:results", + params={ "run_id": str(test_run_id) }, + height=18, + key=f"test_run:keys:go-to-run:{test_run_id}", + ) + testgen.caption( + f"{test_run['table_groups_name']} > {test_run['test_suite']}", + "margin-top: -9px;" + ) + + with status_column: + testgen.flex_row_start() + + status_display_map = { + "Running": { "label": "Running", "color": "blue" }, + "Complete": { "label": "Completed", "color": "" }, + "Error": { "label": "Error", "color": "red" }, + "Cancelled": { "label": "Canceled", "color": "purple" }, + } + status_attrs = status_display_map.get(status, { "label": "Unknown", "color": "grey" }) - if dct_selected_rows: - open_record_detail( - dct_selected_rows[0], + st.html(f""" +

{status_attrs["label"]}

+

{date_service.get_formatted_duration(test_run["duration"])}

+ """) + + if status == "Error" and (log_message := test_run["log_message"]): + st.markdown("", help=log_message) + + if status == "Running" and pd.notnull(test_run["process_id"]): + testgen.button( + type_="stroked", + label="Cancel Run", + style="width: auto; height: 32px; color: var(--purple); margin-left: 16px;", + on_click=partial(on_cancel_run, test_run), + key=f"test_run:keys:cancel-run:{test_run_id}", + ) + + with results_column: + if to_int(test_run["test_ct"]): + testgen.summary_bar( + items=[ + { "label": "Passed", "value": to_int(test_run["passed_ct"]), "color": "green" }, + { "label": "Warnings", "value": to_int(test_run["warning_ct"]), "color": "yellow" }, + { "label": "Failed", "value": to_int(test_run["failed_ct"]), "color": "red" }, + { "label": "Errors", "value": to_int(test_run["error_ct"]), "color": "brown" }, + { "label": "Dismissed", "value": to_int(test_run["dismissed_ct"]), "color": "grey" }, + ], + height=10, + width=300, + key=f"test_run:keys:summary:{test_run_id}", ) - st.markdown(":orange[Click button to access test results for selected run.]") else: - st.markdown(":orange[Select a run to access test results.]") + st.markdown("--") + + +def on_cancel_run(test_run: pd.Series) -> None: + process_status, process_message = process_service.kill_test_run(test_run["process_id"]) + if process_status: + test_run_service.update_status(test_run["test_run_id"], "Cancelled") + + fm.reset_post_updates(str_message=f":{'green' if process_status else 'red'}[{process_message}]", as_toast=True) @st.cache_data(show_spinner=False) -def run_test_suite_lookup_query(str_schema, str_project, str_tg=None): - str_tg_condition = f" AND s.table_groups_id = '{str_tg}' " if str_tg else "" - str_sql = f""" - SELECT s.id::VARCHAR(50), - s.test_suite, - COALESCE(s.test_suite_description, s.test_suite) AS test_suite_description - FROM {str_schema}.test_suites s - LEFT JOIN {str_schema}.table_groups tg ON s.table_groups_id = tg.id - WHERE s.project_code = '{str_project}' 
{str_tg_condition} - ORDER BY s.test_suite +def run_test_suite_lookup_query(schema: str, project_code: str, table_groups_id: str | None = None) -> pd.DataFrame: + table_group_condition = f" AND test_suites.table_groups_id = '{table_groups_id}' " if table_groups_id else "" + sql = f""" + SELECT test_suites.id::VARCHAR(50), + test_suites.test_suite + FROM {schema}.test_suites + LEFT JOIN {schema}.table_groups ON test_suites.table_groups_id = table_groups.id + WHERE test_suites.project_code = '{project_code}' + {table_group_condition} + ORDER BY test_suites.test_suite """ - return db.retrieve_data(str_sql) + return db.retrieve_data(sql) @st.cache_data(show_spinner=False) -def get_db_table_group_choices(str_project_code): - str_schema = st.session_state["dbschema"] - return dq.run_table_groups_lookup_query(str_schema, str_project_code) +def get_db_table_group_choices(project_code: str) -> pd.DataFrame: + schema = st.session_state["dbschema"] + return dq.run_table_groups_lookup_query(schema, project_code) @st.cache_data(show_spinner=False) -def get_db_test_suite_choices(str_project_code, str_table_groups_id=None): - str_schema = st.session_state["dbschema"] - return run_test_suite_lookup_query(str_schema, str_project_code, str_table_groups_id) +def get_db_test_suite_choices(project_code: str, table_groups_id: str | None = None) -> pd.DataFrame: + schema = st.session_state["dbschema"] + return run_test_suite_lookup_query(schema, project_code, table_groups_id) # @st.cache_data(show_spinner="Retrieving Data") -def get_db_test_runs(str_project_code, str_tg=None, str_ts=None): - str_schema = st.session_state["dbschema"] - str_tg_condition = f" AND s.table_groups_id = '{str_tg}' " if str_tg else "" - str_ts_condition = f" AND s.id = '{str_ts}' " if str_ts else "" - str_sql = f""" - SELECT r.test_starttime as run_date, - s.test_suite, s.test_suite_description, - r.status, - r.duration, - r.test_ct, r.passed_ct, r.failed_ct, r.warning_ct, r.error_ct, - ROUND(100.0 * 
r.passed_ct::DECIMAL(12, 4) / r.test_ct::DECIMAL(12, 4), 3) as passed_pct, - COALESCE(r.log_message, 'Test run completed successfully.') as log_message, - r.column_ct, r.column_failed_ct, r.column_warning_ct, - ROUND(100.0 * (r.column_ct - r.column_failed_ct - r.column_warning_ct)::DECIMAL(12, 4) / r.column_ct::DECIMAL(12, 4), 3) as column_passed_pct, - r.id::VARCHAR as test_run_id, - p.project_name, - s.table_groups_id::VARCHAR, tg.table_groups_name, tg.table_group_schema, process_id - FROM {str_schema}.test_runs r - INNER JOIN {str_schema}.test_suites s - ON (r.test_suite_id = s.id) - INNER JOIN {str_schema}.table_groups tg - ON (s.table_groups_id = tg.id) - INNER JOIN {str_schema}.projects p - ON (s.project_code = p.project_code) - WHERE s.project_code = '{str_project_code}' {str_tg_condition} {str_ts_condition} - ORDER BY r.test_starttime DESC; +def get_db_test_runs(project_code: str, table_groups_id: str | None = None, test_suite_id: str | None = None) -> pd.DataFrame: + schema = st.session_state["dbschema"] + table_group_condition = f" AND test_suites.table_groups_id = '{table_groups_id}' " if table_groups_id else "" + test_suite_condition = f" AND test_suites.id = '{test_suite_id}' " if test_suite_id else "" + sql = f""" + WITH run_results AS ( + SELECT test_run_id, + SUM( + CASE + WHEN COALESCE(disposition, 'Confirmed') = 'Confirmed' + AND result_status = 'Passed' THEN 1 + ELSE 0 + END + ) as passed_ct, + SUM( + CASE + WHEN COALESCE(disposition, 'Confirmed') = 'Confirmed' + AND result_status = 'Warning' THEN 1 + ELSE 0 + END + ) as warning_ct, + SUM( + CASE + WHEN COALESCE(disposition, 'Confirmed') = 'Confirmed' + AND result_status = 'Failed' THEN 1 + ELSE 0 + END + ) as failed_ct, + SUM( + CASE + WHEN COALESCE(disposition, 'Confirmed') = 'Confirmed' + AND result_status = 'Error' THEN 1 + ELSE 0 + END + ) as error_ct, + SUM( + CASE + WHEN COALESCE(disposition, 'Confirmed') IN ('Dismissed', 'Inactive') THEN 1 + ELSE 0 + END + ) as dismissed_ct + FROM 
{schema}.test_results + GROUP BY test_run_id + ) + SELECT test_runs.id::VARCHAR as test_run_id, + test_runs.test_starttime, + table_groups.table_groups_name, + test_suites.test_suite, + test_runs.status, + test_runs.duration, + test_runs.process_id, + test_runs.log_message, + test_runs.test_ct, + run_results.passed_ct, + run_results.warning_ct, + run_results.failed_ct, + run_results.error_ct, + run_results.dismissed_ct + FROM {schema}.test_runs + LEFT JOIN run_results ON (test_runs.id = run_results.test_run_id) + INNER JOIN {schema}.test_suites ON (test_runs.test_suite_id = test_suites.id) + INNER JOIN {schema}.table_groups ON (test_suites.table_groups_id = table_groups.id) + INNER JOIN {schema}.projects ON (test_suites.project_code = projects.project_code) + WHERE test_suites.project_code = '{project_code}' + {table_group_condition} + {test_suite_condition} + ORDER BY test_runs.test_starttime DESC; """ - show_columns = [ - "run_date", - "test_suite", - "test_suite_description", - "status", - "duration", - "test_ct", - "failed_ct", - "warning_ct", - ] - - return db.retrieve_data(str_sql), show_columns - - -def open_record_detail(dct_selected_row): - bottom_left_column, bottom_right_column = st.columns([0.5, 0.5]) - - with bottom_left_column: - # Show Run Detail - lst_detail_columns = [ - "test_suite", - "test_suite_description", - "run_date", - "status", - "log_message", - "table_groups_name", - "test_ct", - "passed_ct", - "failed_ct", - "warning_ct", - "error_ct", - ] - fm.render_html_list(dct_selected_row, lst_detail_columns, "Run Information", 500) - - with bottom_right_column: - st.write("

", unsafe_allow_html=True) - _, button_column = st.columns([0.3, 0.7]) - with button_column: - enable_kill_button = dct_selected_row and dct_selected_row["process_id"] is not None and dct_selected_row["status"] == "Running" - - if enable_kill_button: - if st.button( - ":red[Cancel Run]", - help="Kill the selected test run", - use_container_width=True, - disabled=not enable_kill_button, - ): - process_id = dct_selected_row["process_id"] - test_run_id = dct_selected_row["test_run_id"] - status, message = process_service.kill_test_run(process_id) - - if status: - test_run_service.update_status(test_run_id, "Cancelled") - - fm.reset_post_updates(str_message=f":{'green' if status else 'red'}[{message}]", as_toast=True) + return db.retrieve_data(sql) From b9900c94cd8dde88fe710c6b2940c98feae5c631 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Wed, 18 Sep 2024 13:37:30 -0400 Subject: [PATCH 57/78] refactor(ui): replace grid with list in profiling runs page --- testgen/ui/views/profiling_summary.py | 304 ++++++++++++++++---------- 1 file changed, 193 insertions(+), 111 deletions(-) diff --git a/testgen/ui/views/profiling_summary.py b/testgen/ui/views/profiling_summary.py index e81fa15..c49921e 100644 --- a/testgen/ui/views/profiling_summary.py +++ b/testgen/ui/views/profiling_summary.py @@ -1,5 +1,7 @@ import typing +from functools import partial +import pandas as pd import streamlit as st import testgen.common.process_service as process_service @@ -12,8 +14,10 @@ from testgen.ui.navigation.menu import MenuItem from testgen.ui.navigation.page import Page from testgen.ui.session import session +from testgen.utils import to_int FORM_DATA_WIDTH = 400 +PAGE_SIZE = 50 class DataProfilingPage(Page): @@ -31,15 +35,12 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N "https://docs.datakitchen.io/article/dataops-testgen-help/investigate-profiling", ) - # Setup Toolbar group_filter_column, actions_column = st.columns([.3, .7], 
vertical_alignment="bottom") - testgen.flex_row_end(actions_column) with group_filter_column: - # Table Groups selection -- optional criterion - df_tg = get_db_table_group_choices(project_code) - table_groups_id = testgen.toolbar_select( - options=df_tg, + table_groups_df = get_db_table_group_choices(project_code) + table_group_id = testgen.toolbar_select( + options=table_groups_df, value_column="id", display_column="table_groups_name", default_value=table_group_id, @@ -47,121 +48,202 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N label="Table Group", ) - df, show_columns = get_db_profiling_runs(project_code, table_groups_id) + testgen.flex_row_end(actions_column) + fm.render_refresh_button(actions_column) - time_columns = ["start_time"] - date_service.accommodate_dataframe_to_timezone(df, st.session_state, time_columns) + testgen.whitespace(0.5) + list_container = st.container(border=True) + + profiling_runs_df = get_db_profiling_runs(project_code, table_group_id) + + run_count = len(profiling_runs_df) + page_index = testgen.paginator(count=run_count, page_size=PAGE_SIZE) + + with list_container: + testgen.css_class("bg-white") + column_spec = [.2, .2, .2, .4] + + run_column, status_column, schema_column, issues_column = st.columns(column_spec, vertical_alignment="top") + header_styles = "font-size: 12px; text-transform: uppercase; margin-bottom: 8px;" + testgen.caption("Start Time | Table Group", header_styles, run_column) + testgen.caption("Status | Duration", header_styles, status_column) + testgen.caption("Schema", header_styles, schema_column) + testgen.caption("Hygiene Issues", header_styles, issues_column) + testgen.divider(-8) + + paginated_df = profiling_runs_df[PAGE_SIZE * page_index : PAGE_SIZE * (page_index + 1)] + for index, profiling_run in paginated_df.iterrows(): + with st.container(): + render_profiling_run_row(profiling_run, column_spec) + + if (index + 1) % PAGE_SIZE and index != run_count - 1: + 
testgen.divider(-4, 4) + + +def render_profiling_run_row(profiling_run: pd.Series, column_spec: list[int]) -> None: + profiling_run_id = profiling_run["profiling_run_id"] + status = profiling_run["status"] + + run_column, status_column, schema_column, issues_column = st.columns(column_spec, vertical_alignment="top") + + with run_column: + start_time = date_service.get_timezoned_timestamp(st.session_state, profiling_run["start_time"]) if pd.notnull(profiling_run["start_time"]) else "--" + testgen.no_flex_gap() + testgen.text(start_time) + testgen.caption(profiling_run["table_groups_name"]) + + with status_column: + testgen.flex_row_start() + + status_display_map = { + "Running": { "label": "Running", "color": "blue" }, + "Complete": { "label": "Completed", "color": "" }, + "Error": { "label": "Error", "color": "red" }, + "Cancelled": { "label": "Canceled", "color": "purple" }, + } + status_attrs = status_display_map.get(status, { "label": "Unknown", "color": "grey" }) + + st.html(f""" +

{status_attrs["label"]}

+

{date_service.get_formatted_duration(profiling_run["duration"])}

+ """) + + if status == "Error" and (log_message := profiling_run["log_message"]): + st.markdown("", help=log_message) + + if status == "Running" and pd.notnull(profiling_run["process_id"]): + testgen.button( + type_="stroked", + label="Cancel Run", + style="width: auto; height: 32px; color: var(--purple); margin-left: 16px;", + on_click=partial(on_cancel_run, profiling_run), + key=f"profiling_run:keys:cancel-run:{profiling_run_id}", + ) - dct_selected_rows = fm.render_grid_select(df, show_columns) + with schema_column: + column_count = to_int(profiling_run["column_ct"]) + testgen.no_flex_gap() + testgen.text(profiling_run["schema_name"]) + testgen.caption( + f"{to_int(profiling_run['table_ct'])} tables, {column_count} columns", + f"margin-bottom: 3px;{' color: var(--red);' if status == 'Complete' and not column_count else ''}", + ) - open_drill_downs(dct_selected_rows, actions_column, self.router) - fm.render_refresh_button(actions_column) + if column_count: + testgen.link( + label="View results", + href="profiling-runs:results", + params={ "run_id": str(profiling_run_id) }, + right_icon="chevron_right", + height=18, + key=f"profiling_run:keys:go-to-runs:{profiling_run_id}", + ) - if dct_selected_rows: - show_record_detail(dct_selected_rows[0]) - st.markdown(":orange[Click a button to view profiling outcomes for the selected run.]") + with issues_column: + if anomaly_count := to_int(profiling_run["anomaly_ct"]): + testgen.no_flex_gap() + testgen.summary_bar( + items=[ + { "label": "Definite", "value": to_int(profiling_run["anomalies_definite_ct"]), "color": "red" }, + { "label": "Likely", "value": to_int(profiling_run["anomalies_likely_ct"]), "color": "orange" }, + { "label": "Possible", "value": to_int(profiling_run["anomalies_possible_ct"]), "color": "yellow" }, + { "label": "Dismissed", "value": to_int(profiling_run["anomalies_dismissed_ct"]), "color": "grey" }, + ], + height=10, + width=280, + key=f"test_run:keys:summary:{profiling_run_id}", + ) + 
testgen.link( + label=f"View {anomaly_count} issues", + href="profiling-runs:hygiene", + params={ "run_id": str(profiling_run_id) }, + right_icon="chevron_right", + height=18, + key=f"profiling_run:keys:go-to-hygiene:{profiling_run_id}", + ) else: - st.markdown(":orange[Select a run to see more information.]") + st.markdown("--") + + +def on_cancel_run(profiling_run: pd.Series) -> None: + process_status, process_message = process_service.kill_test_run(profiling_run["process_id"]) + if process_status: + update_profile_run_status(profiling_run["profile_run_id"], "Cancelled") + + fm.reset_post_updates(str_message=f":{'green' if process_status else 'red'}[{process_message}]", as_toast=True) @st.cache_data(show_spinner=False) -def get_db_table_group_choices(str_project_code): - str_schema = st.session_state["dbschema"] - return dq.run_table_groups_lookup_query(str_schema, str_project_code) +def get_db_table_group_choices(project_code: str) -> pd.DataFrame: + schema = st.session_state["dbschema"] + return dq.run_table_groups_lookup_query(schema, project_code) @st.cache_data(show_spinner="Retrieving Data") -def get_db_profiling_runs(str_project_code, str_tg=None): - str_schema = st.session_state["dbschema"] - str_tg_condition = f" AND table_groups_id = '{str_tg}' " if str_tg else "" - str_sql = f""" - SELECT project_code, connection_name, - connection_id::VARCHAR, - table_groups_id::VARCHAR, - profiling_run_id::VARCHAR, - table_groups_name, schema_name, start_time, duration, - CASE - WHEN status = 'Running' AND start_time < CURRENT_DATE - 1 THEN 'Error' - ELSE status - END as status, - COALESCE(log_message, '(No Errors)') as log_message, - table_ct, column_ct, - anomaly_ct, anomaly_table_ct, anomaly_column_ct, process_id - FROM {str_schema}.v_profiling_runs - WHERE project_code = '{str_project_code}' {str_tg_condition} - ORDER BY start_time DESC; +def get_db_profiling_runs(project_code: str, table_group_id: str | None = None) -> pd.DataFrame: + schema = 
st.session_state["dbschema"] + table_group_condition = f" AND v_profiling_runs.table_groups_id = '{table_group_id}' " if table_group_id else "" + sql = f""" + WITH profile_anomalies AS ( + SELECT profile_anomaly_results.profile_run_id, + SUM( + CASE + WHEN COALESCE(profile_anomaly_results.disposition, 'Confirmed') = 'Confirmed' + AND profile_anomaly_types.issue_likelihood = 'Definite' THEN 1 + ELSE 0 + END + ) as definite_ct, + SUM( + CASE + WHEN COALESCE(profile_anomaly_results.disposition, 'Confirmed') = 'Confirmed' + AND profile_anomaly_types.issue_likelihood = 'Likely' THEN 1 + ELSE 0 + END + ) as likely_ct, + SUM( + CASE + WHEN COALESCE(profile_anomaly_results.disposition, 'Confirmed') = 'Confirmed' + AND profile_anomaly_types.issue_likelihood = 'Possible' THEN 1 + ELSE 0 + END + ) as possible_ct, + SUM( + CASE + WHEN COALESCE(profile_anomaly_results.disposition, 'Confirmed') IN ('Dismissed', 'Inactive') + AND profile_anomaly_types.issue_likelihood <> 'Potential PII' THEN 1 + ELSE 0 + END + ) as dismissed_ct + FROM {schema}.profile_anomaly_results + LEFT JOIN {schema}.profile_anomaly_types ON ( + profile_anomaly_types.id = profile_anomaly_results.anomaly_id + ) + GROUP BY profile_anomaly_results.profile_run_id + ) + SELECT v_profiling_runs.profiling_run_id::VARCHAR, + v_profiling_runs.start_time, + v_profiling_runs.table_groups_name, + CASE + WHEN v_profiling_runs.status = 'Running' + AND v_profiling_runs.start_time < CURRENT_DATE - 1 THEN 'Error' + ELSE v_profiling_runs.status + END as status, + v_profiling_runs.process_id, + v_profiling_runs.duration, + v_profiling_runs.log_message, + v_profiling_runs.schema_name, + v_profiling_runs.table_ct, + v_profiling_runs.column_ct, + v_profiling_runs.anomaly_ct, + profile_anomalies.definite_ct as anomalies_definite_ct, + profile_anomalies.likely_ct as anomalies_likely_ct, + profile_anomalies.possible_ct as anomalies_possible_ct, + profile_anomalies.dismissed_ct as anomalies_dismissed_ct + FROM 
{schema}.v_profiling_runs + LEFT JOIN profile_anomalies ON (v_profiling_runs.profiling_run_id = profile_anomalies.profile_run_id) + WHERE project_code = '{project_code}' + {table_group_condition} + ORDER BY start_time DESC; """ - show_columns = [ - "connection_name", - "table_groups_name", - "schema_name", - "start_time", - "duration", - "status", - "table_ct", - "column_ct", - ] - - return db.retrieve_data(str_sql), show_columns - - -def open_drill_downs(dct_selected_rows, container, router): - dct_selected_row = None - if dct_selected_rows: - dct_selected_row = dct_selected_rows[0] - - if container.button( - f":{'gray' if not dct_selected_rows else 'green'}[Profiling →]", - help="Review profiling characteristics for each data column", - disabled=not dct_selected_rows, - ): - router.navigate("profiling-runs:results", { "run_id": dct_selected_row["profiling_run_id"] }) - - if container.button( - f":{'gray' if not dct_selected_rows else 'green'}[Hygiene →]", - help="Review potential data problems identified in profiling", - disabled=not dct_selected_rows, - ): - router.navigate("profiling-runs:hygiene", { "run_id": dct_selected_row["profiling_run_id"] }) - - -def show_record_detail(dct_selected_row): - bottom_left_column, bottom_right_column = st.columns([0.5, 0.5]) - - with bottom_left_column: - str_header = "Profiling Run Information" - lst_columns = [ - "connection_name", - "table_groups_name", - "schema_name", - "log_message", - "table_ct", - "column_ct", - "anomaly_ct", - "anomaly_table_ct", - "anomaly_column_ct", - ] - fm.render_html_list(dct_selected_row, lst_columns, str_header, FORM_DATA_WIDTH) - - with bottom_right_column: - st.write("

", unsafe_allow_html=True) - _, button_column = st.columns([0.3, 0.7]) - with button_column: - enable_kill_button = dct_selected_row and dct_selected_row["process_id"] is not None and dct_selected_row["status"] == "Running" - - if enable_kill_button: - if st.button( - ":red[Cancel Run]", - help="Kill the selected profile run", - use_container_width=True, - disabled=not enable_kill_button, - ): - process_id = dct_selected_row["process_id"] - profile_run_id = dct_selected_row["profiling_run_id"] - status, message = process_service.kill_profile_run(process_id) - - if status: - update_profile_run_status(profile_run_id, "Cancelled") - - fm.reset_post_updates(str_message=f":{'green' if status else 'red'}[{message}]", as_toast=True) + return db.retrieve_data(sql) From e8853613f201ca709032c51ab715ed864dedc145 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Wed, 18 Sep 2024 13:38:14 -0400 Subject: [PATCH 58/78] fix(ui): handle null data in overview and test suites pages --- testgen/ui/views/overview.py | 123 +++++++++++++++++--------------- testgen/ui/views/test_suites.py | 56 ++++++++------- 2 files changed, 95 insertions(+), 84 deletions(-) diff --git a/testgen/ui/views/overview.py b/testgen/ui/views/overview.py index 4c35890..8e01482 100644 --- a/testgen/ui/views/overview.py +++ b/testgen/ui/views/overview.py @@ -11,6 +11,7 @@ from testgen.ui.navigation.page import Page from testgen.ui.services import test_suite_service from testgen.ui.session import session +from testgen.utils import to_int STALE_PROFILE_DAYS = 30 @@ -29,8 +30,8 @@ def render(self, project_code: str | None = None, **_kwargs): testgen.page_header( "Project Overview", "https://docs.datakitchen.io/article/dataops-testgen-help/introduction-to-dataops-testgen", - ) - + ) + render_project_summary(table_groups_df) st.html(f'
Table Groups ({len(table_groups_df.index)})
') @@ -54,13 +55,13 @@ def render_project_summary(table_groups: pd.DataFrame) -> None: # delta=project_score_delta or 0, # label_visibility="collapsed", # ) - + with summary_column: st.caption("Project Summary") st.html(f"""{len(table_groups.index)} table groups
{int(table_groups['latest_tests_suite_ct'].sum())} test suites
{int(table_groups['latest_tests_ct'].sum())} test definitions - """) + """) @st.fragment @@ -72,7 +73,7 @@ def render_table_group_card(table_group: pd.Series, project_code: str, key: int) # Without this CSS, the "hidden" elements in the expanded state take up space testgen.no_flex_gap() - with test_suite_card.actions: + with test_suite_card.actions: expand_toggle = testgen.expander_toggle(key=f"toggle_{key}") profile_column, tests_column = st.columns([.5, .5]) @@ -90,54 +91,60 @@ def render_table_group_card(table_group: pd.Series, project_code: str, key: int) with profile_column: testgen.no_flex_gap() - profile_days_ago = (datetime.utcnow() - table_group["latest_profile_start"]).days - st.html(f"""

Latest profile {f'({profile_days_ago} days ago)' if profile_days_ago > STALE_PROFILE_DAYS else ""}

""") + latest_profile_start = table_group["latest_profile_start"] - if (latest_profile_id := table_group["latest_profile_id"]) and not pd.isnull(latest_profile_id): + stale_message = "" + if pd.notnull(latest_profile_start) and (profile_days_ago := (datetime.utcnow() - latest_profile_start).days) > STALE_PROFILE_DAYS: + stale_message = f'({profile_days_ago} days ago)' + testgen.caption(f"Latest profile {stale_message}") + + if pd.notnull(latest_profile_start): testgen.link( - label=date_service.get_timezoned_timestamp(st.session_state, table_group["latest_profile_start"]), + label=date_service.get_timezoned_timestamp(st.session_state, latest_profile_start), href="profiling-runs:results", - params={ "run_id": str(latest_profile_id) }, - key=f"overview:keys:go-to-profile:{latest_profile_id}", + params={ "run_id": str(table_group["latest_profile_id"]) }, + key=f"overview:keys:go-to-profile:{table_group['latest_profile_id']}", ) - - st.html(f""" - {int(table_group["latest_anomalies_ct"])} anomalies in {int(table_group["latest_profile_table_ct"])} tables - """) - testgen.summary_bar( - items=[ - { "label": "Definite", "value": int(table_group["latest_anomalies_definite_ct"]), "color": "red" }, - { "label": "Likely", "value": int(table_group["latest_anomalies_likely_ct"]), "color": "orange" }, - { "label": "Possible", "value": int(table_group["latest_anomalies_possible_ct"]), "color": "yellow" }, - { "label": "Dismissed", "value": int(table_group["latest_anomalies_dismissed_ct"]), "color": "grey" }, - ], - key=f"anomalies_{key}", - height=12, - width=280, - ) + anomaly_count = to_int(table_group["latest_anomalies_ct"]) + st.html(f""" + {anomaly_count} hygiene issues in {to_int(table_group["latest_profile_table_ct"])} tables + """) + + if anomaly_count: + testgen.summary_bar( + items=[ + { "label": "Definite", "value": to_int(table_group["latest_anomalies_definite_ct"]), "color": "red" }, + { "label": "Likely", "value": to_int(table_group["latest_anomalies_likely_ct"]), 
"color": "orange" }, + { "label": "Possible", "value": to_int(table_group["latest_anomalies_possible_ct"]), "color": "yellow" }, + { "label": "Dismissed", "value": to_int(table_group["latest_anomalies_dismissed_ct"]), "color": "grey" }, + ], + key=f"anomalies_{key}", + height=12, + width=280, + ) else: st.markdown("--") with tests_column: testgen.no_flex_gap() st.caption("Latest test results") - total_tests = int(table_group["latest_tests_ct"]) if not pd.isnull(table_group["latest_tests_ct"]) else 0 + total_tests = to_int(table_group["latest_tests_ct"]) if total_tests: - passed_tests = int(table_group["latest_tests_passed_ct"]) - + passed_tests = to_int(table_group["latest_tests_passed_ct"]) + st.html(f"""

{round(passed_tests * 100 / total_tests)}% passed

- {total_tests} tests in {int(table_group["latest_tests_suite_ct"])} test suites + {total_tests} tests in {to_int(table_group["latest_tests_suite_ct"])} test suites """) - + testgen.summary_bar( items=[ { "label": "Passed", "value": passed_tests, "color": "green" }, - { "label": "Warnings", "value": int(table_group["latest_tests_warning_ct"]), "color": "yellow" }, - { "label": "Failed", "value": int(table_group["latest_tests_failed_ct"]), "color": "red" }, - { "label": "Errors", "value": int(table_group["latest_tests_error_ct"]), "color": "brown" }, - { "label": "Dismissed", "value": int(table_group["latest_tests_dismissed_ct"]), "color": "grey" }, + { "label": "Warnings", "value": to_int(table_group["latest_tests_warning_ct"]), "color": "yellow" }, + { "label": "Failed", "value": to_int(table_group["latest_tests_failed_ct"]), "color": "red" }, + { "label": "Errors", "value": to_int(table_group["latest_tests_error_ct"]), "color": "brown" }, + { "label": "Dismissed", "value": to_int(table_group["latest_tests_dismissed_ct"]), "color": "grey" }, ], key=f"tests_{key}", height=12, @@ -148,10 +155,10 @@ def render_table_group_card(table_group: pd.Series, project_code: str, key: int) if expand_toggle: render_table_group_expanded(table_group["id"], project_code, key) - + def render_table_group_expanded(table_group_id: str, project_code: str, key: int) -> None: - st.html('
') + testgen.divider(8, 12) column_spec = [0.25, 0.15, 0.15, 0.5] suite_column, generation_column, run_column, results_column = st.columns(column_spec) @@ -177,39 +184,41 @@ def render_test_suite_item(test_suite: pd.Series, column_spec: list[int], key: i params={ "test_suite_id": str(test_suite["id"]) }, key=f"overview:keys:go-to-definitions:{test_suite['id']}", ) - st.html(f'

{test_suite["last_run_test_ct"]} tests

') + testgen.caption(f"{to_int(test_suite['last_run_test_ct'])} tests", "margin-top: -16px;") - if (latest_generation := test_suite["latest_auto_gen_date"]) and not pd.isnull(latest_generation): - generation_column.html(f'

{date_service.get_timezoned_timestamp(st.session_state, latest_generation)}

') - else: - generation_column.markdown("--") + with generation_column: + if (latest_generation := test_suite["latest_auto_gen_date"]) and pd.notnull(latest_generation): + st.html(f'

{date_service.get_timezoned_timestamp(st.session_state, latest_generation)}

') + else: + st.markdown("--") - latest_run_id = test_suite["latest_run_id"] - if latest_run_id and not pd.isnull(latest_run_id): - with run_column: + with run_column: + if (latest_run_start := test_suite["latest_run_start"]) and pd.notnull(latest_run_start): testgen.link( - label=date_service.get_timezoned_timestamp(st.session_state, test_suite["latest_run_start"]), + label=date_service.get_timezoned_timestamp(st.session_state, latest_run_start), href="test-runs:results", - params={ "run_id": str(latest_run_id) }, - key=f"overview:keys:go-to-run:{latest_run_id}", + params={ "run_id": str(test_suite["latest_run_id"]) }, + key=f"overview:keys:go-to-run:{test_suite['latest_run_id']}", ) + else: + st.markdown("--") - with results_column: + with results_column: + if to_int(test_suite["last_run_test_ct"]): testgen.summary_bar( items=[ - { "label": "Passed", "value": int(test_suite["last_run_passed_ct"]), "color": "green" }, - { "label": "Warnings", "value": int(test_suite["last_run_warning_ct"]), "color": "yellow" }, - { "label": "Failed", "value": int(test_suite["last_run_failed_ct"]), "color": "red" }, - { "label": "Errors", "value": int(test_suite["last_run_error_ct"]), "color": "brown" }, - { "label": "Dismissed", "value": int(test_suite["last_run_dismissed_ct"]), "color": "grey" }, + { "label": "Passed", "value": to_int(test_suite["last_run_passed_ct"]), "color": "green" }, + { "label": "Warnings", "value": to_int(test_suite["last_run_warning_ct"]), "color": "yellow" }, + { "label": "Failed", "value": to_int(test_suite["last_run_failed_ct"]), "color": "red" }, + { "label": "Errors", "value": to_int(test_suite["last_run_error_ct"]), "color": "brown" }, + { "label": "Dismissed", "value": to_int(test_suite["last_run_dismissed_ct"]), "color": "grey" }, ], key=f"tests_{key}", height=8, width=200, ) - else: - run_column.markdown("--") - results_column.html("--

") + else: + st.markdown("--") def get_table_groups_summary(project_code: str) -> pd.DataFrame: diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py index 84da279..0024de7 100644 --- a/testgen/ui/views/test_suites.py +++ b/testgen/ui/views/test_suites.py @@ -12,11 +12,13 @@ from testgen.commands.run_execute_tests import run_execution_steps_in_background from testgen.commands.run_generate_tests import run_test_gen_queries from testgen.commands.run_observability_exporter import export_test_results +from testgen.common import date_service from testgen.ui.components import widgets as testgen from testgen.ui.navigation.menu import MenuItem from testgen.ui.navigation.page import Page from testgen.ui.services.string_service import empty_if_null from testgen.ui.session import session +from testgen.utils import to_int class TestSuitesPage(Page): @@ -66,7 +68,7 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N testgen.button( type_="icon", icon="output", - tooltip="Export results to observability", + tooltip="Export results to Observability", tooltip_position="right", on_click=partial(observability_export_dialog, test_suite), key=f"test_suite:keys:export:{test_suite['id']}", @@ -91,46 +93,46 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N main_section, latest_run_section, actions_section = st.columns([.4, .4, .2]) with main_section: + testgen.no_flex_gap() testgen.link( - label=f"{test_suite['last_run_test_ct']} tests definitions", + label=f"{to_int(test_suite['last_run_test_ct'])} tests definitions", href="test-suites:definitions", params={ "test_suite_id": test_suite["id"] }, right_icon="chevron_right", key=f"test_suite:keys:go-to-definitions:{test_suite['id']}", ) - st.html(f""" -
-
Description
-

{test_suite['test_suite_description']}

-
- """) - - if (latest_run_start := test_suite["latest_run_start"]) and not pd.isnull(latest_run_start): - with latest_run_section: - testgen.no_flex_gap() - st.html('
Latest Run
') + testgen.caption("Description") + st.markdown(test_suite["test_suite_description"] or "--") + + with latest_run_section: + testgen.no_flex_gap() + st.caption("Latest Run") + + if (latest_run_start := test_suite["latest_run_start"]) and pd.notnull(latest_run_start): testgen.link( - label=latest_run_start.strftime("%B %d, %H:%M %p"), + label=date_service.get_timezoned_timestamp(st.session_state, latest_run_start), href="test-runs:results", params={ "run_id": str(test_suite["latest_run_id"]) }, - right_icon="chevron_right", style="margin-bottom: 8px;", height=29, key=f"test_suite:keys:go-to-runs:{test_suite['id']}", ) - testgen.summary_bar( - items=[ - { "label": "Passed", "value": int(test_suite["last_run_passed_ct"]), "color": "green" }, - { "label": "Warnings", "value": int(test_suite["last_run_warning_ct"]), "color": "yellow" }, - { "label": "Failed", "value": int(test_suite["last_run_failed_ct"]), "color": "red" }, - { "label": "Errors", "value": int(test_suite["last_run_error_ct"]), "color": "brown" }, - { "label": "Dismissed", "value": int(test_suite["last_run_dismissed_ct"]), "color": "grey" }, - ], - height=20, - width=350, - key=f"test_suite:keys:run-rummary:{test_suite['id']}", - ) + if to_int(test_suite["last_run_test_ct"]): + testgen.summary_bar( + items=[ + { "label": "Passed", "value": to_int(test_suite["last_run_passed_ct"]), "color": "green" }, + { "label": "Warnings", "value": to_int(test_suite["last_run_warning_ct"]), "color": "yellow" }, + { "label": "Failed", "value": to_int(test_suite["last_run_failed_ct"]), "color": "red" }, + { "label": "Errors", "value": to_int(test_suite["last_run_error_ct"]), "color": "brown" }, + { "label": "Dismissed", "value": to_int(test_suite["last_run_dismissed_ct"]), "color": "grey" }, + ], + height=20, + width=350, + key=f"test_suite:keys:run-rummary:{test_suite['id']}", + ) + else: + st.markdown("--") with actions_section: testgen.button( From 68e829adfc028d598383c2d98926c4f53d5d322a Mon Sep 17 00:00:00 2001 
From: Luis Trinidad Date: Tue, 17 Sep 2024 11:09:36 -0400 Subject: [PATCH 59/78] misc(ui): add widget to wrap streamlit's native dialog Streamlit native dialog caches usage of a decorated function by the function's making then unusuable for list details dialogs. The new wrapper widget assigns an 8 character random string as a suffix to the function . --- testgen/ui/components/widgets/__init__.py | 1 + testgen/ui/components/widgets/dialog.py | 44 +++++++++++++++++++++++ 2 files changed, 45 insertions(+) create mode 100644 testgen/ui/components/widgets/dialog.py diff --git a/testgen/ui/components/widgets/__init__.py b/testgen/ui/components/widgets/__init__.py index eba62b7..243355c 100644 --- a/testgen/ui/components/widgets/__init__.py +++ b/testgen/ui/components/widgets/__init__.py @@ -3,6 +3,7 @@ from testgen.ui.components.widgets.breadcrumbs import breadcrumbs from testgen.ui.components.widgets.button import button from testgen.ui.components.widgets.card import card +from testgen.ui.components.widgets.dialog import dialog from testgen.ui.components.widgets.expander_toggle import expander_toggle from testgen.ui.components.widgets.link import link from testgen.ui.components.widgets.page import ( diff --git a/testgen/ui/components/widgets/dialog.py b/testgen/ui/components/widgets/dialog.py new file mode 100644 index 0000000..6c2c4e9 --- /dev/null +++ b/testgen/ui/components/widgets/dialog.py @@ -0,0 +1,44 @@ +import functools +import random +import string +import typing + +import streamlit as st +from streamlit.elements.lib.dialog import DialogWidth + + +def dialog(title: str, *, width: DialogWidth = "small", key: str | None = None) -> typing.Callable: + """ + Wrap Streamlit's native dialog to avoid passing parameters that will + be ignored during the fragment's re-run. 
+ """ + dialog_contents: typing.Callable = lambda: None + + def render_dialog() -> typing.Any: + args = [] + kwargs = {} + if key: + args, kwargs = st.session_state[key] + return dialog_contents(*args, **kwargs) + + name_suffix = "".join(random.choices(string.ascii_lowercase, k=8)) # noqa: S311 + + # NOTE: st.dialog uses __qualname__ to generate the fragment hash, effectively overshadowing the uniqueness of the + # render_dialog() function. + render_dialog.__name__ = f"render_dialog_{name_suffix}" + render_dialog.__qualname__ = render_dialog.__qualname__.replace("render_dialog", render_dialog.__name__) + + render_dialog = st.dialog(title=title, width=width)(render_dialog) + + def decorator(func: typing.Callable) -> typing.Callable: + nonlocal dialog_contents + dialog_contents = func + + @functools.wraps(func) + def wrapper(*args, **kwargs): + if key: + st.session_state[key] = (args, kwargs) + render_dialog() + return wrapper + + return decorator From aca2b2e17bacc0a7916378c10d4458caab419dd0 Mon Sep 17 00:00:00 2001 From: Luis Trinidad Date: Tue, 17 Sep 2024 11:10:39 -0400 Subject: [PATCH 60/78] fix(ui): return the custom button value from the widget function --- testgen/ui/components/widgets/button.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testgen/ui/components/widgets/button.py b/testgen/ui/components/widgets/button.py index dba2fe5..a78bc0d 100644 --- a/testgen/ui/components/widgets/button.py +++ b/testgen/ui/components/widgets/button.py @@ -15,7 +15,7 @@ def button( on_click: typing.Callable[..., None] | None = None, style: str | None = None, key: str | None = None, -) -> None: +) -> typing.Any: """ Testgen component to create custom styled buttons. 
@@ -40,4 +40,4 @@ def button( if style: props.update({"style": style}) - component(id_="button", key=key, props=props, on_change=on_click) + return component(id_="button", key=key, props=props, on_change=on_click) From c61156c44d0643dd462c77ac30539c49b0158a16 Mon Sep 17 00:00:00 2001 From: Luis Trinidad Date: Tue, 17 Sep 2024 11:13:23 -0400 Subject: [PATCH 61/78] refactor(ui): add new methods to the connections page Add methods to the connections page object for better customizability. --- testgen/ui/views/connections.py | 406 ++++++++++++++++++++++++++- testgen/ui/views/connections_base.py | 360 ------------------------ 2 files changed, 399 insertions(+), 367 deletions(-) delete mode 100644 testgen/ui/views/connections_base.py diff --git a/testgen/ui/views/connections.py b/testgen/ui/views/connections.py index 31e50b6..3fc44dc 100644 --- a/testgen/ui/views/connections.py +++ b/testgen/ui/views/connections.py @@ -1,14 +1,19 @@ +import dataclasses import logging +import os +import time import typing import streamlit as st +import testgen.ui.services.database_service as db +from testgen.commands.run_setup_profiling_tools import get_setup_profiling_tools_queries +from testgen.common.database.database_service import empty_cache from testgen.ui.components import widgets as testgen from testgen.ui.navigation.menu import MenuItem from testgen.ui.navigation.page import Page -from testgen.ui.services import connection_service +from testgen.ui.services import authentication_service, connection_service from testgen.ui.session import session -from testgen.ui.views.connections_base import create_qc_schema_dialog, show_connection_form LOG = logging.getLogger("testgen") @@ -34,16 +39,14 @@ def render(self, project_code: str, **_kwargs) -> None: enable_table_groups = connection["project_host"] and connection["project_db"] and connection["project_qc_schema"] - form_container = st.expander("", expanded=True) - with form_container: - mode = "edit" - show_connection_form(connection, 
mode, project_code) + with st.container(border=True): + self.show_connection_form(connection, "edit", project_code) if actions_column.button( "Configure QC Utility Schema", help="Creates the required Utility schema and related functions in the target database", ): - create_qc_schema_dialog(connection) + self.create_qc_schema_dialog(connection) if actions_column.button( f":{'gray' if not enable_table_groups else 'green'}[Table Groups →]", @@ -53,3 +56,392 @@ def render(self, project_code: str, **_kwargs) -> None: "connections:table-groups", {"connection_id": connection["connection_id"]}, ) + + @testgen.dialog(title="Configure QC Utility Schema", key="config_qc_dialog_args") + def create_qc_schema_dialog(self, selected_connection): + connection_id = selected_connection["connection_id"] + project_qc_schema = selected_connection["project_qc_schema"] + sql_flavor = selected_connection["sql_flavor"] + user = selected_connection["project_user"] + + create_qc_schema = st.toggle("Create QC Utility Schema", value=True) + grant_privileges = st.toggle("Grant access privileges to TestGen user", value=True) + + user_role = None + + # TODO ALEX: This textbox may be needed if we want to grant permissions to user role + # if sql_flavor == "snowflake": + # user_role_textbox_label = f"Primary role for database user {user}" + # user_role = st.text_input(label=user_role_textbox_label, max_chars=100) + + admin_credentials_expander = st.expander("Admin credential options", expanded=True) + with admin_credentials_expander: + admin_connection_option_index = 0 + admin_connection_options = ["Do not use admin credentials", "Use admin credentials with Password"] + if sql_flavor == "snowflake": + admin_connection_options.append("Use admin credentials with Key-Pair") + + admin_connection_option = st.radio( + "Admin credential options", + label_visibility="hidden", + options=admin_connection_options, + index=admin_connection_option_index, + horizontal=True, + ) + + st.markdown("

 
", unsafe_allow_html=True) + + db_user = None + db_password = None + admin_private_key_passphrase = None + admin_private_key = None + if admin_connection_option == admin_connection_options[0]: + st.markdown(":orange[User created in the connection dialog will be used.]") + else: + db_user = st.text_input(label="Admin db user", max_chars=40) + if admin_connection_option == admin_connection_options[1]: + db_password = st.text_input( + label="Admin db password", max_chars=40, type="password" + ) + st.markdown(":orange[Note: Admin credentials are not stored, are only used for this operation.]") + + if len(admin_connection_options) > 2 and admin_connection_option == admin_connection_options[2]: + admin_private_key_passphrase = st.text_input( + label="Private Key Passphrase", + key="create-qc-schema-private-key-password", + type="password", + max_chars=200, + help="Passphrase used while creating the private Key (leave empty if not applicable)", + ) + + admin_uploaded_file = st.file_uploader("Upload private key (rsa_key.p8)", key="admin-uploaded-file") + if admin_uploaded_file: + admin_private_key = admin_uploaded_file.getvalue().decode("utf-8") + + st.markdown(":orange[Note: Admin credentials are not stored, are only used for this operation.]") + + submit = st.button("Update Configuration") + + if submit: + empty_cache() + script_expander = st.expander("Script Details") + + operation_status = st.empty() + operation_status.info(f"Configuring QC Utility Schema '{project_qc_schema}'...") + + try: + skip_granting_privileges = not grant_privileges + queries = get_setup_profiling_tools_queries(sql_flavor, create_qc_schema, skip_granting_privileges, project_qc_schema, user, user_role) + with script_expander: + st.code( + os.linesep.join(queries), + language="sql", + line_numbers=True) + + connection_service.create_qc_schema( + connection_id, + create_qc_schema, + db_user if db_user else None, + db_password if db_password else None, + skip_granting_privileges, + 
admin_private_key_passphrase=admin_private_key_passphrase, + admin_private_key=admin_private_key, + user_role=user_role, + ) + operation_status.empty() + operation_status.success("Operation has finished successfully.") + + except Exception as e: + operation_status.empty() + operation_status.error("Error configuring QC Utility Schema.") + error_message = e.args[0] + st.text_area("Error Details", value=error_message) + + def show_connection_form(self, selected_connection, mode, project_code): + flavor_options = ["redshift", "snowflake", "mssql", "postgresql"] + connection_options = ["Connect by Password", "Connect by Key-Pair"] + + left_column, right_column = st.columns([0.75, 0.25]) + + mid_column = st.columns(1)[0] + url_override_toogle_container = st.container() + bottom_left_column, bottom_right_column = st.columns([0.25, 0.75]) + button_left_column, button_right_column = st.columns([0.20, 0.80]) + connection_status_wrapper = st.container() + connection_status_container = connection_status_wrapper.empty() + + connection_id = selected_connection["connection_id"] if mode == "edit" else None + connection_name = selected_connection["connection_name"] if mode == "edit" else "" + sql_flavor_index = flavor_options.index(selected_connection["sql_flavor"]) if mode == "edit" else 0 + project_port = selected_connection["project_port"] if mode == "edit" else "" + project_host = selected_connection["project_host"] if mode == "edit" else "" + project_db = selected_connection["project_db"] if mode == "edit" else "" + project_user = selected_connection["project_user"] if mode == "edit" else "" + url = selected_connection["url"] if mode == "edit" else "" + project_qc_schema = selected_connection["project_qc_schema"] if mode == "edit" else "qc" + password = selected_connection["password"] if mode == "edit" else "" + max_threads = selected_connection["max_threads"] if mode == "edit" else 4 + max_query_chars = selected_connection["max_query_chars"] if mode == "edit" else 10000 + 
connect_by_url = selected_connection["connect_by_url"] if mode == "edit" else False + connect_by_key = selected_connection["connect_by_key"] if mode == "edit" else False + connection_option_index = 1 if connect_by_key else 0 + private_key = selected_connection["private_key"] if mode == "edit" else None + private_key_passphrase = selected_connection["private_key_passphrase"] if mode == "edit" else "" + + new_connection = { + "connection_id": connection_id, + "project_code": project_code, + "private_key": private_key, + "private_key_passphrase": private_key_passphrase, + "password": password, + "url": url, + "max_threads": right_column.number_input( + label="Max Threads (Advanced Tuning)", + min_value=1, + max_value=8, + value=max_threads, + help=( + "Maximum number of concurrent threads that run tests. Default values should be retained unless " + "test queries are failing." + ), + key=f"connections:form:max-threads:{connection_id or 0}", + ), + "max_query_chars": right_column.number_input( + label="Max Expression Length (Advanced Tuning)", + min_value=500, + max_value=14000, + value=max_query_chars, + help="Some tests are consolidated into queries for maximum performance. Default values should be retained unless test queries are failing.", + key=f"connections:form:max-length:{connection_id or 0}", + ), + "connection_name": left_column.text_input( + label="Connection Name", + max_chars=40, + value=connection_name, + help="Your name for this connection. Can be any text.", + key=f"connections:form:name:{connection_id or 0}", + ), + "sql_flavor": left_column.selectbox( + label="SQL Flavor", + options=flavor_options, + index=sql_flavor_index, + help="The type of database server that you will connect to. 
This determines TestGen's drivers and SQL dialect.", + key=f"connections:form:flavor:{connection_id or 0}", + ) + } + + st.session_state.disable_url_widgets = connect_by_url + + new_connection["project_port"] = right_column.text_input( + label="Port", + max_chars=5, + value=project_port, + disabled=st.session_state.disable_url_widgets, + key=f"connections:form:port:{connection_id or 0}", + ) + new_connection["project_host"] = left_column.text_input( + label="Host", + max_chars=250, + value=project_host, + disabled=st.session_state.disable_url_widgets, + key=f"connections:form:host:{connection_id or 0}", + ) + new_connection["project_db"] = left_column.text_input( + label="Database", + max_chars=100, + value=project_db, + help="The name of the database defined on your host where your schemas and tables is present.", + disabled=st.session_state.disable_url_widgets, + key=f"connections:form:database:{connection_id or 0}", + ) + + new_connection["project_user"] = left_column.text_input( + label="User", + max_chars=50, + value=project_user, + help="Username to connect to your database.", + key=f"connections:form:user:{connection_id or 0}", + ) + + new_connection["project_qc_schema"] = right_column.text_input( + label="QC Utility Schema", + max_chars=50, + value=project_qc_schema, + help="The name of the schema on your database that will contain TestGen's profiling functions.", + key=f"connections:form:qcschema:{connection_id or 0}", + ) + + if new_connection["sql_flavor"] == "snowflake": + mid_column.divider() + + connection_option = mid_column.radio( + "Connection options", + options=connection_options, + index=connection_option_index, + horizontal=True, + help="Connection strategy", + key=f"connections:form:type_options:{connection_id or 0}", + ) + + new_connection["connect_by_key"] = connection_option == "Connect by Key-Pair" + password_column = mid_column + else: + new_connection["connect_by_key"] = False + password_column = left_column + + uploaded_file = None + + 
if new_connection["connect_by_key"]: + new_connection["private_key_passphrase"] = mid_column.text_input( + label="Private Key Passphrase", + type="password", + max_chars=200, + value=private_key_passphrase, + help="Passphrase used while creating the private Key (leave empty if not applicable)", + key=f"connections:form:passphrase:{connection_id or 0}", + ) + + uploaded_file = mid_column.file_uploader("Upload private key (rsa_key.p8)") + else: + new_connection["password"] = password_column.text_input( + label="Password", + max_chars=50, + type="password", + value=password, + help="Password to connect to your database.", + key=f"connections:form:password:{connection_id or 0}", + ) + + mid_column.divider() + + url_override_help_text = "If this switch is set to on, the connection string will be driven by the field below. " + if new_connection["connect_by_key"]: + url_override_help_text += "Only user name will be passed per the relevant fields above." + else: + url_override_help_text += "Only user name and password will be passed per the relevant fields above." 
+ + def on_connect_by_url_change(): + value = st.session_state.connect_by_url_toggle + st.session_state.disable_url_widgets = value + + new_connection["connect_by_url"] = url_override_toogle_container.toggle( + "URL override", + value=connect_by_url, + key="connect_by_url_toggle", + help=url_override_help_text, + on_change=on_connect_by_url_change, + ) + + if new_connection["connect_by_url"]: + connection_string = connection_service.form_overwritten_connection_url(new_connection) + connection_string_beginning, connection_string_end = connection_string.split("@", 1) + connection_string_header = connection_string_beginning + "@" + connection_string_header = connection_string_header.replace("%3E", ">") + connection_string_header = connection_string_header.replace("%3C", "<") + + if not url: + url = connection_string_end + + new_connection["url"] = bottom_right_column.text_input( + label="URL Suffix", + max_chars=200, + value=url, + help="Provide a connection string directly. This will override connection parameters if the 'Connect by URL' switch is set.", + ) + + bottom_left_column.text_input(label="URL Prefix", value=connection_string_header, disabled=True) + + bottom_left_column.markdown("

 
", unsafe_allow_html=True) + + testgen.flex_row_end(button_right_column) + submit = button_right_column.button( + "Save" if mode == "edit" else "Add Connection", + disabled=authentication_service.current_user_has_read_role(), + ) + + if submit: + if not new_connection["password"] and not new_connection["connect_by_key"]: + st.error("Enter a valid password.") + else: + if uploaded_file: + new_connection["private_key"] = uploaded_file.getvalue().decode("utf-8") + + if mode == "edit": + connection_service.edit_connection(new_connection) + else: + connection_service.add_connection(new_connection) + success_message = ( + "Changes have been saved successfully. " + if mode == "edit" + else "New connection added successfully. " + ) + st.success(success_message) + time.sleep(1) + st.rerun() + + test_connection = button_left_column.button("Test Connection") + + if test_connection: + connection_status_container.empty() + connection_status_container.info("Testing the connection...") + + connection_status = self.test_connection(new_connection) + renderer = { + True: connection_status_container.success, + False: connection_status_container.error, + }[connection_status.successful] + + renderer(connection_status.message) + if not connection_status.successful and connection_status.details: + st.text_area("Connection Error Details", value=connection_status.details) + + def test_connection(self, connection: dict) -> "ConnectionStatus": + if connection["connect_by_key"] and connection["connection_id"] is None: + return ConnectionStatus( + message="Please add the connection before testing it (so that we can get your private key file).", + successful=False, + ) + + empty_cache() + try: + sql_query = "select 1;" + results = db.retrieve_target_db_data( + connection["sql_flavor"], + connection["project_host"], + connection["project_port"], + connection["project_db"], + connection["project_user"], + connection["password"], + connection["url"], + connection["connect_by_url"], + 
connection["connect_by_key"], + connection["private_key"], + connection["private_key_passphrase"], + sql_query, + ) + connection_successful = len(results) == 1 and results[0][0] == 1 + + if not connection_successful: + return ConnectionStatus(message="Error completing a query to the database server.", successful=False) + + qc_error_message = "The connection was successful, but there is an issue with the QC Utility Schema" + try: + qc_results = connection_service.test_qc_connection(connection["project_code"], connection) + if not all(qc_results): + return ConnectionStatus( + message=qc_error_message, + details=f"QC Utility Schema confirmation failed. details: {qc_results}", + successful=False, + ) + return ConnectionStatus(message="The connection was successful.", successful=True) + except Exception as error: + return ConnectionStatus(message=qc_error_message, details=error.args[0], successful=False) + except Exception as error: + return ConnectionStatus(message="Error attempting the Connection.", details=error.args[0], successful=False) + + +@dataclasses.dataclass(frozen=True, slots=True) +class ConnectionStatus: + message: str + successful: bool + details: str | None = dataclasses.field(default=None) diff --git a/testgen/ui/views/connections_base.py b/testgen/ui/views/connections_base.py deleted file mode 100644 index e3765a9..0000000 --- a/testgen/ui/views/connections_base.py +++ /dev/null @@ -1,360 +0,0 @@ -import os -import time - -import streamlit as st - -import testgen.ui.services.database_service as db -from testgen.commands.run_setup_profiling_tools import get_setup_profiling_tools_queries -from testgen.common.database.database_service import empty_cache -from testgen.ui.services import authentication_service, connection_service - - -@st.dialog(title="Configure QC Utility Schema") -def create_qc_schema_dialog(selected_connection): - connection_id = selected_connection["connection_id"] - project_qc_schema = selected_connection["project_qc_schema"] - 
sql_flavor = selected_connection["sql_flavor"] - user = selected_connection["project_user"] - - create_qc_schema = st.toggle("Create QC Utility Schema", value=True) - grant_privileges = st.toggle("Grant access privileges to TestGen user", value=True) - - user_role = None - - # TODO ALEX: This textbox may be needed if we want to grant permissions to user role - # if sql_flavor == "snowflake": - # user_role_textbox_label = f"Primary role for database user {user}" - # user_role = st.text_input(label=user_role_textbox_label, max_chars=100) - - admin_credentials_expander = st.expander("Admin credential options", expanded=True) - with admin_credentials_expander: - admin_connection_option_index = 0 - admin_connection_options = ["Do not use admin credentials", "Use admin credentials with Password"] - if sql_flavor == "snowflake": - admin_connection_options.append("Use admin credentials with Key-Pair") - - admin_connection_option = st.radio( - "Admin credential options", - label_visibility="hidden", - options=admin_connection_options, - index=admin_connection_option_index, - horizontal=True, - ) - - st.markdown("

 
", unsafe_allow_html=True) - - db_user = None - db_password = None - admin_private_key_passphrase = None - admin_private_key = None - if admin_connection_option == admin_connection_options[0]: - st.markdown(":orange[User created in the connection dialog will be used.]") - else: - db_user = st.text_input(label="Admin db user", max_chars=40) - if admin_connection_option == admin_connection_options[1]: - db_password = st.text_input( - label="Admin db password", max_chars=40, type="password" - ) - st.markdown(":orange[Note: Admin credentials are not stored, are only used for this operation.]") - - if len(admin_connection_options) > 2 and admin_connection_option == admin_connection_options[2]: - admin_private_key_passphrase = st.text_input( - label="Private Key Passphrase", - key="create-qc-schema-private-key-password", - type="password", - max_chars=200, - help="Passphrase used while creating the private Key (leave empty if not applicable)", - ) - - admin_uploaded_file = st.file_uploader("Upload private key (rsa_key.p8)", key="admin-uploaded-file") - if admin_uploaded_file: - admin_private_key = admin_uploaded_file.getvalue().decode("utf-8") - - st.markdown(":orange[Note: Admin credentials are not stored, are only used for this operation.]") - - submit = st.button("Update Configuration") - - if submit: - empty_cache() - script_expander = st.expander("Script Details") - - operation_status = st.empty() - operation_status.info(f"Configuring QC Utility Schema '{project_qc_schema}'...") - - try: - skip_granting_privileges = not grant_privileges - queries = get_setup_profiling_tools_queries(sql_flavor, create_qc_schema, skip_granting_privileges, project_qc_schema, user, user_role) - with script_expander: - st.code( - os.linesep.join(queries), - language="sql", - line_numbers=True) - - connection_service.create_qc_schema( - connection_id, - create_qc_schema, - db_user if db_user else None, - db_password if db_password else None, - skip_granting_privileges, - 
admin_private_key_passphrase=admin_private_key_passphrase, - admin_private_key=admin_private_key, - user_role=user_role, - ) - operation_status.empty() - operation_status.success("Operation has finished successfully.") - - except Exception as e: - operation_status.empty() - operation_status.error("Error configuring QC Utility Schema.") - error_message = e.args[0] - st.text_area("Error Details", value=error_message) - - -def show_connection_form(selected_connection, mode, project_code): - flavor_options = ["redshift", "snowflake", "mssql", "postgresql"] - connection_options = ["Connect by Password", "Connect by Key-Pair"] - - left_column, right_column = st.columns([0.75, 0.25]) - mid_column = st.columns(1)[0] - toggle_left_column, toggle_right_column = st.columns([0.25, 0.75]) - bottom_left_column, bottom_right_column = st.columns([0.25, 0.75]) - button_left_column, button_right_column, button_remaining_column = st.columns([0.20, 0.20, 0.60]) - - connection_id = selected_connection["connection_id"] if mode == "edit" else None - connection_name = selected_connection["connection_name"] if mode == "edit" else "" - sql_flavor_index = flavor_options.index(selected_connection["sql_flavor"]) if mode == "edit" else 0 - project_port = selected_connection["project_port"] if mode == "edit" else "" - project_host = selected_connection["project_host"] if mode == "edit" else "" - project_db = selected_connection["project_db"] if mode == "edit" else "" - project_user = selected_connection["project_user"] if mode == "edit" else "" - url = selected_connection["url"] if mode == "edit" else "" - project_qc_schema = selected_connection["project_qc_schema"] if mode == "edit" else "qc" - password = selected_connection["password"] if mode == "edit" else "" - max_threads = selected_connection["max_threads"] if mode == "edit" else 4 - max_query_chars = selected_connection["max_query_chars"] if mode == "edit" else 10000 - connect_by_url = selected_connection["connect_by_url"] if mode == 
"edit" else False - connect_by_key = selected_connection["connect_by_key"] if mode == "edit" else False - connection_option_index = 1 if connect_by_key else 0 - private_key = selected_connection["private_key"] if mode == "edit" else None - private_key_passphrase = selected_connection["private_key_passphrase"] if mode == "edit" else "" - - new_connection = { - "connection_id": connection_id, - "project_code": project_code, - "private_key": private_key, - "private_key_passphrase": private_key_passphrase, - "password": password, - "url": url, - "max_threads": right_column.number_input( - label="Max Threads (Advanced Tuning)", - min_value=1, - max_value=8, - value=max_threads, - help="Maximum number of concurrent threads that run tests. Default values should be retained unless test queries are failing.", - ), - "max_query_chars": right_column.number_input( - label="Max Expression Length (Advanced Tuning)", - min_value=500, - max_value=14000, - value=max_query_chars, - help="Some tests are consolidated into queries for maximum performance. Default values should be retained unless test queries are failing.", - ), - "connection_name": left_column.text_input( - label="Connection Name", - max_chars=40, - value=connection_name, - help="Your name for this connection. Can be any text.", - ), - "sql_flavor": left_column.selectbox( - label="SQL Flavor", - options=flavor_options, - index=sql_flavor_index, - help="The type of database server that you will connect to. 
This determines TestGen's drivers and SQL dialect.", - ) - } - - if "disable_url_widgets" not in st.session_state: - st.session_state.disable_url_widgets = connect_by_url - - new_connection["project_port"] = right_column.text_input(label="Port", max_chars=5, value=project_port, disabled=st.session_state.disable_url_widgets) - new_connection["project_host"] = left_column.text_input(label="Host", max_chars=250, value=project_host, disabled=st.session_state.disable_url_widgets) - new_connection["project_db"] = left_column.text_input( - label="Database", - max_chars=100, - value=project_db, - help="The name of the database defined on your host where your schemas and tables is present.", - disabled=st.session_state.disable_url_widgets, - ) - - new_connection["project_user"] = left_column.text_input( - label="User", - max_chars=50, - value=project_user, - help="Username to connect to your database.", - ) - - new_connection["project_qc_schema"] = right_column.text_input( - label="QC Utility Schema", - max_chars=50, - value=project_qc_schema, - help="The name of the schema on your database that will contain TestGen's profiling functions.", - ) - - if new_connection["sql_flavor"] == "snowflake": - mid_column.divider() - - connection_option = mid_column.radio( - "Connection options", - options=connection_options, - index=connection_option_index, - horizontal=True, - help="Connection strategy", - ) - - new_connection["connect_by_key"] = connection_option == "Connect by Key-Pair" - password_column = mid_column - else: - new_connection["connect_by_key"] = False - password_column = left_column - - uploaded_file = None - - if new_connection["connect_by_key"]: - new_connection["private_key_passphrase"] = mid_column.text_input( - label="Private Key Passphrase", - type="password", - max_chars=200, - value=private_key_passphrase, - help="Passphrase used while creating the private Key (leave empty if not applicable)", - ) - - uploaded_file = mid_column.file_uploader("Upload private 
key (rsa_key.p8)") - else: - new_connection["password"] = password_column.text_input( - label="Password", - max_chars=50, - type="password", - value=password, - help="Password to connect to your database.", - ) - - mid_column.divider() - - url_override_help_text = "If this switch is set to on, the connection string will be driven by the field below. " - if new_connection["connect_by_key"]: - url_override_help_text += "Only user name will be passed per the relevant fields above." - else: - url_override_help_text += "Only user name and password will be passed per the relevant fields above." - - def on_connect_by_url_change(): - value = st.session_state.connect_by_url_toggle - st.session_state.disable_url_widgets = value - - new_connection["connect_by_url"] = toggle_left_column.toggle( - "URL override", - value=connect_by_url, - key="connect_by_url_toggle", - help=url_override_help_text, - on_change=on_connect_by_url_change - ) - - if new_connection["connect_by_url"]: - connection_string = connection_service.form_overwritten_connection_url(new_connection) - connection_string_beginning, connection_string_end = connection_string.split("@", 1) - connection_string_header = connection_string_beginning + "@" - connection_string_header = connection_string_header.replace("%3E", ">") - connection_string_header = connection_string_header.replace("%3C", "<") - - if not url: - url = connection_string_end - - new_connection["url"] = bottom_right_column.text_input( - label="URL Suffix", - max_chars=200, - value=url, - help="Provide a connection string directly. This will override connection parameters if the 'Connect by URL' switch is set.", - ) - - bottom_left_column.text_input(label="URL Prefix", value=connection_string_header, disabled=True) - - bottom_left_column.markdown("

 
", unsafe_allow_html=True) - - submit_button_text = "Save" if mode == "edit" else "Add Connection" - submit = button_left_column.button( - submit_button_text, disabled=authentication_service.current_user_has_read_role() - ) - - if submit: - if not new_connection["password"] and not new_connection["connect_by_key"]: - st.error("Enter a valid password.") - else: - if uploaded_file: - new_connection["private_key"] = uploaded_file.getvalue().decode("utf-8") - - if mode == "edit": - connection_service.edit_connection(new_connection) - else: - connection_service.add_connection(new_connection) - success_message = ( - "Changes have been saved successfully. " - if mode == "edit" - else "New connection added successfully. " - ) - st.success(success_message) - time.sleep(1) - st.rerun() - - test_left_column, test_mid_column, test_right_column = st.columns([0.15, 0.15, 0.70]) - test_connection = button_right_column.button("Test Connection") - - connection_status = test_right_column.empty() - - if test_connection: - if mode == "add" and new_connection["connect_by_key"]: - connection_status.empty() - connection_status.error( - "Please add the connection before testing it (so that we can get your private key file).") - else: - empty_cache() - connection_status.empty() - connection_status.info("Testing the connection...") - try: - sql_query = "select 1;" - results = db.retrieve_target_db_data( - new_connection["sql_flavor"], - new_connection["project_host"], - new_connection["project_port"], - new_connection["project_db"], - new_connection["project_user"], - new_connection["password"], - new_connection["url"], - new_connection["connect_by_url"], - new_connection["connect_by_key"], - new_connection["private_key"], - new_connection["private_key_passphrase"], - sql_query, - ) - if len(results) == 1 and results[0][0] == 1: - qc_error_message = "The connection was successful, but there is an issue with the QC Utility Schema" - try: - qc_results = 
connection_service.test_qc_connection(project_code, new_connection) - if not all(qc_results): - error_message = f"QC Utility Schema confirmation failed. details: {qc_results}" - connection_status.empty() - connection_status.error(qc_error_message) - st.text_area("Connection Error Details", value=error_message) - else: - connection_status.empty() - connection_status.success("The connection was successful.") - except Exception as e: - connection_status.empty() - connection_status.error(qc_error_message) - error_message = e.args[0] - st.text_area("Connection Error Details", value=error_message) - else: - test_right_column.error("Error completing a query to the database server.") - except Exception as e: - connection_status.empty() - connection_status.error("Error attempting the Connection.") - error_message = e.args[0] - st.text_area("Connection Error Details", value=error_message) From d1ba19488bfa563f9f1e0f3c7db41f4427b806bc Mon Sep 17 00:00:00 2001 From: Luis Trinidad Date: Tue, 17 Sep 2024 11:18:24 -0400 Subject: [PATCH 62/78] fix(ui): use wrapper dialog widget on test suites cards --- testgen/ui/views/test_suites.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py index 0024de7..d6d56a5 100644 --- a/testgen/ui/views/test_suites.py +++ b/testgen/ui/views/test_suites.py @@ -155,12 +155,12 @@ def get_db_table_group_choices(project_code): return dq.run_table_groups_lookup_query(schema, project_code) -@st.dialog(title="Add Test Suite") +@testgen.dialog(title="Add Test Suite", key="add_test_suite_dialog_args") def add_test_suite_dialog(project_code, table_groups_df): show_test_suite("add", project_code, table_groups_df) -@st.dialog(title="Edit Test Suite") +@testgen.dialog(title="Edit Test Suite", key="edit_test_suite_dialog_args") def edit_test_suite_dialog(project_code, table_groups_df, selected): show_test_suite("edit", project_code, table_groups_df, selected) @@ -274,7 
+274,7 @@ def show_test_suite(mode, project_code, table_groups_df, selected=None): st.rerun() -@st.dialog(title="Delete Test Suite") +@testgen.dialog(title="Delete Test Suite", key="delete_test_suite_dialog_args") def delete_test_suite_dialog(selected_test_suite): test_suite_id = selected_test_suite["id"] test_suite_name = selected_test_suite["test_suite"] @@ -324,7 +324,7 @@ def delete_test_suite_dialog(selected_test_suite): st.rerun() -@st.dialog(title="Run Tests") +@testgen.dialog(title="Run Tests", key="run_tests_dialog_args") def run_tests_dialog(project_code, selected_test_suite): test_suite_key = selected_test_suite["test_suite"] start_process_button_message = "Start" @@ -364,7 +364,7 @@ def run_tests_dialog(project_code, selected_test_suite): ) -@st.dialog(title="Generate Tests") +@testgen.dialog(title="Generate Tests", key="generate_tests_dialog_args") def generate_tests_dialog(selected_test_suite): test_suite_id = selected_test_suite["id"] test_suite_key = selected_test_suite["test_suite"] @@ -441,7 +441,7 @@ def generate_tests_dialog(selected_test_suite): status_container.success("Process has successfully finished.") -@st.dialog(title="Export to Observability") +@testgen.dialog(title="Export to Observability", key="export_to_obs_dialog_args") def observability_export_dialog(selected_test_suite): project_key = selected_test_suite["project_code"] test_suite_key = selected_test_suite["test_suite"] From 0156d7f078406686de9afa11c180a8fbbf085266 Mon Sep 17 00:00:00 2001 From: Luis Trinidad Date: Fri, 20 Sep 2024 08:45:10 -0400 Subject: [PATCH 63/78] misc: upgrade streamlit to version 1.38.0 The upgrade fixes an error with dynamic dialogs opened from callbacks. The upgrade also makes the dialogs look smaller by default, this commit explicitly sets the width to large for most of the dialogs. 
--- pyproject.toml | 2 +- testgen/ui/assets/style.css | 2 +- testgen/ui/components/widgets/__init__.py | 1 - testgen/ui/components/widgets/dialog.py | 44 ----------------------- testgen/ui/views/connections.py | 2 +- testgen/ui/views/test_suites.py | 12 +++---- 6 files changed, 9 insertions(+), 54 deletions(-) delete mode 100644 testgen/ui/components/widgets/dialog.py diff --git a/pyproject.toml b/pyproject.toml index 5be21d0..6e06b0d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -43,7 +43,7 @@ dependencies = [ "trogon==0.4.0", "numpy==1.26.4", "pandas==2.1.4", - "streamlit==1.37.1", + "streamlit==1.38.0", "streamlit-extras==0.3.0", "streamlit-aggrid==0.3.4.post3", "streamlit-antd-components==0.2.2", diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css index 25b829e..b5eec64 100644 --- a/testgen/ui/assets/style.css +++ b/testgen/ui/assets/style.css @@ -81,7 +81,7 @@ div[data-testid="collapsedControl"] { /* Dialog - sets the width of all st.dialog */ /* There is no way to target "large" and "small" dialogs reliably */ -div[data-testid="stModal"] div[role="dialog"] { +div[data-testid="stDialog"] div[role="dialog"] { width: calc(55rem); } /* */ diff --git a/testgen/ui/components/widgets/__init__.py b/testgen/ui/components/widgets/__init__.py index 243355c..eba62b7 100644 --- a/testgen/ui/components/widgets/__init__.py +++ b/testgen/ui/components/widgets/__init__.py @@ -3,7 +3,6 @@ from testgen.ui.components.widgets.breadcrumbs import breadcrumbs from testgen.ui.components.widgets.button import button from testgen.ui.components.widgets.card import card -from testgen.ui.components.widgets.dialog import dialog from testgen.ui.components.widgets.expander_toggle import expander_toggle from testgen.ui.components.widgets.link import link from testgen.ui.components.widgets.page import ( diff --git a/testgen/ui/components/widgets/dialog.py b/testgen/ui/components/widgets/dialog.py deleted file mode 100644 index 6c2c4e9..0000000 --- 
a/testgen/ui/components/widgets/dialog.py +++ /dev/null @@ -1,44 +0,0 @@ -import functools -import random -import string -import typing - -import streamlit as st -from streamlit.elements.lib.dialog import DialogWidth - - -def dialog(title: str, *, width: DialogWidth = "small", key: str | None = None) -> typing.Callable: - """ - Wrap Streamlit's native dialog to avoid passing parameters that will - be ignored during the fragment's re-run. - """ - dialog_contents: typing.Callable = lambda: None - - def render_dialog() -> typing.Any: - args = [] - kwargs = {} - if key: - args, kwargs = st.session_state[key] - return dialog_contents(*args, **kwargs) - - name_suffix = "".join(random.choices(string.ascii_lowercase, k=8)) # noqa: S311 - - # NOTE: st.dialog uses __qualname__ to generate the fragment hash, effectively overshadowing the uniqueness of the - # render_dialog() function. - render_dialog.__name__ = f"render_dialog_{name_suffix}" - render_dialog.__qualname__ = render_dialog.__qualname__.replace("render_dialog", render_dialog.__name__) - - render_dialog = st.dialog(title=title, width=width)(render_dialog) - - def decorator(func: typing.Callable) -> typing.Callable: - nonlocal dialog_contents - dialog_contents = func - - @functools.wraps(func) - def wrapper(*args, **kwargs): - if key: - st.session_state[key] = (args, kwargs) - render_dialog() - return wrapper - - return decorator diff --git a/testgen/ui/views/connections.py b/testgen/ui/views/connections.py index 3fc44dc..0f6fc5f 100644 --- a/testgen/ui/views/connections.py +++ b/testgen/ui/views/connections.py @@ -57,7 +57,7 @@ def render(self, project_code: str, **_kwargs) -> None: {"connection_id": connection["connection_id"]}, ) - @testgen.dialog(title="Configure QC Utility Schema", key="config_qc_dialog_args") + @st.dialog(title="Configure QC Utility Schema") def create_qc_schema_dialog(self, selected_connection): connection_id = selected_connection["connection_id"] project_qc_schema = 
selected_connection["project_qc_schema"] diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py index d6d56a5..0024de7 100644 --- a/testgen/ui/views/test_suites.py +++ b/testgen/ui/views/test_suites.py @@ -155,12 +155,12 @@ def get_db_table_group_choices(project_code): return dq.run_table_groups_lookup_query(schema, project_code) -@testgen.dialog(title="Add Test Suite", key="add_test_suite_dialog_args") +@st.dialog(title="Add Test Suite") def add_test_suite_dialog(project_code, table_groups_df): show_test_suite("add", project_code, table_groups_df) -@testgen.dialog(title="Edit Test Suite", key="edit_test_suite_dialog_args") +@st.dialog(title="Edit Test Suite") def edit_test_suite_dialog(project_code, table_groups_df, selected): show_test_suite("edit", project_code, table_groups_df, selected) @@ -274,7 +274,7 @@ def show_test_suite(mode, project_code, table_groups_df, selected=None): st.rerun() -@testgen.dialog(title="Delete Test Suite", key="delete_test_suite_dialog_args") +@st.dialog(title="Delete Test Suite") def delete_test_suite_dialog(selected_test_suite): test_suite_id = selected_test_suite["id"] test_suite_name = selected_test_suite["test_suite"] @@ -324,7 +324,7 @@ def delete_test_suite_dialog(selected_test_suite): st.rerun() -@testgen.dialog(title="Run Tests", key="run_tests_dialog_args") +@st.dialog(title="Run Tests") def run_tests_dialog(project_code, selected_test_suite): test_suite_key = selected_test_suite["test_suite"] start_process_button_message = "Start" @@ -364,7 +364,7 @@ def run_tests_dialog(project_code, selected_test_suite): ) -@testgen.dialog(title="Generate Tests", key="generate_tests_dialog_args") +@st.dialog(title="Generate Tests") def generate_tests_dialog(selected_test_suite): test_suite_id = selected_test_suite["id"] test_suite_key = selected_test_suite["test_suite"] @@ -441,7 +441,7 @@ def generate_tests_dialog(selected_test_suite): status_container.success("Process has successfully finished.") 
-@testgen.dialog(title="Export to Observability", key="export_to_obs_dialog_args") +@st.dialog(title="Export to Observability") def observability_export_dialog(selected_test_suite): project_key = selected_test_suite["project_code"] test_suite_key = selected_test_suite["test_suite"] From 3d37b11bcf8651921a998779ef2bbeb91ec5541a Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Fri, 20 Sep 2024 12:24:08 -0400 Subject: [PATCH 64/78] fix(ui): redirect on invalid query params in inner pages --- testgen/ui/navigation/router.py | 8 ++++++++ testgen/ui/queries/profiling_queries.py | 18 +++++++++--------- testgen/ui/queries/test_suite_queries.py | 2 +- testgen/ui/services/connection_service.py | 7 ++++--- testgen/ui/services/test_suite_service.py | 9 +++++++-- testgen/ui/views/profiling_anomalies.py | 11 ++++++++--- testgen/ui/views/profiling_results.py | 9 ++++++++- testgen/ui/views/table_groups.py | 6 ++++++ testgen/ui/views/test_definitions.py | 6 ++++++ testgen/ui/views/test_results.py | 23 +++++++++++++++-------- 10 files changed, 72 insertions(+), 27 deletions(-) diff --git a/testgen/ui/navigation/router.py b/testgen/ui/navigation/router.py index 0c58484..d010ee9 100644 --- a/testgen/ui/navigation/router.py +++ b/testgen/ui/navigation/router.py @@ -1,6 +1,7 @@ from __future__ import annotations import logging +import time import streamlit as st @@ -66,6 +67,13 @@ def navigate(self, /, to: str, with_args: dict = {}) -> None: # noqa: B006 st.error(error_message) LOG.exception(error_message) + + def navigate_with_warning(self, warning: str, to: str, with_args: dict = {}) -> None: # noqa: B006 + st.warning(warning) + time.sleep(3) + self.navigate(to, with_args) + + def set_query_params(self, with_args: dict) -> None: params = st.query_params params.update(with_args) diff --git a/testgen/ui/queries/profiling_queries.py b/testgen/ui/queries/profiling_queries.py index f831bbc..e22d711 100644 --- a/testgen/ui/queries/profiling_queries.py +++ 
b/testgen/ui/queries/profiling_queries.py @@ -1,3 +1,4 @@ +import pandas as pd import streamlit as st import testgen.ui.services.database_service as db @@ -63,19 +64,18 @@ def run_column_lookup_query(str_table_groups_id, str_table_name): @st.cache_data(show_spinner=False) -def lookup_db_parentage_from_run(str_profile_run_id): - str_schema = st.session_state["dbschema"] - # Define the query - str_sql = f""" +def lookup_db_parentage_from_run(profile_run_id: str) -> tuple[pd.Timestamp, str, str, str] | None: + schema: str = st.session_state["dbschema"] + sql = f""" SELECT profiling_starttime as profile_run_date, table_groups_id, g.table_groups_name, g.project_code - FROM {str_schema}.profiling_runs pr - INNER JOIN {str_schema}.table_groups g + FROM {schema}.profiling_runs pr + INNER JOIN {schema}.table_groups g ON pr.table_groups_id = g.id - WHERE pr.id = '{str_profile_run_id}' + WHERE pr.id = '{profile_run_id}' """ - df = db.retrieve_data(str_sql) + df = db.retrieve_data(sql) if not df.empty: - return df.at[0, "profile_run_date"], df.at[0, "table_groups_id"], df.at[0, "table_groups_name"], df.at[0, "project_code"] + return df.at[0, "profile_run_date"], str(df.at[0, "table_groups_id"]), df.at[0, "table_groups_name"], df.at[0, "project_code"] @st.cache_data(show_spinner="Retrieving Data") diff --git a/testgen/ui/queries/test_suite_queries.py b/testgen/ui/queries/test_suite_queries.py index 61982ca..8885545 100644 --- a/testgen/ui/queries/test_suite_queries.py +++ b/testgen/ui/queries/test_suite_queries.py @@ -118,7 +118,7 @@ def get_by_project(schema, project_code, table_group_id=None): @st.cache_data(show_spinner=False) -def get_by_id(schema, test_suite_id): +def get_by_id(schema: str, test_suite_id: str) -> pd.DataFrame: sql = f""" SELECT suites.id::VARCHAR(50), diff --git a/testgen/ui/services/connection_service.py b/testgen/ui/services/connection_service.py index faad168..394c82a 100644 --- a/testgen/ui/services/connection_service.py +++ 
b/testgen/ui/services/connection_service.py @@ -14,11 +14,12 @@ from testgen.common.encrypt import DecryptText, EncryptText -def get_by_id(connection_id, hide_passwords: bool = True): +def get_by_id(connection_id: str, hide_passwords: bool = True) -> dict | None: connections_df = connection_queries.get_by_id(connection_id) decrypt_connections(connections_df, hide_passwords) - connection = connections_df.to_dict(orient="records")[0] - return connection + connections_list = connections_df.to_dict(orient="records") + if len(connections_list): + return connections_list[0] def get_connections(project_code, hide_passwords: bool = False): diff --git a/testgen/ui/services/test_suite_service.py b/testgen/ui/services/test_suite_service.py index 720695e..b877963 100644 --- a/testgen/ui/services/test_suite_service.py +++ b/testgen/ui/services/test_suite_service.py @@ -1,3 +1,4 @@ +import pandas as pd import streamlit as st import testgen.ui.queries.test_suite_queries as test_suite_queries @@ -9,9 +10,13 @@ def get_by_project(project_code, table_group_id=None): return test_suite_queries.get_by_project(schema, project_code, table_group_id) -def get_by_id(test_suite_id): +def get_by_id(test_suite_id: str) -> pd.Series: schema = st.session_state["dbschema"] - return test_suite_queries.get_by_id(schema, test_suite_id).iloc[0] + df = test_suite_queries.get_by_id(schema, test_suite_id) + if not df.empty: + return df.iloc[0] + else: + return pd.Series() def edit(test_suite): diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py index ab1bfcc..c94c2bf 100644 --- a/testgen/ui/views/profiling_anomalies.py +++ b/testgen/ui/views/profiling_anomalies.py @@ -23,9 +23,14 @@ class ProfilingAnomaliesPage(Page): ] def render(self, run_id: str, issue_class: str | None = None, issue_type: str | None = None, **_kwargs) -> None: - run_date, _table_group_id, table_group_name, project_code = profiling_queries.lookup_db_parentage_from_run( - run_id - ) + 
run_parentage = profiling_queries.lookup_db_parentage_from_run(run_id) + if not run_parentage: + self.router.navigate_with_warning( + f"Profiling run with ID '{run_id}' does not exist. Redirecting to list of Profiling Runs ...", + "profiling-runs", + ) + + run_date, _table_group_id, table_group_name, project_code = run_parentage run_date = date_service.get_timezoned_timestamp(st.session_state, run_date) project_service.set_current_project(project_code) diff --git a/testgen/ui/views/profiling_results.py b/testgen/ui/views/profiling_results.py index bd134d9..e0a38f0 100644 --- a/testgen/ui/views/profiling_results.py +++ b/testgen/ui/views/profiling_results.py @@ -22,9 +22,16 @@ class ProfilingResultsPage(Page): ] def render(self, run_id: str, table_name: str | None = None, column_name: str | None = None, **_kwargs) -> None: - run_date, table_group_id, table_group_name, project_code = profiling_queries.lookup_db_parentage_from_run( + run_parentage = profiling_queries.lookup_db_parentage_from_run( run_id ) + if not run_parentage: + self.router.navigate_with_warning( + f"Profiling run with ID '{run_id}' does not exist. Redirecting to list of Profiling Runs ...", + "profiling-runs", + ) + + run_date, table_group_id, table_group_name, project_code = run_parentage run_date = date_service.get_timezoned_timestamp(st.session_state, run_date) project_service.set_current_project(project_code) diff --git a/testgen/ui/views/table_groups.py b/testgen/ui/views/table_groups.py index 734214a..7403743 100644 --- a/testgen/ui/views/table_groups.py +++ b/testgen/ui/views/table_groups.py @@ -27,6 +27,12 @@ class TableGroupsPage(Page): def render(self, connection_id: str, **_kwargs) -> None: connection = connection_service.get_by_id(connection_id, hide_passwords=False) + if not connection: + self.router.navigate_with_warning( + f"Connection with ID '{connection_id}' does not exist. 
Redirecting to list of Connections ...", + "connections", + ) + project_code = connection["project_code"] project_service.set_current_project(project_code) diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py index 463d8df..7921b58 100644 --- a/testgen/ui/views/test_definitions.py +++ b/testgen/ui/views/test_definitions.py @@ -30,6 +30,12 @@ class TestDefinitionsPage(Page): def render(self, test_suite_id: str, table_name: str | None = None, column_name: str | None = None, **_kwargs) -> None: test_suite = test_suite_service.get_by_id(test_suite_id) + if test_suite.empty: + self.router.navigate_with_warning( + f"Test suite with ID '{test_suite_id}' does not exist. Redirecting to list of Test Suites ...", + "test-suites", + ) + table_group = table_group_service.get_by_id(test_suite["table_groups_id"]) project_code = table_group["project_code"] project_service.set_current_project(project_code) diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py index 67e49a5..fbbff5e 100644 --- a/testgen/ui/views/test_results.py +++ b/testgen/ui/views/test_results.py @@ -29,7 +29,14 @@ class TestResultsPage(Page): ] def render(self, run_id: str, status: str | None = None, test_type: str | None = None, **_kwargs) -> None: - run_date, test_suite_name, project_code = get_drill_test_run(run_id) + run_parentage = get_drill_test_run(run_id) + if not run_parentage: + self.router.navigate_with_warning( + f"Test run with ID '{run_id}' does not exist. 
Redirecting to list of Test Runs ...", + "test-runs", + ) + + run_date, test_suite_name, project_code = run_parentage run_date = date_service.get_timezoned_timestamp(st.session_state, run_date) project_service.set_current_project(project_code) @@ -160,17 +167,17 @@ def render(self, run_id: str, status: str | None = None, test_type: str | None = @st.cache_data(show_spinner=ALWAYS_SPIN) -def get_drill_test_run(str_test_run_id): - str_schema = st.session_state["dbschema"] - str_sql = f""" +def get_drill_test_run(test_run_id: str) -> tuple[pd.Timestamp, str, str] | None: + schema: str = st.session_state["dbschema"] + sql = f""" SELECT tr.test_starttime as test_date, ts.test_suite, ts.project_code - FROM {str_schema}.test_runs tr - INNER JOIN {str_schema}.test_suites ts ON tr.test_suite_id = ts.id - WHERE tr.id = '{str_test_run_id}'::UUID; + FROM {schema}.test_runs tr + INNER JOIN {schema}.test_suites ts ON tr.test_suite_id = ts.id + WHERE tr.id = '{test_run_id}'::UUID; """ - df = db.retrieve_data(str_sql) + df = db.retrieve_data(sql) if not df.empty: return df.at[0, "test_date"], df.at[0, "test_suite"], df.at[0, "project_code"] From 994e0d9a1f54aa4a9ab845ff49b04ccddb17b294 Mon Sep 17 00:00:00 2001 From: Ricardo Boni Date: Thu, 19 Sep 2024 15:43:41 -0400 Subject: [PATCH 65/78] fix(test validation): Fixing the test validation --- .../test_parameter_validation_query.py | 16 +++-- .../commands/run_test_parameter_validation.py | 62 +++++++++++++------ .../dbsetup/060_create_standard_views.sql | 6 +- .../ex_flag_tests_test_definitions.sql | 58 ++--------------- .../ex_get_test_column_list_tg.sql | 9 ++- .../ex_prep_flag_tests_test_definitions.sql | 6 ++ .../ex_write_test_val_errors.sql | 5 +- testgen/ui/views/test_results.py | 6 +- 8 files changed, 79 insertions(+), 89 deletions(-) create mode 100644 testgen/template/validate_tests/ex_prep_flag_tests_test_definitions.sql diff --git a/testgen/commands/queries/test_parameter_validation_query.py 
b/testgen/commands/queries/test_parameter_validation_query.py index be0c9bc..d34e210 100644 --- a/testgen/commands/queries/test_parameter_validation_query.py +++ b/testgen/commands/queries/test_parameter_validation_query.py @@ -10,8 +10,8 @@ class CTestParamValidationSQL: project_code = "" test_suite = "" test_schemas = "" - missing_columns = "" - missing_tables = "" + message = "" + test_ids = [] # noqa exception_message = "" flag_val = "" @@ -29,11 +29,9 @@ def _ReplaceParms(self, strInputString): strInputString = strInputString.replace("{TEST_RUN_ID}", self.test_run_id) strInputString = strInputString.replace("{FLAG}", self.flag_val) strInputString = strInputString.replace("{TEST_SCHEMAS}", self.test_schemas) - strInputString = strInputString.replace("{MISSING_COLUMNS}", self.missing_columns) - strInputString = strInputString.replace("{MISSING_TABLES}", self.missing_tables) strInputString = strInputString.replace("{EXCEPTION_MESSAGE}", self.exception_message) - strInputString = strInputString.replace("{MISSING_COLUMNS_NO_QUOTES}", self.missing_columns.replace("'", "")) - strInputString = strInputString.replace("{MISSING_TABLES_NO_QUOTES}", self.missing_tables.replace("'", "")) + strInputString = strInputString.replace("{MESSAGE}", self.message) + strInputString = strInputString.replace("{CAT_TEST_IDS}", ", ".join(map(str, self.test_ids))) strInputString = strInputString.replace("{START_TIME}", self.today) strInputString = strInputString.replace("{NOW}", date_service.get_now_as_string()) @@ -62,6 +60,12 @@ def GetProjectTestValidationColumns(self): return strQ + def PrepFlagTestsWithFailedValidation(self): + # Runs on Project DB + strQ = self._ReplaceParms(read_template_sql_file("ex_prep_flag_tests_test_definitions.sql", "validate_tests")) + + return strQ + def FlagTestsWithFailedValidation(self): # Runs on Project DB strQ = self._ReplaceParms(read_template_sql_file("ex_flag_tests_test_definitions.sql", "validate_tests")) diff --git 
a/testgen/commands/run_test_parameter_validation.py b/testgen/commands/run_test_parameter_validation.py index 21f9102..8e93148 100644 --- a/testgen/commands/run_test_parameter_validation.py +++ b/testgen/commands/run_test_parameter_validation.py @@ -1,7 +1,15 @@ import logging +from collections import defaultdict +from itertools import chain from testgen.commands.queries.test_parameter_validation_query import CTestParamValidationSQL -from testgen.common import AssignConnectParms, RetrieveDBResultsToDictList, RetrieveTestExecParms, RunActionQueryList +from testgen.common import ( + AssignConnectParms, + RetrieveDBResultsToDictList, + RetrieveDBResultsToList, + RetrieveTestExecParms, + RunActionQueryList, +) LOG = logging.getLogger("testgen") @@ -45,15 +53,15 @@ def run_parameter_validation_queries( # Retrieve Test Column list LOG.info("CurrentStep: Retrieve Test Columns for Validation") strColumnList = clsExecute.GetTestValidationColumns(booClean) - lstTestColumns = RetrieveDBResultsToDictList("DKTG", strColumnList) + test_columns, _ = RetrieveDBResultsToList("DKTG", strColumnList) - if len(lstTestColumns) == 0: + if not test_columns: LOG.warning(f"No test columns are present to validate in Test Suite {strTestSuite}") missing_columns = [] else: # Derive test schema list -- make CSV string from list of columns # to be used as criteria for retrieving data dictionary - setSchemas = {s["columns"].split(".")[0] for s in lstTestColumns} + setSchemas = {col.split(".")[0] for col, _ in test_columns} strSchemas = ", ".join([f"'{value}'" for value in setSchemas]) LOG.debug("Test column list successfully retrieved") @@ -71,7 +79,7 @@ def run_parameter_validation_queries( LOG.debug("Project column list successfully received") LOG.info("CurrentStep: Compare column sets") # load results into sets - result_set1 = {item["columns"].lower() for item in set(lstTestColumns)} + result_set1 = {col.lower() for col, _ in test_columns} result_set2 = {item["columns"].lower() for item in 
set(lstProjectTestColumns)} # Check if all columns exist in the table @@ -80,11 +88,8 @@ def run_parameter_validation_queries( if len(missing_columns) == 0: LOG.info("No missing column in Project Column list.") - strMissingColumns = ", ".join(f"'{x}'" for x in missing_columns) - srtNoQuoteMissingCols = strMissingColumns.replace("'", "") - if missing_columns: - LOG.debug("Test Columns are missing in target database: %s", srtNoQuoteMissingCols) + LOG.debug("Test Columns are missing in target database: %s", ", ".join(missing_columns)) # Extracting schema.tables that are missing from the result sets tables_set1 = {elem.rsplit(".", 1)[0] for elem in result_set1} @@ -94,25 +99,46 @@ def run_parameter_validation_queries( missing_tables = tables_set1.difference(tables_set2) if missing_tables: - strMissingtables = ", ".join(f"'{x}'" for x in missing_tables) + LOG.info("Missing tables: %s", ", ".join(missing_tables)) else: LOG.info("No missing tables in Project Column list.") - strMissingtables = "''" # Flag test_definitions tests with missing columns: LOG.info("CurrentStep: Flagging Tests That Failed Validation") - clsExecute.missing_columns = strMissingColumns - clsExecute.missing_tables = strMissingtables + # Flag Value is D if called from execute_tests_qry.py, otherwise N to disable now if booRunFromTestExec: clsExecute.flag_val = "D" - strTempMessage = "Tests that failed parameter validation have been flagged." + LOG.debug("Tests that failed parameter validation will be flagged.") else: clsExecute.flag_val = "N" - strTempMessage = "Tests that failed parameter validation have been set to inactive." 
- strFlagTests = clsExecute.FlagTestsWithFailedValidation() - RunActionQueryList("DKTG", [strFlagTests]) - LOG.debug(strTempMessage) + LOG.debug("Tests that failed parameter validation will be deactivated.") + + tests_missing_tables = defaultdict(list) + tests_missing_columns = defaultdict(list) + for column_name, test_ids in test_columns: + column_name = column_name.lower() + table_name = column_name.rsplit(".", 1)[0] + if table_name in missing_tables: + tests_missing_tables[table_name].extend(test_ids) + elif column_name in missing_columns: + tests_missing_columns[column_name].extend(test_ids) + + clsExecute.test_ids = list(set(chain(*tests_missing_tables.values(), *tests_missing_columns.values()))) + strPrepFlagTests = clsExecute.PrepFlagTestsWithFailedValidation() + RunActionQueryList("DKTG", [strPrepFlagTests]) + + for column_name, test_ids in tests_missing_columns.items(): + clsExecute.message = f"Missing column: {column_name}" + clsExecute.test_ids = test_ids + strFlagTests = clsExecute.FlagTestsWithFailedValidation() + RunActionQueryList("DKTG", [strFlagTests]) + + for table_name, test_ids in tests_missing_tables.items(): + clsExecute.message = f"Missing table: {table_name}" + clsExecute.test_ids = test_ids + strFlagTests = clsExecute.FlagTestsWithFailedValidation() + RunActionQueryList("DKTG", [strFlagTests]) # when run_parameter_validation_queries() is called from execute_tests_query.py: # we disable tests and write validation errors to test_results table. 
diff --git a/testgen/template/dbsetup/060_create_standard_views.sql b/testgen/template/dbsetup/060_create_standard_views.sql index 2984bcf..9ec8331 100644 --- a/testgen/template/dbsetup/060_create_standard_views.sql +++ b/testgen/template/dbsetup/060_create_standard_views.sql @@ -136,14 +136,14 @@ SELECT p.project_name, (1 - r.result_code)::INTEGER as exception_ct, CASE WHEN result_status = 'Warning' - AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1 + AND result_message NOT ILIKE 'Inactivated%' THEN 1 END::INTEGER as warning_ct, CASE WHEN result_status = 'Failed' - AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1 + AND result_message NOT ILIKE 'Inactivated%' THEN 1 END::INTEGER as failed_ct, CASE - WHEN result_message ILIKE 'ERROR - TEST COLUMN MISSING%' THEN 1 + WHEN result_message ILIKE 'Inactivated%' THEN 1 END as execution_error_ct, p.project_code, r.table_groups_id, diff --git a/testgen/template/validate_tests/ex_flag_tests_test_definitions.sql b/testgen/template/validate_tests/ex_flag_tests_test_definitions.sql index 2ef7689..e9ebc1f 100644 --- a/testgen/template/validate_tests/ex_flag_tests_test_definitions.sql +++ b/testgen/template/validate_tests/ex_flag_tests_test_definitions.sql @@ -1,55 +1,7 @@ /* -Mark Test inactive for Missing columns with update status +Mark Test inactive for Missing columns/tables with update status */ -with test_columns as - (SELECT DISTINCT schema_name || '.' || table_name || '.' 
|| column_name AS columns - FROM ( SELECT cat_test_id, - schema_name, - table_name, - UNNEST(STRING_TO_ARRAY(all_columns, '~|~')) AS column_name - FROM ( SELECT cat_test_id, - schema_name, - table_name, - CONCAT_WS('~|~', column_name, - groupby_names, - window_date_column) AS all_columns - FROM test_definitions d - INNER JOIN test_types t - ON d.test_type = t.test_type - WHERE test_suite_id = '{TEST_SUITE_ID}' - AND t.test_scope = 'column' - - UNION - SELECT cat_test_id, - match_schema_name AS schema_name, - match_table_name AS table_name, - CONCAT_WS('~|~', - match_column_names, - match_groupby_names) AS all_columns - FROM test_definitions d - INNER JOIN test_types t - ON d.test_type = t.test_type - WHERE test_suite_id = '{TEST_SUITE_ID}' - AND t.test_scope = 'column') a ) b) -update test_definitions -set test_active = '{FLAG}', - test_definition_status = 'Inactivated {RUN_DATE}: Missing Column' -where cat_test_id in (select distinct cat_test_id - from test_columns - where lower(columns) in - ({MISSING_COLUMNS})); - - -/* -Mark Test inactive for Missing table with update status -*/ -with test_columns as - (select distinct cat_test_id, schema_name || '.' || table_name || '.' || column_name as columns - from test_definitions - where test_suite_id = '{TEST_SUITE_ID}' - and lower(schema_name || '.' 
|| table_name) in ({MISSING_TABLES})) -update test_definitions -set test_active = '{FLAG}', - test_definition_status = 'Inactivated {RUN_DATE}: Missing Table' -where cat_test_id in (select distinct cat_test_id - from test_columns); +UPDATE test_definitions +SET test_active = '{FLAG}', + test_definition_status = LEFT('Inactivated {RUN_DATE}: ' || CONCAT_WS('; ', substring(test_definition_status from 34), '{MESSAGE}'), 200) +WHERE cat_test_id IN ({CAT_TEST_IDS}); diff --git a/testgen/template/validate_tests/ex_get_test_column_list_tg.sql b/testgen/template/validate_tests/ex_get_test_column_list_tg.sql index 318909c..df7bdde 100644 --- a/testgen/template/validate_tests/ex_get_test_column_list_tg.sql +++ b/testgen/template/validate_tests/ex_get_test_column_list_tg.sql @@ -1,5 +1,6 @@ -SELECT DISTINCT schema_name || '.' || table_name || '.' || column_name AS columns - FROM ( SELECT cat_test_id, + SELECT schema_name || '.' || table_name || '.' || column_name AS columns, + ARRAY_AGG(cat_test_id) as test_id_array + FROM (SELECT cat_test_id, schema_name AS schema_name, table_name AS table_name, TRIM(UNNEST(STRING_TO_ARRAY(column_name, ','))) as column_name @@ -47,4 +48,6 @@ SELECT DISTINCT schema_name || '.' || table_name || '.' 
|| column_name AS column INNER JOIN test_types t ON d.test_type = t.test_type WHERE test_suite_id = '{TEST_SUITE_ID}' - AND t.test_scope = 'referential' ) cols; + AND t.test_scope = 'referential' ) cols + WHERE column_name SIMILAR TO '[A-Za-z0-9_]+' +GROUP BY columns; diff --git a/testgen/template/validate_tests/ex_prep_flag_tests_test_definitions.sql b/testgen/template/validate_tests/ex_prep_flag_tests_test_definitions.sql new file mode 100644 index 0000000..d5eb6a2 --- /dev/null +++ b/testgen/template/validate_tests/ex_prep_flag_tests_test_definitions.sql @@ -0,0 +1,6 @@ +/* +Clean the test definition status before it's set with missing tables / columns information +*/ +UPDATE test_definitions +SET test_definition_status = NULL +WHERE cat_test_id IN ({CAT_TEST_IDS}); diff --git a/testgen/template/validate_tests/ex_write_test_val_errors.sql b/testgen/template/validate_tests/ex_write_test_val_errors.sql index 8546863..b1d47d3 100644 --- a/testgen/template/validate_tests/ex_write_test_val_errors.sql +++ b/testgen/template/validate_tests/ex_write_test_val_errors.sql @@ -21,9 +21,8 @@ INSERT INTO test_results '{TEST_RUN_ID}' as test_run_id, NULL as input_parameters, 0 as result_code, - -- TODO: show only missing columns referenced in this test - left('ERROR - TEST COLUMN MISSING: {MISSING_COLUMNS_NO_QUOTES}', 470) AS result_message, + test_definition_status AS result_message, NULL as result_measure FROM test_definitions - WHERE test_active = '-1' + WHERE test_active = 'D' AND test_suite_id = '{TEST_SUITE_ID}'; diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py index fbbff5e..9a194b4 100644 --- a/testgen/ui/views/test_results.py +++ b/testgen/ui/views/test_results.py @@ -225,14 +225,14 @@ def get_test_results_uncached(str_schema, str_run_id, str_sel_test_status, test_ (1 - r.result_code)::INTEGER as exception_ct, CASE WHEN result_status = 'Warning' - AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%%' THEN 1 + AND result_message 
NOT ILIKE 'Inactivated%%' THEN 1 END::INTEGER as warning_ct, CASE WHEN result_status = 'Failed' - AND result_message NOT ILIKE 'ERROR - TEST COLUMN MISSING%%' THEN 1 + AND result_message NOT ILIKE 'Inactivated%%' THEN 1 END::INTEGER as failed_ct, CASE - WHEN result_message ILIKE 'ERROR - TEST COLUMN MISSING%%' THEN 1 + WHEN result_message ILIKE 'Inactivated%%' THEN 1 END as execution_error_ct, p.project_code, r.table_groups_id::VARCHAR, r.id::VARCHAR as test_result_id, r.test_run_id::VARCHAR, From dd5f043e43c8bfa0498c133949871debc0f4c553 Mon Sep 17 00:00:00 2001 From: Ricardo Boni Date: Tue, 24 Sep 2024 12:09:29 -0400 Subject: [PATCH 66/78] Revert "feat(ui): bind grid selection to query params on result pages" This reverts commit 2d73a57ba5173e9df1633e2e047d106cfdb08af6. --- testgen/ui/services/form_service.py | 28 +++---------------------- testgen/ui/views/profiling_anomalies.py | 8 ++----- testgen/ui/views/profiling_results.py | 6 +----- testgen/ui/views/test_results.py | 9 ++------ 4 files changed, 8 insertions(+), 43 deletions(-) diff --git a/testgen/ui/services/form_service.py b/testgen/ui/services/form_service.py index e1810f3..a1a56de 100644 --- a/testgen/ui/services/form_service.py +++ b/testgen/ui/services/form_service.py @@ -19,7 +19,6 @@ import testgen.common.date_service as date_service import testgen.ui.services.authentication_service as authentication_service import testgen.ui.services.database_service as db -from testgen.ui.navigation.router import Router """ Shared rendering of UI elements @@ -767,10 +766,8 @@ def render_grid_select( str_prompt=None, int_height=400, do_multi_select=False, - bind_to_query=None, show_column_headers=None, render_highlights=True, - key="aggrid", ): show_prompt(str_prompt) @@ -844,18 +841,7 @@ def render_grid_select( gb = GridOptionsBuilder.from_dataframe(df) selection_mode = "multiple" if do_multi_select else "single" - - pre_selected_rows = None - if bind_to_query: - query_value = st.query_params.get(bind_to_query) - # 
Workaround for this open issue: https://github.com/PablocFonseca/streamlit-aggrid/issues/207#issuecomment-1793039564 - pre_selected_rows = { query_value: True } if isinstance(query_value, str) and query_value.isdigit() else None - - gb.configure_selection( - selection_mode=selection_mode, - use_checkbox=do_multi_select, - pre_selected_rows=pre_selected_rows, - ) + gb.configure_selection(selection_mode=selection_mode, use_checkbox=do_multi_select) all_columns = list(df.columns) @@ -910,18 +896,10 @@ def render_grid_select( "padding-bottom": "0px !important", } }, - # Key is needed for query binding to work - # Changing selection mode does not work if same key is used for both modes - key=f"{key}_{selection_mode}", ) - selected_rows = grid_data["selected_rows"] - if bind_to_query: - Router().set_query_params({ - bind_to_query: selected_rows[0].get("_selectedRowNodeInfo", {}).get("nodeRowIndex") if len(selected_rows) else None, - }) - if len(selected_rows): - return selected_rows + if len(grid_data["selected_rows"]): + return grid_data["selected_rows"] def render_logo(logo_path: str = logo_file): diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py index c94c2bf..b81e158 100644 --- a/testgen/ui/views/profiling_anomalies.py +++ b/testgen/ui/views/profiling_anomalies.py @@ -132,11 +132,7 @@ def render(self, run_id: str, issue_class: str | None = None, issue_type: str | # Show main grid and retrieve selections selected = fm.render_grid_select( - df_pa, - lst_show_columns, - int_height=400, - do_multi_select=do_multi_select, - bind_to_query="selected", + df_pa, lst_show_columns, int_height=400, do_multi_select=do_multi_select ) with export_button_column: @@ -158,7 +154,7 @@ def render(self, run_id: str, issue_class: str | None = None, issue_type: str | if selected: # Always show details for last selected row - selected_row = selected[0] + selected_row = selected[len(selected) - 1] else: selected_row = None diff --git 
a/testgen/ui/views/profiling_results.py b/testgen/ui/views/profiling_results.py index e0a38f0..aa94ae6 100644 --- a/testgen/ui/views/profiling_results.py +++ b/testgen/ui/views/profiling_results.py @@ -105,11 +105,7 @@ def render(self, run_id: str, table_name: str | None = None, column_name: str | with st.expander("📜 **Table CREATE script with suggested datatypes**"): st.code(generate_create_script(df), "sql") - selected_row = fm.render_grid_select( - df, - show_columns, - bind_to_query="selected", - ) + selected_row = fm.render_grid_select(df, show_columns) with export_button_column: testgen.flex_row_end() diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py index fbbff5e..14990c3 100644 --- a/testgen/ui/views/test_results.py +++ b/testgen/ui/views/test_results.py @@ -667,12 +667,7 @@ def show_result_detail(str_run_id, str_sel_test_status, test_type_id, sorting_co ] selected_rows = fm.render_grid_select( - df, - lst_show_columns, - do_multi_select=do_multi_select, - show_column_headers=lst_show_headers, - key="grid:test-results", - bind_to_query="selected", + df, lst_show_columns, do_multi_select=do_multi_select, show_column_headers=lst_show_headers ) with export_container: @@ -717,7 +712,7 @@ def show_result_detail(str_run_id, str_sel_test_status, test_type_id, sorting_co if not selected_rows: st.markdown(":orange[Select a record to see more information.]") else: - selected_row = selected_rows[0] + selected_row = selected_rows[len(selected_rows) - 1] dfh = get_test_result_history( selected_row["test_type"], selected_row["test_suite_id"], From e7494f366be0e2e33d747b87baaff3f993ca74a7 Mon Sep 17 00:00:00 2001 From: Ricardo Boni Date: Tue, 24 Sep 2024 12:26:13 -0400 Subject: [PATCH 67/78] fix(profiling results): Disabling sorting was making the page crash --- testgen/ui/queries/profiling_queries.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/testgen/ui/queries/profiling_queries.py 
b/testgen/ui/queries/profiling_queries.py index e22d711..dc93496 100644 --- a/testgen/ui/queries/profiling_queries.py +++ b/testgen/ui/queries/profiling_queries.py @@ -81,11 +81,13 @@ def lookup_db_parentage_from_run(profile_run_id: str) -> tuple[pd.Timestamp, str @st.cache_data(show_spinner="Retrieving Data") def get_profiling_detail(str_profile_run_id, str_table_name, str_column_name, sorting_columns = None): str_schema = st.session_state["dbschema"] - sorting_columns_str = ( - "p.schema_name, p.table_name, position" - if sorting_columns is None - else ", ".join(" ".join(col) for col in sorting_columns) - ) + if sorting_columns is None: + order_by_str = "ORDER BY p.schema_name, p.table_name, position" + elif len(sorting_columns): + order_by_str = "ORDER BY " + ", ".join(" ".join(col) for col in sorting_columns) + else: + order_by_str = "" + str_sql = f""" SELECT -- Identifiers id::VARCHAR, dk_id, @@ -147,7 +149,7 @@ def get_profiling_detail(str_profile_run_id, str_table_name, str_column_name, so WHERE p.profile_run_id = '{str_profile_run_id}'::UUID AND p.table_name ILIKE '{str_table_name}' AND p.column_name ILIKE '{str_column_name}' - ORDER BY {sorting_columns_str}; + {order_by_str}; """ return db.retrieve_data(str_sql) From e24d221d5bb5bbdc0a120c7fcec6fe18929e998c Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Tue, 24 Sep 2024 12:54:10 -0400 Subject: [PATCH 68/78] fix(ui): use consistent labels for test result status --- testgen/ui/views/overview.py | 8 ++++---- testgen/ui/views/test_results.py | 22 +++++++++++----------- testgen/ui/views/test_runs.py | 4 ++-- testgen/ui/views/test_suites.py | 4 ++-- 4 files changed, 19 insertions(+), 19 deletions(-) diff --git a/testgen/ui/views/overview.py b/testgen/ui/views/overview.py index 8e01482..2423fef 100644 --- a/testgen/ui/views/overview.py +++ b/testgen/ui/views/overview.py @@ -141,9 +141,9 @@ def render_table_group_card(table_group: pd.Series, project_code: str, key: int) testgen.summary_bar( items=[ { 
"label": "Passed", "value": passed_tests, "color": "green" }, - { "label": "Warnings", "value": to_int(table_group["latest_tests_warning_ct"]), "color": "yellow" }, + { "label": "Warning", "value": to_int(table_group["latest_tests_warning_ct"]), "color": "yellow" }, { "label": "Failed", "value": to_int(table_group["latest_tests_failed_ct"]), "color": "red" }, - { "label": "Errors", "value": to_int(table_group["latest_tests_error_ct"]), "color": "brown" }, + { "label": "Error", "value": to_int(table_group["latest_tests_error_ct"]), "color": "brown" }, { "label": "Dismissed", "value": to_int(table_group["latest_tests_dismissed_ct"]), "color": "grey" }, ], key=f"tests_{key}", @@ -208,9 +208,9 @@ def render_test_suite_item(test_suite: pd.Series, column_spec: list[int], key: i testgen.summary_bar( items=[ { "label": "Passed", "value": to_int(test_suite["last_run_passed_ct"]), "color": "green" }, - { "label": "Warnings", "value": to_int(test_suite["last_run_warning_ct"]), "color": "yellow" }, + { "label": "Warning", "value": to_int(test_suite["last_run_warning_ct"]), "color": "yellow" }, { "label": "Failed", "value": to_int(test_suite["last_run_failed_ct"]), "color": "red" }, - { "label": "Errors", "value": to_int(test_suite["last_run_error_ct"]), "color": "brown" }, + { "label": "Error", "value": to_int(test_suite["last_run_error_ct"]), "color": "brown" }, { "label": "Dismissed", "value": to_int(test_suite["last_run_dismissed_ct"]), "color": "grey" }, ], key=f"tests_{key}", diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py index 14990c3..abf26f4 100644 --- a/testgen/ui/views/test_results.py +++ b/testgen/ui/views/test_results.py @@ -62,14 +62,14 @@ def render(self, run_id: str, status: str | None = None, test_type: str | None = with status_filter_column: status_options = [ - "Failures and Warnings", - "Failed Tests", - "Tests with Warnings", - "Passed Tests", + "Failed + Warning", + "Failed", + "Warning", + "Passed", ] status = 
testgen.toolbar_select( options=status_options, - default_value=status or "Failures and Warnings", + default_value=status or "Failed + Warning", required=False, bind_to_query="status", label="Result Status", @@ -104,13 +104,13 @@ def render(self, run_id: str, status: str | None = None, test_type: str | None = do_multi_select = st.toggle("Multi-Select", help=str_help) match status: - case "Failures and Warnings": + case "Failed + Warning": status = "'Failed','Warning'" - case "Failed Tests": + case "Failed": status = "'Failed'" - case "Tests with Warnings": + case "Warning": status = "'Warning'" - case "Passed Tests": + case "Passed": status = "'Passed'" # Display main grid and retrieve selection @@ -341,9 +341,9 @@ def get_test_result_summary(run_id): return [ { "label": "Passed", "value": int(df.at[0, "passed_ct"]), "color": "green" }, - { "label": "Warnings", "value": int(df.at[0, "warning_ct"]), "color": "yellow" }, + { "label": "Warning", "value": int(df.at[0, "warning_ct"]), "color": "yellow" }, { "label": "Failed", "value": int(df.at[0, "failed_ct"]), "color": "red" }, - { "label": "Errors", "value": int(df.at[0, "error_ct"]), "color": "brown" }, + { "label": "Error", "value": int(df.at[0, "error_ct"]), "color": "brown" }, { "label": "Dismissed", "value": int(df.at[0, "dismissed_ct"]), "color": "grey" }, ] diff --git a/testgen/ui/views/test_runs.py b/testgen/ui/views/test_runs.py index eb972dd..0c26007 100644 --- a/testgen/ui/views/test_runs.py +++ b/testgen/ui/views/test_runs.py @@ -144,9 +144,9 @@ def render_test_run_row(test_run: pd.Series, column_spec: list[int]) -> None: testgen.summary_bar( items=[ { "label": "Passed", "value": to_int(test_run["passed_ct"]), "color": "green" }, - { "label": "Warnings", "value": to_int(test_run["warning_ct"]), "color": "yellow" }, + { "label": "Warning", "value": to_int(test_run["warning_ct"]), "color": "yellow" }, { "label": "Failed", "value": to_int(test_run["failed_ct"]), "color": "red" }, - { "label": "Errors", 
"value": to_int(test_run["error_ct"]), "color": "brown" }, + { "label": "Error", "value": to_int(test_run["error_ct"]), "color": "brown" }, { "label": "Dismissed", "value": to_int(test_run["dismissed_ct"]), "color": "grey" }, ], height=10, diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py index 0024de7..2bf5516 100644 --- a/testgen/ui/views/test_suites.py +++ b/testgen/ui/views/test_suites.py @@ -122,9 +122,9 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N testgen.summary_bar( items=[ { "label": "Passed", "value": to_int(test_suite["last_run_passed_ct"]), "color": "green" }, - { "label": "Warnings", "value": to_int(test_suite["last_run_warning_ct"]), "color": "yellow" }, + { "label": "Warning", "value": to_int(test_suite["last_run_warning_ct"]), "color": "yellow" }, { "label": "Failed", "value": to_int(test_suite["last_run_failed_ct"]), "color": "red" }, - { "label": "Errors", "value": to_int(test_suite["last_run_error_ct"]), "color": "brown" }, + { "label": "Error", "value": to_int(test_suite["last_run_error_ct"]), "color": "brown" }, { "label": "Dismissed", "value": to_int(test_suite["last_run_dismissed_ct"]), "color": "grey" }, ], height=20, From bf7ac1bc36012cc2ad9350f8b275bd9f9e444c2d Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Tue, 24 Sep 2024 14:11:50 -0400 Subject: [PATCH 69/78] fix(ui): stop disposition buttons from glitching when clicked --- testgen/ui/views/profiling_anomalies.py | 53 +++++++++---------------- testgen/ui/views/test_definitions.py | 52 +++++++++--------------- testgen/ui/views/test_results.py | 53 +++++++++---------------- 3 files changed, 57 insertions(+), 101 deletions(-) diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py index b81e158..60a6fc1 100644 --- a/testgen/ui/views/profiling_anomalies.py +++ b/testgen/ui/views/profiling_anomalies.py @@ -197,41 +197,26 @@ def render(self, run_id: str, issue_class: str | None = 
None, issue_type: str | if "r.disposition" in dict(sorting_columns): cached_functions.append(get_profiling_anomalies) + disposition_actions = [ + { "icon": "✓", "help": "Confirm this issue as relevant for this run", "status": "Confirmed" }, + { "icon": "✘", "help": "Dismiss this issue as not relevant for this run", "status": "Dismissed" }, + { "icon": "🔇", "help": "Mute this test to deactivate it for future runs", "status": "Inactive" }, + { "icon": "↩︎", "help": "Clear action", "status": "No Decision" }, + ] + # Need to render toolbar buttons after grid, so selection status is maintained - if actions_column.button( - "✓", help="Confirm this issue as relevant for this run", disabled=not selected - ): - fm.reset_post_updates( - do_disposition_update(selected, "Confirmed"), - as_toast=True, - clear_cache=True, - lst_cached_functions=cached_functions, - ) - if actions_column.button( - "✘", help="Dismiss this issue as not relevant for this run", disabled=not selected - ): - fm.reset_post_updates( - do_disposition_update(selected, "Dismissed"), - as_toast=True, - clear_cache=True, - lst_cached_functions=cached_functions, - ) - if actions_column.button( - "🔇", help="Mute this test to deactivate it for future runs", disabled=not selected - ): - fm.reset_post_updates( - do_disposition_update(selected, "Inactive"), - as_toast=True, - clear_cache=True, - lst_cached_functions=cached_functions, - ) - if actions_column.button("↩︎", help="Clear action", disabled=not selected): - fm.reset_post_updates( - do_disposition_update(selected, "No Decision"), - as_toast=True, - clear_cache=True, - lst_cached_functions=cached_functions, - ) + for action in disposition_actions: + action["button"] = actions_column.button(action["icon"], help=action["help"], disabled=not selected) + + # This has to be done as a second loop - otherwise, the rest of the buttons after the clicked one are not displayed briefly while refreshing + for action in disposition_actions: + if action["button"]: + 
fm.reset_post_updates( + do_disposition_update(selected, action["status"]), + as_toast=True, + clear_cache=True, + lst_cached_functions=cached_functions, + ) else: st.markdown(":green[**No Hygiene Issues Found**]") diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py index 7921b58..f3d17eb 100644 --- a/testgen/ui/views/test_definitions.py +++ b/testgen/ui/views/test_definitions.py @@ -91,39 +91,25 @@ def render(self, test_suite_id: str, table_name: str | None = None, column_name: ) fm.render_refresh_button(table_actions_column) - # Display buttons - if disposition_column.button("✓", help="Activate for future runs", disabled=not selected): - fm.reset_post_updates( - update_test_definition(selected, "test_active", True, "Activated"), - as_toast=True, - clear_cache=True, - lst_cached_functions=[], - ) - if disposition_column.button("✘", help="Inactivate Test for future runs", disabled=not selected): - fm.reset_post_updates( - update_test_definition(selected, "test_active", False, "Inactivated"), - as_toast=True, - clear_cache=True, - lst_cached_functions=[], - ) - if disposition_column.button( - "🔒", help="Protect from future test generation", disabled=not selected - ): - fm.reset_post_updates( - update_test_definition(selected, "lock_refresh", True, "Locked"), - as_toast=True, - clear_cache=True, - lst_cached_functions=[], - ) - if disposition_column.button( - "🔐", help="Unlock for future test generation", disabled=not selected - ): - fm.reset_post_updates( - update_test_definition(selected, "lock_refresh", False, "Unlocked"), - as_toast=True, - clear_cache=True, - lst_cached_functions=[], - ) + disposition_actions = [ + { "icon": "✓", "help": "Activate for future runs", "attribute": "test_active", "value": True, "message": "Activated" }, + { "icon": "✘", "help": "Inactivate Test for future runs", "attribute": "test_active", "value": False, "message": "Inactivated" }, + { "icon": "🔒", "help": "Protect from future test generation", 
"attribute": "lock_refresh", "value": True, "message": "Locked" }, + { "icon": "🔐", "help": "Unlock for future test generation", "attribute": "lock_refresh", "value": False, "message": "Unlocked" }, + ] + + for action in disposition_actions: + action["button"] = disposition_column.button(action["icon"], help=action["help"], disabled=not selected) + + # This has to be done as a second loop - otherwise, the rest of the buttons after the clicked one are not displayed briefly while refreshing + for action in disposition_actions: + if action["button"]: + fm.reset_post_updates( + update_test_definition(selected, action["attribute"], action["value"], action["message"]), + as_toast=True, + clear_cache=True, + lst_cached_functions=[], + ) if selected: selected_test_def = selected[0] diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py index abf26f4..f77e932 100644 --- a/testgen/ui/views/test_results.py +++ b/testgen/ui/views/test_results.py @@ -125,40 +125,25 @@ def render(self, run_id: str, status: str | None = None, test_type: str | None = if "r.disposition" in dict(sorting_columns): affected_cached_functions.append(get_test_results) - if actions_column.button( - "✓", help="Confirm this issue as relevant for this run", disabled=disable_dispo - ): - fm.reset_post_updates( - do_disposition_update(selected, "Confirmed"), - as_toast=True, - clear_cache=True, - lst_cached_functions=affected_cached_functions, - ) - if actions_column.button( - "✘", help="Dismiss this issue as not relevant for this run", disabled=disable_dispo - ): - fm.reset_post_updates( - do_disposition_update(selected, "Dismissed"), - as_toast=True, - clear_cache=True, - lst_cached_functions=affected_cached_functions, - ) - if actions_column.button( - "🔇", help="Mute this test to deactivate it for future runs", disabled=not selected - ): - fm.reset_post_updates( - do_disposition_update(selected, "Inactive"), - as_toast=True, - clear_cache=True, - 
lst_cached_functions=affected_cached_functions, - ) - if actions_column.button("⟲", help="Clear action", disabled=not selected): - fm.reset_post_updates( - do_disposition_update(selected, "No Decision"), - as_toast=True, - clear_cache=True, - lst_cached_functions=affected_cached_functions, - ) + disposition_actions = [ + { "icon": "✓", "help": "Confirm this issue as relevant for this run", "status": "Confirmed" }, + { "icon": "✘", "help": "Dismiss this issue as not relevant for this run", "status": "Dismissed" }, + { "icon": "🔇", "help": "Mute this test to deactivate it for future runs", "status": "Inactive" }, + { "icon": "↩︎", "help": "Clear action", "status": "No Decision" }, + ] + + for action in disposition_actions: + action["button"] = actions_column.button(action["icon"], help=action["help"], disabled=disable_dispo) + + # This has to be done as a second loop - otherwise, the rest of the buttons after the clicked one are not displayed briefly while refreshing + for action in disposition_actions: + if action["button"]: + fm.reset_post_updates( + do_disposition_update(selected, action["status"]), + as_toast=True, + clear_cache=True, + lst_cached_functions=affected_cached_functions, + ) # Help Links st.markdown( From 7c887cfe60f1e635e1d27dce2015fb43152956ac Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Tue, 24 Sep 2024 18:19:02 -0400 Subject: [PATCH 70/78] fix(css): override streamlit's default colors for buttons and form inputs --- testgen/ui/assets/style.css | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css index b5eec64..5b8386e 100644 --- a/testgen/ui/assets/style.css +++ b/testgen/ui/assets/style.css @@ -87,22 +87,24 @@ div[data-testid="stDialog"] div[role="dialog"] { /* */ /* Theming for buttons and form inputs */ -button[data-testid="baseButton-secondary"]:hover, -button[data-testid="baseButton-secondary"]:focus:not(:active), 
-button[data-testid="baseButton-secondaryFormSubmit"]:hover, -button[data-testid="baseButton-secondaryFormSubmit"]:focus:not(:active) { +button[data-testid="stBaseButton-secondary"]:hover, +button[data-testid="stBaseButton-secondary"]:focus:not(:active), +button[data-testid="stBaseButton-secondaryFormSubmit"]:hover, +button[data-testid="stBaseButton-secondaryFormSubmit"]:focus:not(:active) { border-color: var(--primary-color); color: var(--primary-color); } -button[data-testid="baseButton-secondary"]:active, -button[data-testid="baseButton-secondaryFormSubmit"]:active, +button[data-testid="stBaseButton-secondary"]:active, +button[data-testid="stBaseButton-secondaryFormSubmit"]:active, label[data-baseweb="checkbox"]:has(input[aria-checked="true"]) > span { border-color: var(--primary-color); background-color: var(--primary-color); } -div[data-testid="stTextInput-RootElement"]:focus-within, +div[data-testid="stTextInputRootElement"]:focus-within, +div[data-testid="stNumberInputContainer"]:focus-within, +div[data-baseweb="select"]:focus-within > div, div[data-baseweb="select"] > div:has(input[aria-expanded="true"]) { border-color: var(--primary-color); } From d748c348de6c95a23bcb7ddb4fa4bbef4c01356e Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Wed, 25 Sep 2024 00:24:41 -0400 Subject: [PATCH 71/78] fix(ui): hide actions not accessible to current user role --- testgen/ui/services/form_service.py | 3 +- testgen/ui/views/connections.py | 2 +- testgen/ui/views/table_groups.py | 2 +- testgen/ui/views/test_definitions.py | 7 +- testgen/ui/views/test_results.py | 5 +- testgen/ui/views/test_suites.py | 99 ++++++++++++++-------------- 6 files changed, 62 insertions(+), 56 deletions(-) diff --git a/testgen/ui/services/form_service.py b/testgen/ui/services/form_service.py index a1a56de..ba07527 100644 --- a/testgen/ui/services/form_service.py +++ b/testgen/ui/services/form_service.py @@ -631,6 +631,7 @@ def render_edit_form( lst_key_columns, lst_disabled=None, 
str_text_display=None, + submit_disabled=False, form_unique_key: str | None = None, ): show_header(str_form_name) @@ -687,7 +688,7 @@ def render_edit_form( else: # If Hidden, add directly to dct_mods for updates dct_mods[column] = row_selected[column] - edit_allowed = authentication_service.current_user_has_edit_role() + edit_allowed = not submit_disabled and authentication_service.current_user_has_edit_role() submit = st.form_submit_button("Save Changes", disabled=not edit_allowed) if submit and edit_allowed: diff --git a/testgen/ui/views/connections.py b/testgen/ui/views/connections.py index 0f6fc5f..07a5031 100644 --- a/testgen/ui/views/connections.py +++ b/testgen/ui/views/connections.py @@ -23,7 +23,7 @@ class ConnectionsPage(Page): can_activate: typing.ClassVar = [ lambda: session.authentication_status, ] - menu_item = MenuItem(icon="database", label="Data Configuration", order=3) + menu_item = MenuItem(icon="database", label="Data Configuration", order=4) def render(self, project_code: str, **_kwargs) -> None: dataframe = connection_service.get_connections(project_code) diff --git a/testgen/ui/views/table_groups.py b/testgen/ui/views/table_groups.py index 7403743..418acb0 100644 --- a/testgen/ui/views/table_groups.py +++ b/testgen/ui/views/table_groups.py @@ -20,8 +20,8 @@ class TableGroupsPage(Page): path = "connections:table-groups" can_activate: typing.ClassVar = [ - lambda: authentication_service.current_user_has_admin_role() or "overview", lambda: session.authentication_status, + lambda: authentication_service.current_user_has_admin_role(), lambda: "connection_id" in session.current_page_args or "connections", ] diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py index f3d17eb..795f319 100644 --- a/testgen/ui/views/test_definitions.py +++ b/testgen/ui/views/test_definitions.py @@ -39,6 +39,7 @@ def render(self, test_suite_id: str, table_name: str | None = None, column_name: table_group = 
table_group_service.get_by_id(test_suite["table_groups_id"]) project_code = table_group["project_code"] project_service.set_current_project(project_code) + user_can_edit = authentication_service.current_user_has_edit_role() testgen.page_header( "Test Definitions", @@ -80,7 +81,7 @@ def render(self, test_suite_id: str, table_name: str | None = None, column_name: str_help = "Toggle on to perform actions on multiple test definitions" do_multi_select = st.toggle("Multi-Select", help=str_help) - if actions_column.button( + if user_can_edit and actions_column.button( ":material/add: Add", help="Add a new Test Definition" ): add_test_dialog(project_code, table_group, test_suite, table_name, column_name) @@ -114,14 +115,14 @@ def render(self, test_suite_id: str, table_name: str | None = None, column_name: if selected: selected_test_def = selected[0] - if actions_column.button( + if user_can_edit and actions_column.button( ":material/edit: Edit", help="Edit the Test Definition", disabled=not selected, ): edit_test_dialog(project_code, table_group, test_suite, table_name, column_name, selected_test_def) - if actions_column.button( + if user_can_edit and actions_column.button( ":material/delete: Delete", help="Delete the selected Test Definition", disabled=not selected, diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py index f77e932..2c58271 100644 --- a/testgen/ui/views/test_results.py +++ b/testgen/ui/views/test_results.py @@ -12,7 +12,7 @@ from testgen.common import ConcatColumnList, date_service from testgen.ui.components import widgets as testgen from testgen.ui.navigation.page import Page -from testgen.ui.services import project_service +from testgen.ui.services import authentication_service, project_service from testgen.ui.services.string_service import empty_if_null from testgen.ui.session import session from testgen.ui.views.profiling_modal import view_profiling_button @@ -715,7 +715,8 @@ def show_result_detail(str_run_id, 
str_sel_test_status, test_type_id, sorting_co with pg_col2: v_col1, v_col2, v_col3 = st.columns([0.33, 0.33, 0.33]) - view_edit_test(v_col1, selected_row["test_definition_id_current"]) + if authentication_service.current_user_has_edit_role(): + view_edit_test(v_col1, selected_row["test_definition_id_current"]) if selected_row["test_scope"] == "column": view_profiling_button( v_col2, selected_row["table_name"], selected_row["column_names"], diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py index 2bf5516..25d8cdc 100644 --- a/testgen/ui/views/test_suites.py +++ b/testgen/ui/views/test_suites.py @@ -24,10 +24,9 @@ class TestSuitesPage(Page): path = "test-suites" can_activate: typing.ClassVar = [ - lambda: authentication_service.current_user_has_admin_role() or "overview", lambda: session.authentication_status, ] - menu_item = MenuItem(icon="list_alt", label="Test Suites", order=4) + menu_item = MenuItem(icon="list_alt", label="Test Suites", order=3) def render(self, project_code: str | None = None, table_group_id: str | None = None, **_kwargs) -> None: project_code = st.session_state["project"] @@ -52,43 +51,46 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N ) df = test_suite_service.get_by_project(project_code, table_group_id) - - with actions_column: - st.button( - ":material/add: Add Test Suite", - key="test_suite:keys:add", - help="Add a new test suite", - on_click=lambda: add_test_suite_dialog(project_code, df_tg), - ) + user_can_edit = authentication_service.current_user_has_edit_role() + + if user_can_edit: + with actions_column: + st.button( + ":material/add: Add Test Suite", + key="test_suite:keys:add", + help="Add a new test suite", + on_click=lambda: add_test_suite_dialog(project_code, df_tg), + ) for _, test_suite in df.iterrows(): subtitle = f"{test_suite['connection_name']} > {test_suite['table_groups_name']}" with testgen.card(title=test_suite["test_suite"], subtitle=subtitle) as 
test_suite_card: - with test_suite_card.actions: - testgen.button( - type_="icon", - icon="output", - tooltip="Export results to Observability", - tooltip_position="right", - on_click=partial(observability_export_dialog, test_suite), - key=f"test_suite:keys:export:{test_suite['id']}", - ) - testgen.button( - type_="icon", - icon="edit", - tooltip="Edit test suite", - tooltip_position="right", - on_click=partial(edit_test_suite_dialog, project_code, df_tg, test_suite), - key=f"test_suite:keys:edit:{test_suite['id']}", - ) - testgen.button( - type_="icon", - icon="delete", - tooltip="Delete test suite", - tooltip_position="right", - on_click=partial(delete_test_suite_dialog, test_suite), - key=f"test_suite:keys:delete:{test_suite['id']}", - ) + if user_can_edit: + with test_suite_card.actions: + testgen.button( + type_="icon", + icon="output", + tooltip="Export results to Observability", + tooltip_position="right", + on_click=partial(observability_export_dialog, test_suite), + key=f"test_suite:keys:export:{test_suite['id']}", + ) + testgen.button( + type_="icon", + icon="edit", + tooltip="Edit test suite", + tooltip_position="right", + on_click=partial(edit_test_suite_dialog, project_code, df_tg, test_suite), + key=f"test_suite:keys:edit:{test_suite['id']}", + ) + testgen.button( + type_="icon", + icon="delete", + tooltip="Delete test suite", + tooltip_position="right", + on_click=partial(delete_test_suite_dialog, test_suite), + key=f"test_suite:keys:delete:{test_suite['id']}", + ) main_section, latest_run_section, actions_section = st.columns([.4, .4, .2]) @@ -134,19 +136,20 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N else: st.markdown("--") - with actions_section: - testgen.button( - type_="stroked", - label="Run Tests", - on_click=partial(run_tests_dialog, project_code, test_suite), - key=f"test_suite:keys:runtests:{test_suite['id']}", - ) - testgen.button( - type_="stroked", - label="Generate Tests", - 
on_click=partial(generate_tests_dialog, test_suite), - key=f"test_suite:keys:generatetests:{test_suite['id']}", - ) + if user_can_edit: + with actions_section: + testgen.button( + type_="stroked", + label="Run Tests", + on_click=partial(run_tests_dialog, project_code, test_suite), + key=f"test_suite:keys:runtests:{test_suite['id']}", + ) + testgen.button( + type_="stroked", + label="Generate Tests", + on_click=partial(generate_tests_dialog, test_suite), + key=f"test_suite:keys:generatetests:{test_suite['id']}", + ) @st.cache_data(show_spinner=False) From d74871cb84b8f92f75e21ec4d94395ec5e377ab6 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Wed, 25 Sep 2024 20:53:51 -0400 Subject: [PATCH 72/78] fix(ui): workaround for form inputs disappearing in dialogs when button clicked --- testgen/ui/assets/style.css | 8 +++++- testgen/ui/views/connections.py | 33 ++++++++++++++++--------- testgen/ui/views/overview.py | 2 +- testgen/ui/views/profiling_anomalies.py | 2 +- testgen/ui/views/profiling_modal.py | 2 +- testgen/ui/views/test_definitions.py | 6 +++++ testgen/ui/views/test_results.py | 2 +- 7 files changed, 38 insertions(+), 17 deletions(-) diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css index 5b8386e..e183bf9 100644 --- a/testgen/ui/assets/style.css +++ b/testgen/ui/assets/style.css @@ -86,7 +86,7 @@ div[data-testid="stDialog"] div[role="dialog"] { } /* */ -/* Theming for buttons and form inputs */ +/* Theming for buttons, tabs and form inputs */ button[data-testid="stBaseButton-secondary"]:hover, button[data-testid="stBaseButton-secondary"]:focus:not(:active), button[data-testid="stBaseButton-secondaryFormSubmit"]:hover, @@ -109,6 +109,12 @@ div[data-baseweb="select"] > div:has(input[aria-expanded="true"]) { border-color: var(--primary-color); } +button[data-testid="stTab"][aria-selected="true"], +button[data-testid="stTab"]:hover { + color: var(--primary-color); +} + +div[data-baseweb="tab-highlight"], 
label[data-baseweb="radio"]:has(input[tabindex="0"]) > div:first-child, label[data-baseweb="checkbox"]:has(input[aria-checked="true"]) > div:first-child { background-color: var(--primary-color); diff --git a/testgen/ui/views/connections.py b/testgen/ui/views/connections.py index 07a5031..33df711 100644 --- a/testgen/ui/views/connections.py +++ b/testgen/ui/views/connections.py @@ -168,7 +168,6 @@ def show_connection_form(self, selected_connection, mode, project_code): bottom_left_column, bottom_right_column = st.columns([0.25, 0.75]) button_left_column, button_right_column = st.columns([0.20, 0.80]) connection_status_wrapper = st.container() - connection_status_container = connection_status_wrapper.empty() connection_id = selected_connection["connection_id"] if mode == "edit" else None connection_name = selected_connection["connection_name"] if mode == "edit" else "" @@ -382,18 +381,28 @@ def on_connect_by_url_change(): test_connection = button_left_column.button("Test Connection") if test_connection: - connection_status_container.empty() - connection_status_container.info("Testing the connection...") - + single_element_container = connection_status_wrapper.empty() + single_element_container.info("Connecting ...") connection_status = self.test_connection(new_connection) - renderer = { - True: connection_status_container.success, - False: connection_status_container.error, - }[connection_status.successful] - - renderer(connection_status.message) - if not connection_status.successful and connection_status.details: - st.text_area("Connection Error Details", value=connection_status.details) + + with single_element_container.container(): + renderer = { + True: st.success, + False: st.error, + }[connection_status.successful] + + renderer(connection_status.message) + if not connection_status.successful and connection_status.details: + st.caption("Connection Error Details") + + with st.container(border=True): + st.markdown(connection_status.details) + else: + # This is 
needed to fix a strange bug in Streamlit when using dialog + input fields + button + # If an input field is changed and the button is clicked immediately (without unfocusing the input first), + # two fragment reruns happen successively, one for unfocusing the input and the other for clicking the button + # Some or all (it seems random) of the input fields disappear when this happens + time.sleep(0.1) def test_connection(self, connection: dict) -> "ConnectionStatus": if connection["connect_by_key"] and connection["connection_id"] is None: diff --git a/testgen/ui/views/overview.py b/testgen/ui/views/overview.py index 2423fef..132b66e 100644 --- a/testgen/ui/views/overview.py +++ b/testgen/ui/views/overview.py @@ -188,7 +188,7 @@ def render_test_suite_item(test_suite: pd.Series, column_spec: list[int], key: i with generation_column: if (latest_generation := test_suite["latest_auto_gen_date"]) and pd.notnull(latest_generation): - st.html(f'

{date_service.get_timezoned_timestamp(st.session_state, latest_generation)}

') + testgen.text(date_service.get_timezoned_timestamp(st.session_state, latest_generation)) else: st.markdown("--") diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py index 60a6fc1..7dfbe7a 100644 --- a/testgen/ui/views/profiling_anomalies.py +++ b/testgen/ui/views/profiling_anomalies.py @@ -188,7 +188,7 @@ def render(self, run_id: str, issue_class: str | None = None, issue_type: str | ) with v_col2: if st.button( - ":green[Source Data →]", help="Review current source data for highlighted issue", use_container_width=True + "Source Data →", help="Review current source data for highlighted issue", use_container_width=True ): source_data_dialog(selected_row) diff --git a/testgen/ui/views/profiling_modal.py b/testgen/ui/views/profiling_modal.py index 5a8ea45..26f3078 100644 --- a/testgen/ui/views/profiling_modal.py +++ b/testgen/ui/views/profiling_modal.py @@ -8,7 +8,7 @@ LOG = logging.getLogger("testgen") -BUTTON_TEXT = ":green[Profiling →]" # Profiling ⚲ +BUTTON_TEXT = "Profiling →" # Profiling ⚲ BUTTON_HELP = "Review profiling for highlighted column" diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py index 795f319..0892998 100644 --- a/testgen/ui/views/test_definitions.py +++ b/testgen/ui/views/test_definitions.py @@ -569,6 +569,12 @@ def show_test_form( bottom_right_column.success("Validation is successful.") except Exception as e: bottom_right_column.error(f"Test validation failed with error: {e}") + else: + # This is needed to fix a strange bug in Streamlit when using dialog + input fields + button + # If an input field is changed and the button is clicked immediately (without unfocusing the input first), + # two fragment reruns happen successively, one for unfocusing the input and the other for clicking the button + # Some or all (it seems random) of the input fields disappear when this happens + time.sleep(0.1) submit = bottom_left_column.button("Save", 
disabled=authentication_service.current_user_has_read_role()) diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py index 2c58271..2101359 100644 --- a/testgen/ui/views/test_results.py +++ b/testgen/ui/views/test_results.py @@ -837,7 +837,7 @@ def do_disposition_update(selected, str_new_status): def view_bad_data(button_container, selected_row): with button_container: if st.button( - ":green[Source Data →]", help="Review current source data for highlighted result", use_container_width=True + "Source Data →", help="Review current source data for highlighted result", use_container_width=True ): source_data_dialog(selected_row) From 24c128dfc9c4b856358df072dbe443a6905e40ab Mon Sep 17 00:00:00 2001 From: Luis Trinidad Date: Tue, 24 Sep 2024 09:54:58 -0400 Subject: [PATCH 73/78] refactor(ui): replace grid with cards in table group list --- testgen/ui/views/table_groups.py | 382 ++++++++++++++----------------- 1 file changed, 172 insertions(+), 210 deletions(-) diff --git a/testgen/ui/views/table_groups.py b/testgen/ui/views/table_groups.py index 418acb0..1f82de5 100644 --- a/testgen/ui/views/table_groups.py +++ b/testgen/ui/views/table_groups.py @@ -1,5 +1,6 @@ import time import typing +from functools import partial import pandas as pd import streamlit as st @@ -50,149 +51,151 @@ def render(self, connection_id: str, **_kwargs) -> None: df = table_group_service.get_by_connection(project_code, connection_id) - show_columns = [ - "table_groups_name", - "table_group_schema", - "profiling_include_mask", - "profiling_exclude_mask", - "profiling_table_set", - "profile_use_sampling", - "profiling_delay_days", - ] - - show_column_headers = [ - "Table Groups Name", - "DB Schema", - "Tables to Include Mask", - "Tables to Exclude Mask", - "Explicit Table List", - "Uses Record Sampling", - "Min Profiling Age (Days)", - ] - - selected = fm.render_grid_select(df, show_columns, show_column_headers=show_column_headers) - - if actions_column.button( - 
":material/add: Add", help="Add a new Table Group" - ): - add_table_group_dialog(project_code, connection) - - disable_buttons = selected is None - if actions_column.button( - ":material/edit: Edit", help="Edit the selected Table Group", disabled=disable_buttons - ): - edit_table_group_dialog(project_code, connection, selected) - - if actions_column.button( - ":material/delete: Delete", help="Delete the selected Table Group", disabled=disable_buttons - ): - delete_table_group_dialog(selected) - - if actions_column.button( - f":{'gray' if disable_buttons else 'green'}[Test Suites →]", - help="Create or edit Test Suites for the selected Table Group", - disabled=disable_buttons, - ): - self.router.navigate( - "test-suites", - {"table_group_id": selected[0]["id"]}, - ) + for _, table_group in df.iterrows(): + with testgen.card(title=table_group["table_groups_name"]) as table_group_card: + with table_group_card.actions: + testgen.button( + type_="icon", + icon="edit", + tooltip="Edit table group", + tooltip_position="right", + on_click=partial(self.edit_table_group_dialog, project_code, connection, table_group), + key=f"tablegroups:keys:edit:{table_group['id']}", + ) + testgen.button( + type_="icon", + icon="delete", + tooltip="Delete table group", + tooltip_position="right", + on_click=partial(self.delete_table_group_dialog, table_group), + key=f"tablegroups:keys:delete:{table_group['id']}", + ) - if not selected: - st.markdown(":orange[Select a row to see Table Group details.]") - else: - show_record_detail(selected[0]) + main_section, actions_section = st.columns([.8, .2]) + + with main_section: + testgen.link( + label="Test Suites", + href="test-suites", + params={"table_group_id": table_group["id"]}, + right_icon="chevron_right", + key=f"tablegroups:keys:go-to-tsuites:{table_group['id']}", + ) + col1, col2, col3 = st.columns([1/3] * 3, vertical_alignment="bottom") + col4, col5, col6 = st.columns([1/3] * 3, vertical_alignment="bottom") + + with col1: + 
testgen.no_flex_gap() + testgen.caption("DB Schema") + st.markdown(table_group["table_group_schema"] or "--") + with col2: + testgen.no_flex_gap() + testgen.caption("Tables to Include Mask") + st.markdown(table_group["profiling_include_mask"] or "--") + with col3: + testgen.no_flex_gap() + testgen.caption("Tables to Exclude Mask") + st.markdown(table_group["profiling_exclude_mask"] or "--") + with col4: + testgen.no_flex_gap() + testgen.caption("Explicit Table List") + st.markdown(table_group["profiling_table_set"] or "--") + with col5: + testgen.no_flex_gap() + testgen.caption("Uses Record Sampling") + st.markdown(table_group["profile_use_sampling"] or "N") + with col6: + testgen.no_flex_gap() + testgen.caption("Min Profiling Age (Days)") + st.markdown(table_group["profiling_delay_days"] or "0") + + with actions_section: + testgen.button( + type_="stroked", + label="Run Profiling", + on_click=partial(run_profiling_dialog, table_group), + key=f"tablegroups:keys:runprofiling:{table_group['id']}", + ) + + actions_column.button( + ":material/add: Add Table Group", + help="Add a new Table Group", + on_click=partial(self.add_table_group_dialog, project_code, connection) + ) -def show_record_detail(selected): - left_column, right_column = st.columns([0.5, 0.5]) + @st.dialog(title="Add Table Group") + def add_table_group_dialog(self, project_code, connection): + show_table_group_form("add", project_code, connection) + + @st.dialog(title="Edit Table Group") + def edit_table_group_dialog(self, project_code: str, connection: dict, table_group: pd.Series): + show_table_group_form("edit", project_code, connection, table_group) + + @st.dialog(title="Delete Table Group") + def delete_table_group_dialog(self, table_group: pd.Series): + table_group_name = table_group["table_groups_name"] + can_be_deleted = table_group_service.cascade_delete([table_group_name], dry_run=True) - with left_column: fm.render_html_list( - selected, - lst_columns=[ + table_group, + [ "id", - 
"project_code", "table_groups_name", "table_group_schema", - "profiling_include_mask", - "profiling_exclude_mask", - "profiling_table_set", - "profile_id_column_mask", - "profile_sk_column_mask", - - "data_source", - "source_system", - "data_location", - "business_domain", - "transform_level", - "source_process", - "stakeholder_group", - - "profile_use_sampling", - "profile_sample_percent", - "profile_sample_min_count", - "profiling_delay_days", ], - str_section_header="Table Group Information", + "Table Group Information", int_data_width=700, - lst_labels=[ - "id", - "Project", - "Table Groups Name", - "Database Schema", - "Tables to Include Mask", - "Tables to Exlude Mask", - "Explicit Table List", - "ID Column Mask", - "Surrogate Key Column Mask", - - "Data Source", - "Source System", - "Data Location", - "Business Domain", - "Transform Level", - "Source Process", - "Stakeholder Group", - - "Uses Record Sampling", - "Sample Record Percent", - "Sample Minimum Record Count", - "Minimum Profiling Age (Days)", - ], ) - with right_column: - st.write("

", unsafe_allow_html=True) - _, button_column = st.columns([0.3, 0.7]) - with button_column: - if st.button("Run Profiling", help="Performs profiling on the Table Group", use_container_width=True): - run_profiling_dialog(selected) - if st.button( - "Show Run Profile CLI Command", help="Shows the run-profile CLI command", use_container_width=True - ): - run_profiling_cli_dialog(selected) + if not can_be_deleted: + st.markdown( + ":orange[This Table Group has related data, which may include profiling, test definitions and test results. If you proceed, all related data will be permanently deleted.
Are you sure you want to proceed?]", + unsafe_allow_html=True, + ) + accept_cascade_delete = st.toggle("I accept deletion of this Table Group and all related TestGen data.") + + with st.form("Delete Table Group", clear_on_submit=True): + disable_delete_button = authentication_service.current_user_has_read_role() or ( + not can_be_deleted and not accept_cascade_delete + ) + delete = st.form_submit_button("Delete", disabled=disable_delete_button, type="primary") + + if delete: + if table_group_service.are_table_groups_in_use([table_group_name]): + st.error("This Table Group is in use by a running process and cannot be deleted.") + else: + table_group_service.cascade_delete([table_group_name]) + success_message = f"Table Group {table_group_name} has been deleted. " + st.success(success_message) + time.sleep(1) + st.rerun() @st.dialog(title="Run Profiling") -def run_profiling_dialog(selected_table_group): - container = st.empty() - with container: +def run_profiling_dialog(table_group: pd.Series) -> None: + table_group_id = table_group["id"] + + with st.container(): st.markdown( - ":green[Execute Profile for the Table Group (since can take time, it is performed in background)]" + f"Execute profiling for the Table Group :green[{table_group['table_groups_name']}]?" 
+ " Profiling will be performed in a background process" ) + if testgen.expander_toggle(expand_label="Show CLI command", key="test_suite:keys:run-tests-show-cli"): + st.code(f"testgen run-profile --table-group-id {table_group_id}", language="shellSession") + button_container = st.empty() status_container = st.empty() with button_container: - start_process_button_message = "Start" - profile_button = st.button(start_process_button_message) + _, button_column = st.columns([.85, .15]) + with button_column: + profile_button = st.button("Start", use_container_width=True) if profile_button: button_container.empty() - table_group_id = selected_table_group["id"] status_container.info("Executing Profiling...") try: @@ -207,80 +210,40 @@ def run_profiling_dialog(selected_table_group): ) -@st.dialog(title="Run Profiling CLI Command") -def run_profiling_cli_dialog(selected_table_group): - table_group_id = selected_table_group["id"] - profile_command = f"testgen run-profile --table-group-id {table_group_id}" - st.code(profile_command, language="shellSession") - - -@st.dialog(title="Delete Table Group") -def delete_table_group_dialog(selected): - selected_table_group = selected[0] - table_group_name = selected_table_group["table_groups_name"] - can_be_deleted = table_group_service.cascade_delete([table_group_name], dry_run=True) - - fm.render_html_list( - selected_table_group, - [ - "id", - "table_groups_name", - "table_group_schema", - ], - "Table Group Information", - int_data_width=700, - ) - - if not can_be_deleted: - st.markdown( - ":orange[This Table Group has related data, which may include profiling, test definitions and test results. If you proceed, all related data will be permanently deleted.
Are you sure you want to proceed?]", - unsafe_allow_html=True, - ) - accept_cascade_delete = st.toggle("I accept deletion of this Table Group and all related TestGen data.") - - with st.form("Delete Table Group", clear_on_submit=True): - disable_delete_button = authentication_service.current_user_has_read_role() or ( - not can_be_deleted and not accept_cascade_delete - ) - delete = st.form_submit_button("Delete", disabled=disable_delete_button, type="primary") - - if delete: - if table_group_service.are_table_groups_in_use([table_group_name]): - st.error("This Table Group is in use by a running process and cannot be deleted.") - else: - table_group_service.cascade_delete([table_group_name]) - success_message = f"Table Group {table_group_name} has been deleted. " - st.success(success_message) - time.sleep(1) - st.rerun() - - -def show_table_group_form(mode, project_code, connection, selected=None): +def show_table_group_form(mode, project_code: str, connection: dict, table_group: pd.Series | None = None): connection_id = connection["connection_id"] table_groups_settings_tab, table_groups_preview_tab = st.tabs(["Table Group Settings", "Test"]) + table_group_id = None + table_groups_name = "" + table_group_schema = "" + profiling_table_set = "" + profiling_include_mask = "%" + profiling_exclude_mask = "tmp%" + profile_id_column_mask = "%_id" + profile_sk_column_mask = "%_sk" + profile_use_sampling = False + profile_sample_percent = 30 + profile_sample_min_count = 15000 + profiling_delay_days = 0 + with table_groups_settings_tab: - selected_table_group = selected[0] if mode == "edit" else None - - # establish default values - table_group_id = selected_table_group["id"] if mode == "edit" else None - table_groups_name = selected_table_group["table_groups_name"] if mode == "edit" else "" - table_group_schema = selected_table_group["table_group_schema"] if mode == "edit" else "" - profiling_table_set = ( - selected_table_group["profiling_table_set"] - if mode == "edit" and 
selected_table_group["profiling_table_set"] - else "" - ) - profiling_include_mask = selected_table_group["profiling_include_mask"] if mode == "edit" else "%" - profiling_exclude_mask = selected_table_group["profiling_exclude_mask"] if mode == "edit" else "tmp%" - profile_id_column_mask = selected_table_group["profile_id_column_mask"] if mode == "edit" else "%_id" - profile_sk_column_mask = selected_table_group["profile_sk_column_mask"] if mode == "edit" else "%_sk" - profile_use_sampling = selected_table_group["profile_use_sampling"] == "Y" if mode == "edit" else False - profile_sample_percent = int(selected_table_group["profile_sample_percent"]) if mode == "edit" else 30 - profile_sample_min_count = ( - int(selected_table_group["profile_sample_min_count"]) if mode == "edit" else 15000 - ) - profiling_delay_days = int(selected_table_group["profiling_delay_days"]) if mode == "edit" else 0 + selected_table_group = table_group if mode == "edit" else None + + if selected_table_group is not None: + # establish default values + table_group_id = selected_table_group["id"] + table_groups_name = selected_table_group["table_groups_name"] + table_group_schema = selected_table_group["table_group_schema"] + profiling_table_set = selected_table_group["profiling_table_set"] + profiling_include_mask = selected_table_group["profiling_include_mask"] + profiling_exclude_mask = selected_table_group["profiling_exclude_mask"] + profile_id_column_mask = selected_table_group["profile_id_column_mask"] + profile_sk_column_mask = selected_table_group["profile_sk_column_mask"] + profile_use_sampling = selected_table_group["profile_use_sampling"] == "Y" + profile_sample_percent = int(selected_table_group["profile_sample_percent"]) + profile_sample_min_count = int(selected_table_group["profile_sample_min_count"]) + profiling_delay_days = int(selected_table_group["profiling_delay_days"]) left_column, right_column = st.columns([0.50, 0.50]) @@ -292,7 +255,7 @@ def show_table_group_form(mode, 
project_code, connection, selected=None): with provenance_expander: provenance_left_column, provenance_right_column = st.columns([0.50, 0.50]) - with st.form("Table Group Add / Edit", clear_on_submit=True): + with st.form("Table Group Add / Edit", clear_on_submit=True, border=False): entity = { "id": table_group_id, "project_code": project_code, @@ -370,58 +333,67 @@ def show_table_group_form(mode, project_code, connection, selected=None): "data_source": provenance_left_column.text_input( label="Data Source", max_chars=40, - value=empty_if_null(selected_table_group["data_source"]) if mode == "edit" else "", + value=empty_if_null(selected_table_group["data_source"]) + if mode == "edit" and selected_table_group is not None else "", help="Original source of all tables in this dataset. This can be overridden at the table level. (Optional)", ), "source_system": provenance_left_column.text_input( label="System of Origin", max_chars=40, - value=empty_if_null(selected_table_group["source_system"]) if mode == "edit" else "", + value=empty_if_null(selected_table_group["source_system"]) + if mode == "edit" and selected_table_group is not None else "", help="Enterprise system source for all tables in this dataset. " "This can be overridden at the table level. (Optional)", ), "business_domain": provenance_left_column.text_input( label="Business Domain", max_chars=40, - value=empty_if_null(selected_table_group["business_domain"]) if mode == "edit" else "", + value=empty_if_null(selected_table_group["business_domain"]) + if mode == "edit" and selected_table_group is not None else "", help="Business division responsible for all tables in this dataset. " "e.g. Finance, Sales, Manufacturing. 
(Optional)", ), "data_location": provenance_left_column.text_input( label="Location", max_chars=40, - value=empty_if_null(selected_table_group["data_location"]) if mode == "edit" else "", + value=empty_if_null(selected_table_group["data_location"]) + if mode == "edit" and selected_table_group is not None else "", help="Physical or virtual location of all tables in this dataset. " "e.g. Headquarters, Cloud, etc. (Optional)", ), "transform_level": provenance_right_column.text_input( label="Transform Level", max_chars=40, - value=empty_if_null(selected_table_group["transform_level"]) if mode == "edit" else "", + value=empty_if_null(selected_table_group["transform_level"]) + if mode == "edit" and selected_table_group is not None else "", help="Data warehouse processing layer. " "Indicates the processing stage: e.g. Raw, Conformed, Processed, Reporting. (Optional)", ), "source_process": provenance_right_column.text_input( label="Source Process", max_chars=40, - value=empty_if_null(selected_table_group["source_process"]) if mode == "edit" else "", + value=empty_if_null(selected_table_group["source_process"]) + if mode == "edit" and selected_table_group is not None else "", help="The process, program or data flow that produced this data. (Optional)", ), "stakeholder_group": provenance_right_column.text_input( label="Stakeholder Group", max_chars=40, - value=empty_if_null(selected_table_group["stakeholder_group"]) if mode == "edit" else "", + value=empty_if_null(selected_table_group["stakeholder_group"]) + if mode == "edit" and selected_table_group is not None else "", help="Designator for data owners or stakeholders who are responsible for this data. 
(Optional)", ), } - submit_button_text = "Save" if mode == "edit" else "Add" - submit = st.form_submit_button( - submit_button_text, disabled=authentication_service.current_user_has_read_role() - ) + _, button_column = st.columns([.85, .15]) + with button_column: + submit = st.form_submit_button( + "Save" if mode == "edit" else "Add", + use_container_width=True, + disabled=authentication_service.current_user_has_read_role(), + ) if submit: - if not entity["table_groups_name"]: st.error("'Name' is required. ") return @@ -495,13 +467,3 @@ def show_test_results(schemas, tables, columns, qc_results): tables_df = pd.DataFrame({"[tables]": list(tables)}) fm.render_grid_select(tables_df, ["[tables]"]) - - -@st.dialog(title="Add Table Group") -def add_table_group_dialog(project_code, connection): - show_table_group_form("add", project_code, connection) - - -@st.dialog(title="Edit Table Group") -def edit_table_group_dialog(project_code, connection, selected): - show_table_group_form("edit", project_code, connection, selected) From c64736f49fb6580e9ab2e5dc670206231457d41c Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Mon, 30 Sep 2024 12:33:12 -0400 Subject: [PATCH 74/78] fix(ui): reduce pagination size on run pages --- testgen/ui/views/profiling_summary.py | 2 +- testgen/ui/views/test_runs.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/testgen/ui/views/profiling_summary.py b/testgen/ui/views/profiling_summary.py index c49921e..390aca5 100644 --- a/testgen/ui/views/profiling_summary.py +++ b/testgen/ui/views/profiling_summary.py @@ -17,7 +17,7 @@ from testgen.utils import to_int FORM_DATA_WIDTH = 400 -PAGE_SIZE = 50 +PAGE_SIZE = 10 class DataProfilingPage(Page): diff --git a/testgen/ui/views/test_runs.py b/testgen/ui/views/test_runs.py index 0c26007..af0d3ba 100644 --- a/testgen/ui/views/test_runs.py +++ b/testgen/ui/views/test_runs.py @@ -16,7 +16,7 @@ from testgen.ui.session import session from testgen.utils import to_int -PAGE_SIZE = 50 
+PAGE_SIZE = 10 class TestRunsPage(Page): From b213ea301a16f0a2d5fbd84952cb07d0694c795a Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Mon, 30 Sep 2024 13:20:15 -0400 Subject: [PATCH 75/78] refactor(components): make summary_bar a python-only component --- testgen/ui/assets/style.css | 26 +++++++++ testgen/ui/components/frontend/js/main.js | 2 - testgen/ui/components/widgets/summary_bar.py | 55 +++++++++++++++----- testgen/ui/queries/test_suite_queries.py | 2 +- testgen/ui/views/overview.py | 13 ++--- testgen/ui/views/profiling_anomalies.py | 2 - testgen/ui/views/profiling_summary.py | 1 - testgen/ui/views/test_results.py | 2 +- testgen/ui/views/test_runs.py | 1 - testgen/ui/views/test_suites.py | 1 - 10 files changed, 76 insertions(+), 29 deletions(-) diff --git a/testgen/ui/assets/style.css b/testgen/ui/assets/style.css index e183bf9..1a1d86f 100644 --- a/testgen/ui/assets/style.css +++ b/testgen/ui/assets/style.css @@ -241,6 +241,32 @@ Use as testgen.text("text", "extra_styles") */ } /* */ +/* Summary bar component */ +.tg-summary-bar--label { + margin-bottom: 4px; +} + +.tg-summary-bar { + height: 100%; + display: flex; + flex-flow: row nowrap; + align-items: flex-start; + justify-content: flex-start; + border-radius: 4px; + overflow: hidden; +} + +.tg-summary-bar--item { + height: 100%; +} + +.tg-summary-bar--caption { + margin-top: 4px; + color: var(--caption-text-color); + font-style: italic; +} +/* */ + /* Dark mode */ @media (prefers-color-scheme: dark) { body { diff --git a/testgen/ui/components/frontend/js/main.js b/testgen/ui/components/frontend/js/main.js index bf8bc4b..ece2e49 100644 --- a/testgen/ui/components/frontend/js/main.js +++ b/testgen/ui/components/frontend/js/main.js @@ -13,7 +13,6 @@ import { ExpanderToggle } from './components/expander_toggle.js'; import { Link } from './components/link.js'; import { Paginator } from './components/paginator.js'; import { Select } from './components/select.js' -import { SummaryBar } from 
'./components/summary_bar.js'; import { SortingSelector } from './components/sorting_selector.js'; let currentWindowVan = van; @@ -29,7 +28,6 @@ const TestGenComponent = (/** @type {string} */ id, /** @type {object} */ props) select: Select, sorting_selector: SortingSelector, sidebar: window.top.testgen.components.Sidebar, - summary_bar: SummaryBar, }; if (Object.keys(componentById).includes(id)) { diff --git a/testgen/ui/components/widgets/summary_bar.py b/testgen/ui/components/widgets/summary_bar.py index fe1576a..c4b636d 100644 --- a/testgen/ui/components/widgets/summary_bar.py +++ b/testgen/ui/components/widgets/summary_bar.py @@ -1,17 +1,23 @@ -import logging import typing -from testgen.ui.components.utils.component import component - -LOG = logging.getLogger("testgen") +import streamlit as st +COLOR_MAP = { + "red": "#EF5350", + "orange": "#FF9800", + "yellow": "#FDD835", + "green": "#9CCC65", + "purple": "#AB47BC", + "blue": "#42A5F5", + "brown": "#8D6E63", + "grey": "#BDBDBD", +} def summary_bar( items: list["SummaryItem"], label: str | None = None, - height: int | None = None, + height: int = 24, width: int | None = None, - key: str = "testgen:summary_bar", ) -> None: """ Testgen component to display a summary status bar. @@ -23,12 +29,37 @@ def summary_bar( :param key: unique key to give the component a persisting state """ - component( - id_="summary_bar", - key=key, - default={}, - props={"items": items, "label": label, "height": height, "width": width}, - ) + label_div = "" + item_spans = "" + caption_div = "" + + if label: + label_div = f""" +
+ {label} +
+ """ + + total = sum(item["value"] for item in items) + if total: + item_spans = "".join([ f'' for item in items ]) + + caption = ", ".join([ f"{item['label']}: {item['value']}" for item in items ]) + caption_div = f""" +
+ {caption} +
+ """ + + st.html(f""" +
+ {label_div} +
+ {item_spans} +
+ {caption_div} +
+ """) class SummaryItem(typing.TypedDict): diff --git a/testgen/ui/queries/test_suite_queries.py b/testgen/ui/queries/test_suite_queries.py index 8885545..80a3fcc 100644 --- a/testgen/ui/queries/test_suite_queries.py +++ b/testgen/ui/queries/test_suite_queries.py @@ -23,7 +23,7 @@ def get_by_project(schema, project_code, table_group_id=None): SELECT test_runs.test_suite_id, test_runs.id, test_runs.test_starttime, - COUNT(*) as test_ct, + test_runs.test_ct, SUM( CASE WHEN COALESCE(test_results.disposition, 'Confirmed') = 'Confirmed' diff --git a/testgen/ui/views/overview.py b/testgen/ui/views/overview.py index 132b66e..9a546ea 100644 --- a/testgen/ui/views/overview.py +++ b/testgen/ui/views/overview.py @@ -119,7 +119,6 @@ def render_table_group_card(table_group: pd.Series, project_code: str, key: int) { "label": "Possible", "value": to_int(table_group["latest_anomalies_possible_ct"]), "color": "yellow" }, { "label": "Dismissed", "value": to_int(table_group["latest_anomalies_dismissed_ct"]), "color": "grey" }, ], - key=f"anomalies_{key}", height=12, width=280, ) @@ -146,7 +145,6 @@ def render_table_group_card(table_group: pd.Series, project_code: str, key: int) { "label": "Error", "value": to_int(table_group["latest_tests_error_ct"]), "color": "brown" }, { "label": "Dismissed", "value": to_int(table_group["latest_tests_dismissed_ct"]), "color": "grey" }, ], - key=f"tests_{key}", height=12, width=350, ) @@ -154,10 +152,10 @@ def render_table_group_card(table_group: pd.Series, project_code: str, key: int) st.markdown("--") if expand_toggle: - render_table_group_expanded(table_group["id"], project_code, key) + render_table_group_expanded(table_group["id"], project_code) -def render_table_group_expanded(table_group_id: str, project_code: str, key: int) -> None: +def render_table_group_expanded(table_group_id: str, project_code: str) -> None: testgen.divider(8, 12) column_spec = [0.25, 0.15, 0.15, 0.5] @@ -170,11 +168,11 @@ def render_table_group_expanded(table_group_id: 
str, project_code: str, key: int test_suites_df: pd.DataFrame = test_suite_service.get_by_project(project_code, table_group_id) - for index, suite in test_suites_df.iterrows(): - render_test_suite_item(suite, column_spec, f"{key}_{index}") + for suite in test_suites_df: + render_test_suite_item(suite, column_spec) -def render_test_suite_item(test_suite: pd.Series, column_spec: list[int], key: int) -> None: +def render_test_suite_item(test_suite: pd.Series, column_spec: list[int]) -> None: suite_column, generation_column, run_column, results_column = st.columns(column_spec) with suite_column: testgen.no_flex_gap() @@ -213,7 +211,6 @@ def render_test_suite_item(test_suite: pd.Series, column_spec: list[int], key: i { "label": "Error", "value": to_int(test_suite["last_run_error_ct"]), "color": "brown" }, { "label": "Dismissed", "value": to_int(test_suite["last_run_dismissed_ct"]), "color": "grey" }, ], - key=f"tests_{key}", height=8, width=200, ) diff --git a/testgen/ui/views/profiling_anomalies.py b/testgen/ui/views/profiling_anomalies.py index 7dfbe7a..29dc430 100644 --- a/testgen/ui/views/profiling_anomalies.py +++ b/testgen/ui/views/profiling_anomalies.py @@ -104,7 +104,6 @@ def render(self, run_id: str, issue_class: str | None = None, issue_type: str | testgen.summary_bar( items=others_summary, label="Hygiene Issues", - key="test_results_summary:others", height=40, width=400, ) @@ -115,7 +114,6 @@ def render(self, run_id: str, issue_class: str | None = None, issue_type: str | testgen.summary_bar( items=anomalies_pii_summary, label="Potential PII", - key="test_results_summary:pii", height=40, width=400, ) diff --git a/testgen/ui/views/profiling_summary.py b/testgen/ui/views/profiling_summary.py index 390aca5..1a0beb0 100644 --- a/testgen/ui/views/profiling_summary.py +++ b/testgen/ui/views/profiling_summary.py @@ -151,7 +151,6 @@ def render_profiling_run_row(profiling_run: pd.Series, column_spec: list[int]) - ], height=10, width=280, - 
key=f"test_run:keys:summary:{profiling_run_id}", ) testgen.link( label=f"View {anomaly_count} issues", diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py index 592df08..a521c05 100644 --- a/testgen/ui/views/test_results.py +++ b/testgen/ui/views/test_results.py @@ -51,7 +51,7 @@ def render(self, run_id: str, status: str | None = None, test_type: str | None = # Display summary bar tests_summary = get_test_result_summary(run_id) - testgen.summary_bar(items=tests_summary, key="test_results", height=40, width=800) + testgen.summary_bar(items=tests_summary, height=40, width=800) # Setup Toolbar status_filter_column, test_type_filter_column, sort_column, actions_column, export_button_column = st.columns( diff --git a/testgen/ui/views/test_runs.py b/testgen/ui/views/test_runs.py index af0d3ba..6aa358e 100644 --- a/testgen/ui/views/test_runs.py +++ b/testgen/ui/views/test_runs.py @@ -151,7 +151,6 @@ def render_test_run_row(test_run: pd.Series, column_spec: list[int]) -> None: ], height=10, width=300, - key=f"test_run:keys:summary:{test_run_id}", ) else: st.markdown("--") diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py index 25d8cdc..780a2ed 100644 --- a/testgen/ui/views/test_suites.py +++ b/testgen/ui/views/test_suites.py @@ -131,7 +131,6 @@ def render(self, project_code: str | None = None, table_group_id: str | None = N ], height=20, width=350, - key=f"test_suite:keys:run-rummary:{test_suite['id']}", ) else: st.markdown("--") From c57853400e3531b7d45705775c620521fd3151cd Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Mon, 30 Sep 2024 16:58:22 -0400 Subject: [PATCH 76/78] fix(overview): fix broken expanded cards --- testgen/ui/views/overview.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testgen/ui/views/overview.py b/testgen/ui/views/overview.py index 9a546ea..727d643 100644 --- a/testgen/ui/views/overview.py +++ b/testgen/ui/views/overview.py @@ -168,7 +168,7 @@ def 
render_table_group_expanded(table_group_id: str, project_code: str) -> None: test_suites_df: pd.DataFrame = test_suite_service.get_by_project(project_code, table_group_id) - for suite in test_suites_df: + for _, suite in test_suites_df.iterrows(): render_test_suite_item(suite, column_spec) From c73de8909cdd390da53b6b2e83d39ec23b1d0e93 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Fri, 11 Oct 2024 16:20:07 -0400 Subject: [PATCH 77/78] fix(ui): fix duration error when runs are in progress --- testgen/common/date_service.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/testgen/common/date_service.py b/testgen/common/date_service.py index 28e4b06..e5e89c1 100644 --- a/testgen/common/date_service.py +++ b/testgen/common/date_service.py @@ -68,7 +68,10 @@ def get_timezoned_now(streamlit_session): return get_timezoned_timestamp(streamlit_session, value) -def get_formatted_duration(duration: str) -> str: +def get_formatted_duration(duration: str | None) -> str: + if not duration: + return "--" + hour, minute, second = duration.split(":") formatted = "" if int(hour): From d5603a8675868026d2ca38a1a0ea7fdf03fe9c52 Mon Sep 17 00:00:00 2001 From: Aarthy Adityan Date: Tue, 8 Oct 2024 23:02:00 -0400 Subject: [PATCH 78/78] fix(upgrade): add where clause in sql update statements with joins --- testgen/template/dbupgrade/0108_incremental_upgrade.sql | 3 ++- testgen/template/dbupgrade/0109_incremental_upgrade.sql | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/testgen/template/dbupgrade/0108_incremental_upgrade.sql b/testgen/template/dbupgrade/0108_incremental_upgrade.sql index df8b7cb..a7f4980 100644 --- a/testgen/template/dbupgrade/0108_incremental_upgrade.sql +++ b/testgen/template/dbupgrade/0108_incremental_upgrade.sql @@ -16,7 +16,8 @@ DROP INDEX ix_td_pc_stc_tst; SET test_suite_id = ts.id FROM test_definitions td INNER JOIN test_suites AS ts ON td.test_suite = ts.test_suite AND td.project_code = ts.project_code - WHERE 
td.test_suite_id is NULL; + WHERE td.test_suite_id is NULL + AND test_definitions.id = td.id; ALTER TABLE test_definitions ALTER COLUMN test_suite_id SET NOT NULL; diff --git a/testgen/template/dbupgrade/0109_incremental_upgrade.sql b/testgen/template/dbupgrade/0109_incremental_upgrade.sql index 028dcc4..ceb3304 100644 --- a/testgen/template/dbupgrade/0109_incremental_upgrade.sql +++ b/testgen/template/dbupgrade/0109_incremental_upgrade.sql @@ -18,7 +18,8 @@ ALTER TABLE test_runs ADD COLUMN test_suite_id UUID; UPDATE test_runs SET test_suite_id = ts.id FROM test_runs tr -INNER JOIN test_suites AS ts ON tr.test_suite = ts.test_suite AND tr.project_code = ts.project_code; +INNER JOIN test_suites AS ts ON tr.test_suite = ts.test_suite AND tr.project_code = ts.project_code + WHERE test_runs.id = tr.id; ALTER TABLE test_runs ALTER COLUMN test_suite_id SET NOT NULL; @@ -27,7 +28,8 @@ ALTER TABLE test_runs ALTER COLUMN test_suite_id SET NOT NULL; SET test_suite_id = ts.id FROM test_results tr INNER JOIN test_suites AS ts ON tr.test_suite = ts.test_suite AND tr.project_code = ts.project_code - WHERE tr.test_suite_id is NULL; + WHERE tr.test_suite_id is NULL + AND test_results.id = tr.id; ALTER TABLE test_results ALTER COLUMN test_suite_id SET NOT NULL; ALTER TABLE test_results ALTER COLUMN test_run_id SET NOT NULL;