
Commit cbbde2e
fix: remove unused parameters and apply cache when loading widget
Signed-off-by: seolmin <[email protected]>
stat-kwon committed Dec 12, 2024
1 parent 934c636 commit cbbde2e
Showing 6 changed files with 40 additions and 217 deletions.
4 changes: 0 additions & 4 deletions src/spaceone/dashboard/error/data_table.py
@@ -9,10 +9,6 @@ class ERROR_QUERY_OPTION(ERROR_INVALID_ARGUMENT):
_message = "Query option is invalid. (key = {key})"


class ERROR_NOT_SUPPORTED_QUERY_OPTION(ERROR_INVALID_ARGUMENT):
_message = "Query option is not supported. (key = {key})"


class ERROR_NOT_SUPPORTED_OPERATOR(ERROR_INVALID_ARGUMENT):
_message = "Data table does not support operator. (operator = {operator})"

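For context, the error classes in this module are simple message templates; removing `ERROR_NOT_SUPPORTED_QUERY_OPTION` is safe once `_check_query` and `apply_filter` (deleted below) no longer raise it. A minimal sketch of the pattern; the stand-in base class here is an assumption, the real one lives in spaceone.core's error module:

```python
# Sketch only: assumes the spaceone.core base class formats `_message`
# with the keyword arguments passed at raise time.
class ERROR_INVALID_ARGUMENT(Exception):
    _message = "Argument is invalid."

    def __init__(self, **kwargs):
        super().__init__(self._message.format(**kwargs))


class ERROR_QUERY_OPTION(ERROR_INVALID_ARGUMENT):
    _message = "Query option is invalid. (key = {key})"


try:
    raise ERROR_QUERY_OPTION(key="filter")
except ERROR_QUERY_OPTION as e:
    print(e)  # Query option is invalid. (key = filter)
```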
225 changes: 24 additions & 201 deletions src/spaceone/dashboard/manager/data_table_manager/__init__.py
@@ -5,14 +5,13 @@
from jinja2 import Environment, meta
import pandas as pd

from spaceone.core import cache
from spaceone.core.manager import BaseManager
from spaceone.dashboard.error.data_table import (
ERROR_REQUIRED_PARAMETER,
)
from spaceone.dashboard.error.data_table import (
ERROR_QUERY_OPTION,
ERROR_NOT_SUPPORTED_QUERY_OPTION,
ERROR_INVALID_PARAMETER,
ERROR_NO_FIELDS_TO_GLOBAL_VARIABLES,
ERROR_NOT_GLOBAL_VARIABLE_KEY,
)
@@ -24,6 +23,9 @@
class DataTableManager(BaseManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.widget_id = None
self.domain_id = None

self.df: Union[pd.DataFrame, None] = None
self.jinja_variables = None
self.state = None
@@ -46,44 +48,33 @@ def load_from_widget(self, query: dict, vars: dict = None) -> dict:
granularity = query["granularity"]
start = query["start"]
end = query["end"]
group_by = query.get("group_by")
filter = query.get("filter")
fields = query.get("fields")
field_group = query.get("field_group")
sort = query.get("sort")
page = query.get("page")

self.load(
granularity,
start,
end,
vars=vars,
)

if filter:
self.apply_filter(filter)

if fields:
self.apply_group_by(fields, group_by)

if field_group:
self.apply_field_group(field_group, fields)
if cache_data := cache.get(
f"dashboard:Widget:load:{granularity}:{start}:{end}:{vars}:{self.widget_id}:{self.domain_id}"
):
self.df = pd.DataFrame(cache_data)

if sort:
changed_sort = []
for condition in sort:
key = condition.get("key")
desc = condition.get("desc", False)

if key in fields:
changed_sort.append({"key": f"_total_{key}", "desc": desc})
else:
changed_sort.append(condition)

sort = changed_sort
else:
self.load(
granularity,
start,
end,
vars=vars,
)

return self.response(sort, page)

def make_cache_data(self, granularity, start, end, vars) -> None:
cache_key = f"dashboard:Widget:load:{granularity}:{start}:{end}:{vars}:{self.widget_id}:{self.domain_id}"
if not cache.get(cache_key):
cache.set(
cache_key,
self.df.to_dict(orient="records"),
expire=1800,
)
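
The read side in `load_from_widget` and the write side in `make_cache_data` must build the key string identically, and the frame is serialized as a list of records so a hit can be rebuilt with `pd.DataFrame`. A minimal sketch of the round trip, with a plain dict standing in for `spaceone.core.cache` and made-up IDs (`widget-123`, `domain-abc`) for illustration:

```python
import pandas as pd

_store = {}  # stand-in for spaceone.core.cache; the real backend also honors `expire`


def widget_cache_key(granularity, start, end, vars, widget_id, domain_id):
    # Key layout copied from the diff: every input that changes the
    # result set is folded into the key.
    return f"dashboard:Widget:load:{granularity}:{start}:{end}:{vars}:{widget_id}:{domain_id}"


key = widget_cache_key("MONTHLY", "2024-01", "2024-03", None, "widget-123", "domain-abc")

if cache_data := _store.get(key):
    df = pd.DataFrame(cache_data)  # hit: rebuild the frame from cached records
else:
    df = pd.DataFrame([{"provider": "aws", "cost": 10.0}])  # miss: pretend load() ran
    _store[key] = df.to_dict(orient="records")              # what make_cache_data stores

print(df)
```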

@staticmethod
def _check_query(query: dict) -> None:
if "granularity" not in query:
@@ -95,15 +86,6 @@ def _check_query(query: dict) -> None:
if "end" not in query:
raise ERROR_REQUIRED_PARAMETER(key="query.end")

if "fields" not in query:
raise ERROR_REQUIRED_PARAMETER(key="query.fields")

if "select" in query:
raise ERROR_NOT_SUPPORTED_QUERY_OPTION(key="query.select")

if "filter_or" in query:
raise ERROR_NOT_SUPPORTED_QUERY_OPTION(key="query.filter_or")

def response(self, sort: list = None, page: dict = None) -> dict:
total_count = len(self.df)

@@ -121,165 +103,6 @@ def response(self, sort: list = None, page: dict = None) -> dict:
"total_count": total_count,
}
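
`response()` serializes the in-memory frame after optional sorting and paging. A sketch of the paging step; the `{"start": <1-based row>, "limit": <count>}` option shape is an assumption based on the usual spaceone query format:

```python
import pandas as pd

def paginate(df: pd.DataFrame, page: dict = None) -> pd.DataFrame:
    # Assumed page shape: {"start": <1-based first row>, "limit": <row count>}.
    if page:
        start = page.get("start", 1)
        limit = page.get("limit")
        if limit:
            df = df.iloc[start - 1 : start - 1 + limit]
    return df

df = pd.DataFrame({"cost": [30, 10, 20]})
print(paginate(df, {"start": 2, "limit": 2}).to_dict(orient="records"))
# [{'cost': 10}, {'cost': 20}]
```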

def apply_filter(self, filter: list) -> None:
if len(self.df) > 0:
for condition in filter:
key = condition.get("key", condition.get("k"))
operator = condition.get("operator", condition.get("o"))
value = condition.get("value", condition.get("v"))

if operator in ["in", "not in"]:
if not isinstance(value, list):
raise ERROR_QUERY_OPTION(key="filter")

if key and operator and value:
try:
if operator == "in":
self.df = self.df[self.df[key].isin(value)]
elif operator == "not_in":
self.df = self.df[~self.df[key].isin(value)]
elif operator == "eq":
if isinstance(value, int) or isinstance(value, float):
self.df = self.df.query(f"{key} == {value}")
else:
self.df = self.df.query(f"{key} == '{value}'")
elif operator == "not":
if isinstance(value, int) or isinstance(value, float):
self.df = self.df.query(f"{key} != {value}")
else:
self.df = self.df.query(f"{key} != '{value}'")
elif operator == "gt":
if isinstance(value, int) or isinstance(value, float):
self.df = self.df.query(f"{key} > {value}")
else:
self.df = self.df.query(f"{key} > '{value}'")
elif operator == "gte":
if isinstance(value, int) or isinstance(value, float):
self.df = self.df.query(f"{key} >= {value}")
else:
self.df = self.df.query(f"{key} >= '{value}'")
elif operator == "lt":
if isinstance(value, int) or isinstance(value, float):
self.df = self.df.query(f"{key} < {value}")
else:
self.df = self.df.query(f"{key} < '{value}'")
elif operator == "lte":
if isinstance(value, int) or isinstance(value, float):
self.df = self.df.query(f"{key} <= {value}")
else:
self.df = self.df.query(f"{key} <= '{value}'")
elif operator == "contain":
self.df = self.df[self.df[key].str.contains(str(value))]
elif operator == "not_contain":
self.df = self.df[~self.df[key].str.contains(str(value))]
else:
raise ERROR_NOT_SUPPORTED_QUERY_OPTION(
key=f"filter.operator.{operator}"
)
except Exception as e:
raise ERROR_QUERY_OPTION(key="filter")
else:
raise ERROR_QUERY_OPTION(key="filter")
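
Although `apply_filter` is deleted here, its semantics are worth keeping in mind when reading the cache path: it translated spaceone filter conditions into pandas row selections. A condensed sketch of that dispatch for a few representative operators, using boolean indexing rather than the original `DataFrame.query` strings:

```python
import pandas as pd

def apply_condition(df: pd.DataFrame, key: str, operator: str, value) -> pd.DataFrame:
    # Condensed stand-in for the removed if/elif chain; not the full operator set.
    if operator == "in":
        return df[df[key].isin(value)]
    if operator == "not_in":
        return df[~df[key].isin(value)]
    if operator == "eq":
        return df[df[key] == value]
    if operator == "gt":
        return df[df[key] > value]
    if operator == "contain":
        return df[df[key].astype(str).str.contains(str(value))]
    raise ValueError(f"unsupported operator: {operator}")

df = pd.DataFrame({"provider": ["aws", "gcp", "azure"], "cost": [3, 1, 2]})
print(apply_condition(df, "provider", "in", ["aws", "gcp"]))
```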

def apply_group_by(self, fields: dict, group_by: list = None) -> None:
if len(self.df) > 0:
columns = list(fields.keys())
for key in columns:
if key not in self.df.columns:
self.df[key] = 0

if group_by:
group_by = list(set(group_by))
for key in group_by:
if key not in self.df.columns:
self.df[key] = ""

columns.extend(group_by)

self.df = self.df[columns]

agg_options = {}
for name, options in fields.items():
operator = options.get("operator", "sum")
if operator not in ["sum", "average", "max", "min"]:
raise ERROR_NOT_SUPPORTED_QUERY_OPTION(
key=f"fields.operator.{operator}"
)

if operator == "average":
operator = "mean"

agg_options[name] = operator

if group_by:
df = self.df.groupby(group_by).agg(agg_options).reset_index()
if not df.empty:
self.df = df

else:
self.df = self.df.agg(agg_options).to_frame().T
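
The removed `apply_group_by` mapped widget field specs onto `DataFrame.groupby(...).agg(...)`, translating the widget operator `average` to pandas' `mean`. A minimal reproduction of that mapping:

```python
import pandas as pd

df = pd.DataFrame({
    "provider": ["aws", "aws", "gcp"],
    "cost": [1.0, 2.0, 5.0],
})

fields = {"cost": {"operator": "average"}}  # widget-style field spec

agg_options = {}
for name, options in fields.items():
    operator = options.get("operator", "sum")
    agg_options[name] = "mean" if operator == "average" else operator

print(df.groupby(["provider"]).agg(agg_options).reset_index())
#   provider  cost
# 0      aws   1.5
# 1      gcp   5.0
```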

def apply_field_group(self, field_group: list, fields: dict) -> None:
if len(self.df) > 0:
for key in field_group:
if key not in self.df.columns:
raise ERROR_INVALID_PARAMETER(
key="query.field_group", reason=f"Invalid key: {key}"
)

data_fields = list(fields.keys())
agg_fields = set(data_fields + field_group)
group_by = list(set(self.df.columns) - agg_fields)
agg_options = {}
for field in agg_fields:
agg_options[field] = lambda x: list(x)

if group_by:
self.df = self.df.groupby(group_by).agg(agg_options).reset_index()
rows = self.df.to_dict(orient="records")
else:
aggr_row = {}
for key in agg_options.keys():
aggr_row[key] = []

for row in self.df.to_dict(orient="records"):
for key in agg_options.keys():
if key in row:
aggr_row[key].append(row[key])

rows = [aggr_row]

changed_data = []
for row in rows:
changed_row = {}
for data_field in data_fields:
changed_row[data_field] = []

for key, value in row.items():
if key in group_by:
changed_row[key] = value
elif key in fields:
operator = fields[key].get("operator", "sum")
if operator == "sum":
changed_row[f"_total_{key}"] = sum(value)
elif operator == "average":
changed_row[f"_total_{key}"] = sum(value) / len(value)
elif operator == "max":
changed_row[f"_total_{key}"] = max(value)
elif operator == "min":
changed_row[f"_total_{key}"] = min(value)

for idx, v in enumerate(value):
data = {"value": v}
for fg in field_group:
data[fg] = row[fg][idx]

changed_row[key].append(data)

changed_data.append(changed_row)

self.df = pd.DataFrame(changed_data)
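
The removed `apply_field_group` folded the field-group dimensions into per-row value lists and attached `_total_*` aggregates, which is also why the old `load_from_widget` rewrote sort keys on those fields to `_total_<field>`. A toy illustration of the resulting row shape, assuming a `sum` operator on a single `cost` field:

```python
import pandas as pd

# One row per (provider, month) after group_by.
df = pd.DataFrame({
    "provider": ["aws", "aws", "gcp"],
    "month": ["2024-01", "2024-02", "2024-01"],
    "cost": [1.0, 2.0, 5.0],
})

# Fold the field-group dimension ("month") into lists per provider.
rows = df.groupby(["provider"]).agg(list).reset_index().to_dict(orient="records")

changed_data = []
for row in rows:
    changed_data.append({
        "provider": row["provider"],
        "_total_cost": sum(row["cost"]),  # "sum" operator from the field spec
        "cost": [
            {"value": v, "month": m}
            for v, m in zip(row["cost"], row["month"])
        ],
    })

print(pd.DataFrame(changed_data))
# aws -> _total_cost 3.0, cost [{'value': 1.0, 'month': '2024-01'}, ...]
```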

def apply_sort(self, sort: list) -> None:
if len(self.df) > 0:
keys = []
@@ -113,6 +113,8 @@ def load(
self.error_message = e.message if hasattr(e, "message") else str(e)
_LOGGER.error(f"[load] add {self.source_type} source error: {e}")

self.make_cache_data(granularity, start, end, vars)

return self.df

def _analyze_asset(
@@ -115,6 +115,8 @@ def load(
self.error_message = e.message if hasattr(e, "message") else str(e)
_LOGGER.error(f"[load] {self.operator} operation error: {e}")

self.make_cache_data(granularity, start, end, vars)

return self.df

def join_data_tables(
12 changes: 6 additions & 6 deletions src/spaceone/dashboard/service/private_widget_service.py
@@ -148,9 +148,9 @@ def _create_data_tables(
if "data_table_id" in operator_options:
data_table_idx = operator_options["data_table_id"]
if data_table_idx in self.data_table_id_map:
data_table["options"][operator][
"data_table_id"
] = self.data_table_id_map[data_table_idx]
data_table["options"][operator]["data_table_id"] = (
self.data_table_id_map[data_table_idx]
)
else:
retry_data_tables[idx] = data_table
idx += 1
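
The change in both widget services is formatting-only (the newer Black style for subscript assignments); the logic itself resolves index-based `data_table_id` references against the map of already-created tables and defers the rest for a retry pass. A stripped-down sketch of that pass, with the surrounding structure simplified and assumed:

```python
# Simplified sketch of the reference-resolution pass; the real method also
# iterates operator options per data table and tracks creation order.
def resolve_references(data_tables: list, data_table_id_map: dict) -> dict:
    retry_data_tables = {}
    for idx, data_table in enumerate(data_tables):
        for operator, operator_options in data_table.get("options", {}).items():
            if isinstance(operator_options, dict) and "data_table_id" in operator_options:
                data_table_idx = operator_options["data_table_id"]
                if data_table_idx in data_table_id_map:
                    # Replace the index-based reference with the created table's real ID.
                    data_table["options"][operator]["data_table_id"] = (
                        data_table_id_map[data_table_idx]
                    )
                else:
                    # Referenced table not created yet; try again next pass.
                    retry_data_tables[idx] = data_table
    return retry_data_tables
```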
@@ -209,9 +209,9 @@ def _retry_create_data_tables(self, data_tables: dict):
if "data_table_id" in operator_options:
data_table_idx = operator_options["data_table_id"]
if data_table_idx in self.data_table_id_map:
data_table["options"][operator][
"data_table_id"
] = self.data_table_id_map[data_table_idx]
data_table["options"][operator]["data_table_id"] = (
self.data_table_id_map[data_table_idx]
)
else:
retry_data_tables[idx] = data_table
idx += 1
12 changes: 6 additions & 6 deletions src/spaceone/dashboard/service/public_widget_service.py
@@ -154,9 +154,9 @@ def _create_data_tables(
if "data_table_id" in operator_options:
data_table_idx = operator_options["data_table_id"]
if data_table_idx in self.data_table_id_map:
data_table["options"][operator][
"data_table_id"
] = self.data_table_id_map[data_table_idx]
data_table["options"][operator]["data_table_id"] = (
self.data_table_id_map[data_table_idx]
)
else:
retry_data_tables[idx] = data_table
idx += 1
@@ -215,9 +215,9 @@ def _retry_create_data_tables(self, data_tables: dict):
if "data_table_id" in operator_options:
data_table_idx = operator_options["data_table_id"]
if data_table_idx in self.data_table_id_map:
data_table["options"][operator][
"data_table_id"
] = self.data_table_id_map[data_table_idx]
data_table["options"][operator]["data_table_id"] = (
self.data_table_id_map[data_table_idx]
)
else:
retry_data_tables[idx] = data_table
idx += 1
