diff --git a/pyproject.toml b/pyproject.toml
index ed531007..7267ca28 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,7 +8,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "dataops-testgen"
-version = "4.38.7"
+version = "4.39.2"
description = "DataKitchen's Data Quality DataOps TestGen"
authors = [
{ "name" = "DataKitchen, Inc.", "email" = "info@datakitchen.io" },
diff --git a/testgen/__main__.py b/testgen/__main__.py
index 1f0f4480..8463ab4f 100644
--- a/testgen/__main__.py
+++ b/testgen/__main__.py
@@ -44,8 +44,12 @@
)
from testgen.common.models import with_database_session
from testgen.common.models.profiling_run import ProfilingRun
+from testgen.common.models.settings import PersistedSetting
from testgen.common.models.test_run import TestRun
from testgen.common.models.test_suite import TestSuite
+from testgen.common.notifications.base import smtp_configured
+from testgen.common.notifications.profiling_run import send_profiling_run_notifications
+from testgen.common.notifications.test_run import send_test_run_notifications
from testgen.scheduler import register_scheduler_job, run_scheduler
from testgen.utils import plugins
@@ -645,14 +649,18 @@ def run_ui():
patch_streamlit.patch(force=True)
@with_database_session
- def cancel_all_running():
+ def init_ui():
try:
- ProfilingRun.cancel_all_running()
- TestRun.cancel_all_running()
+ for profiling_run_id in ProfilingRun.cancel_all_running():
+ send_profiling_run_notifications(ProfilingRun.get(profiling_run_id))
+ for test_run_id in TestRun.cancel_all_running():
+ send_test_run_notifications(TestRun.get(test_run_id))
except Exception:
LOG.warning("Failed to cancel 'Running' profiling/test runs")
- cancel_all_running()
+ PersistedSetting.set("SMTP_CONFIGURED", smtp_configured())
+
+ init_ui()
app_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "ui/app.py")
process= subprocess.Popen(
diff --git a/testgen/commands/run_profiling.py b/testgen/commands/run_profiling.py
index de217cb9..3764f584 100644
--- a/testgen/commands/run_profiling.py
+++ b/testgen/commands/run_profiling.py
@@ -27,6 +27,7 @@
from testgen.common.models.profiling_run import ProfilingRun
from testgen.common.models.table_group import TableGroup
from testgen.common.models.test_suite import TestSuite
+from testgen.common.notifications.profiling_run import send_profiling_run_notifications
from testgen.ui.session import session
from testgen.utils import get_exception_message
@@ -53,7 +54,7 @@ def run_profiling_in_background(table_group_id: str | UUID) -> None:
def run_profiling(table_group_id: str | UUID, username: str | None = None, run_date: datetime | None = None) -> str:
if table_group_id is None:
raise ValueError("Table Group ID was not specified")
-
+
LOG.info(f"Starting profiling run for table group {table_group_id}")
time_delta = (run_date - datetime.now(UTC)) if run_date else timedelta()
@@ -104,12 +105,16 @@ def run_profiling(table_group_id: str | UUID, username: str | None = None, run_d
profiling_run.profiling_endtime = datetime.now(UTC) + time_delta
profiling_run.status = "Error"
profiling_run.save()
+
+ send_profiling_run_notifications(profiling_run)
else:
LOG.info("Setting profiling run status to Completed")
profiling_run.profiling_endtime = datetime.now(UTC) + time_delta
profiling_run.status = "Complete"
profiling_run.save()
+ send_profiling_run_notifications(profiling_run)
+
_rollup_profiling_scores(profiling_run, table_group)
if bool(table_group.monitor_test_suite_id) and not table_group.last_complete_profile_run_id:
diff --git a/testgen/commands/run_refresh_score_cards_results.py b/testgen/commands/run_refresh_score_cards_results.py
index 7f0015f8..3a6a71f3 100644
--- a/testgen/commands/run_refresh_score_cards_results.py
+++ b/testgen/commands/run_refresh_score_cards_results.py
@@ -11,6 +11,7 @@
ScoreDefinitionResult,
ScoreDefinitionResultHistoryEntry,
)
+from testgen.common.notifications.score_drop import collect_score_notification_data, send_score_drop_notifications
LOG = logging.getLogger("testgen")
@@ -26,16 +27,16 @@ def run_refresh_score_cards_results(
_refresh_date = refresh_date or datetime.datetime.now(datetime.UTC)
try:
- definitions = []
if not definition_id:
definitions = ScoreDefinition.all(project_code=project_code)
else:
- definitions.append(ScoreDefinition.get(str(definition_id)))
+ definitions = [ScoreDefinition.get(str(definition_id))]
except Exception:
LOG.exception("Stopping scorecards results refresh after unexpected error")
return
db_session = get_current_session()
+ score_notification_data = []
for definition in definitions:
LOG.info(
@@ -46,6 +47,9 @@ def run_refresh_score_cards_results(
try:
fresh_score_card = definition.as_score_card()
+
+ collect_score_notification_data(score_notification_data, definition, fresh_score_card)
+
definition.clear_results()
definition.results = _score_card_to_results(fresh_score_card)
definition.breakdown = _score_definition_to_results_breakdown(definition)
@@ -89,6 +93,8 @@ def run_refresh_score_cards_results(
end_time = time.time()
LOG.info("Refreshing results for %s done after %s seconds", scope, round(end_time - start_time, 2))
+ send_score_drop_notifications(score_notification_data)
+
def _score_card_to_results(score_card: ScoreCard) -> list[ScoreDefinitionResult]:
return [
diff --git a/testgen/commands/run_test_execution.py b/testgen/commands/run_test_execution.py
index 374b27b5..14403f46 100644
--- a/testgen/commands/run_test_execution.py
+++ b/testgen/commands/run_test_execution.py
@@ -25,6 +25,7 @@
from testgen.common.models.table_group import TableGroup
from testgen.common.models.test_run import TestRun
from testgen.common.models.test_suite import TestSuite
+from testgen.common.notifications.test_run import send_test_run_notifications
from testgen.ui.session import session
from testgen.utils import get_exception_message
@@ -81,6 +82,10 @@ def run_test_execution(test_suite_id: str | UUID, username: str | None = None, r
sql_generator = TestExecutionSQL(connection, table_group, test_run)
+ # Update the thresholds before retrieving the test definitions in the next steps
+ LOG.info("Updating historic test thresholds")
+ execute_db_queries([sql_generator.update_historic_thresholds()])
+
LOG.info("Retrieving active test definitions in test suite")
test_defs = fetch_dict_from_db(*sql_generator.get_active_test_definitions())
test_defs = [TestExecutionDef(**item) for item in test_defs]
@@ -99,9 +104,6 @@ def run_test_execution(test_suite_id: str | UUID, username: str | None = None, r
)
if valid_test_defs:
- LOG.info("Updating historic test thresholds")
- execute_db_queries([sql_generator.update_historic_thresholds()])
-
column_types = {(col.schema_name, col.table_name, col.column_name): col.column_type for col in data_chars}
for td in valid_test_defs:
td.column_type = column_types.get((td.schema_name, td.table_name, td.column_name))
@@ -128,6 +130,7 @@ def run_test_execution(test_suite_id: str | UUID, username: str | None = None, r
execute_db_queries(sql_generator.update_test_results())
# Refresh needed because previous query updates the test run too
test_run.refresh()
+
except Exception as e:
LOG.exception("Test execution encountered an error.")
LOG.info("Setting test run status to Error")
@@ -135,6 +138,8 @@ def run_test_execution(test_suite_id: str | UUID, username: str | None = None, r
test_run.test_endtime = datetime.now(UTC) + time_delta
test_run.status = "Error"
test_run.save()
+
+ send_test_run_notifications(test_run)
else:
LOG.info("Setting test run status to Completed")
test_run.test_endtime = datetime.now(UTC) + time_delta
@@ -145,6 +150,7 @@ def run_test_execution(test_suite_id: str | UUID, username: str | None = None, r
test_suite.last_complete_test_run_id = test_run.id
test_suite.save()
+ send_test_run_notifications(test_run)
_rollup_test_scores(test_run, table_group)
finally:
MixpanelService().send_event(
diff --git a/testgen/common/models/hygiene_issue.py b/testgen/common/models/hygiene_issue.py
new file mode 100644
index 00000000..6deb0763
--- /dev/null
+++ b/testgen/common/models/hygiene_issue.py
@@ -0,0 +1,145 @@
+import re
+from collections.abc import Iterable
+from dataclasses import dataclass
+from typing import Self
+from uuid import UUID, uuid4
+
+from sqlalchemy import Column, ForeignKey, String, and_, case, null, select
+from sqlalchemy.dialects import postgresql
+from sqlalchemy.ext.hybrid import hybrid_property
+from sqlalchemy.orm import aliased, relationship
+from sqlalchemy.sql.functions import func
+
+from testgen.common.models import Base, get_current_session
+from testgen.common.models.entity import Entity
+
+PII_RISK_RE = re.compile(r"Risk: (MODERATE|HIGH),")
+
+
+@dataclass
+class IssueCount:
+ total: int = 0
+ inactive: int = 0
+
+ @property
+ def active(self):
+ return self.total - self.inactive
+
+
+class HygieneIssueType(Base):
+ __tablename__ = "profile_anomaly_types"
+
+ id: str = Column(String, primary_key=True)
+ likelihood: str = Column("issue_likelihood", String)
+ name: str = Column("anomaly_name", String)
+
+ # Note: not all table columns are implemented by this entity
+
+
+class HygieneIssue(Entity):
+ __tablename__ = "profile_anomaly_results"
+
+ id: UUID = Column(postgresql.UUID(as_uuid=True), primary_key=True, nullable=False, default=uuid4)
+
+ project_code: str = Column(String, ForeignKey("projects.project_code"))
+ table_groups_id: UUID = Column(postgresql.UUID(as_uuid=True), ForeignKey("table_groups.id"), nullable=False)
+ profile_run_id: UUID = Column(postgresql.UUID(as_uuid=True), ForeignKey("profiling_runs.id"), nullable=False)
+
+ type_id: str = Column("anomaly_id", String, ForeignKey("profile_anomaly_types.id"), nullable=False)
+ type_ = relationship(HygieneIssueType)
+
+ schema_name: str = Column(String, nullable=False)
+ table_name: str = Column(String, nullable=False)
+ column_name: str = Column(String, nullable=False)
+
+ detail: str = Column(String, nullable=False)
+ disposition: str = Column(String)
+
+ # Note: not all table columns are implemented by this entity
+
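+    # For "Potential PII" issues the priority (High/Moderate) is parsed from the "Risk: ..." text in the
+    # detail column; all other issue types use the likelihood defined on the issue type.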
+ @hybrid_property
+ def priority(self):
+ if self.type_.likelihood != "Potential PII":
+ return self.type_.likelihood
+ elif self.detail and (match := PII_RISK_RE.search(self.detail)):
+ return match.group(1).capitalize()
+ else:
+ return None
+
+ @priority.expression
+ def priority(cls):
+ return case(
+ (
+ HygieneIssueType.likelihood != "Potential PII",
+ HygieneIssueType.likelihood,
+ ),
+ else_=func.initcap(
+ func.substring(cls.detail, PII_RISK_RE.pattern)
+ ),
+ )
+
+ @classmethod
+ def select_count_by_priority(cls, profiling_run_id: UUID) -> dict[str, IssueCount]:
+ count_query = (
+ select(
+ cls.priority,
+ func.count(),
+                func.count().filter(cls.disposition.in_(("Dismissed", "Inactive"))),
+ )
+ .select_from(cls)
+ .join(HygieneIssueType)
+ .where(cls.profile_run_id == profiling_run_id)
+ .group_by(cls.priority)
+ )
+ result = {
+ priority: IssueCount(total, inactive)
+ for priority, total, inactive in get_current_session().execute(count_query)
+ }
+ for p in ("Definite", "Likely", "Possible", "High", "Moderate"):
+ result.setdefault(p, IssueCount())
+ return result
+
+ @classmethod
+ def select_with_diff(
+ cls, profiling_run_id: UUID, other_profiling_run_id: UUID | None, *where_clauses, limit: int | None = None
+    ) -> Iterable[tuple[Self, bool]]:
+ other = aliased(cls)
+ order_weight = case(
+ (cls.priority == "Definite", 1),
+ (cls.priority == "Likely", 2),
+ (cls.priority == "Possible", 3),
+ (cls.priority == "High", 4),
+ (cls.priority == "Moderate", 5),
+ else_=6,
+ )
+ is_new_col = (other.id.is_(None) if other_profiling_run_id else null()).label("is_new")
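+        # An issue is flagged as new when the outer join below finds no matching issue in the previous run;
+        # without a previous run, "is_new" is left as NULL (unknown).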
+ query = (
+ select(
+ cls,
+ is_new_col,
+ )
+ .outerjoin(
+ other,
+ and_(
+ other.table_groups_id == cls.table_groups_id,
+ other.schema_name == cls.schema_name,
+ other.table_name == cls.table_name,
+ other.column_name == cls.column_name,
+ other.type_id == cls.type_id,
+ other.profile_run_id == other_profiling_run_id,
+ ),
+ ).join(
+ HygieneIssueType,
+ HygieneIssueType.id == cls.type_id,
+ ).where(
+ cls.profile_run_id == profiling_run_id,
+ *where_clauses
+ ).order_by(
+ is_new_col.desc(),
+ order_weight,
+ ).limit(
+ limit,
+ )
+ )
+
+ return get_current_session().execute(query)
diff --git a/testgen/common/models/notification_settings.py b/testgen/common/models/notification_settings.py
new file mode 100644
index 00000000..66c616bb
--- /dev/null
+++ b/testgen/common/models/notification_settings.py
@@ -0,0 +1,286 @@
+import enum
+import re
+from collections.abc import Iterable
+from decimal import Decimal
+from typing import ClassVar, Generic, Self, TypeVar
+from uuid import UUID, uuid4
+
+from sqlalchemy import Boolean, Column, Enum, ForeignKey, String, and_, or_, select
+from sqlalchemy.dialects import postgresql
+from sqlalchemy.sql import Select
+
+from testgen.common.models import get_current_session
+from testgen.common.models.custom_types import JSON_TYPE
+from testgen.common.models.entity import Entity
+from testgen.common.models.scores import ScoreDefinition
+from testgen.common.models.table_group import TableGroup
+from testgen.common.models.test_suite import TestSuite
+
+SENTINEL_TYPE = type("Sentinel", (object,), {})
+
+SENTINEL = SENTINEL_TYPE()
+
+TriggerT = TypeVar("TriggerT", bound=Enum)
+
+
+class TestRunNotificationTrigger(enum.Enum):
+ always = "always"
+ on_failures = "on_failures"
+ on_warnings = "on_warnings"
+ on_changes = "on_changes"
+
+
+class ProfilingRunNotificationTrigger(enum.Enum):
+ always = "always"
+ on_changes = "on_changes"
+
+
+class NotificationEvent(enum.Enum):
+ test_run = "test_run"
+ profiling_run = "profiling_run"
+ score_drop = "score_drop"
+
+
+class NotificationSettingsValidationError(Exception):
+ """Validation Exception. Messaging should be suitable for the users."""
+ pass
+
+
+class NotificationSettings(Entity):
+ __tablename__ = "notification_settings"
+
+ id: UUID = Column(postgresql.UUID(as_uuid=True), primary_key=True, default=uuid4)
+ project_code: str = Column(String)
+
+ event: NotificationEvent = Column(Enum(NotificationEvent))
+ enabled: bool = Column(Boolean, default=True)
+ recipients: list[str] = Column(postgresql.JSONB, nullable=False, default=[])
+
+ test_suite_id: UUID | None = Column(
+ postgresql.UUID(as_uuid=True),
+ ForeignKey("test_suites.id", ondelete="CASCADE"),
+ nullable=True,
+ default=None,
+ )
+ table_group_id: UUID | None = Column(
+ postgresql.UUID(as_uuid=True),
+ ForeignKey("table_groups.id", ondelete="CASCADE"),
+ nullable=True,
+ default=None,
+ )
+ score_definition_id: UUID | None = Column(
+ postgresql.UUID(as_uuid=True),
+ ForeignKey("score_definitions.id", ondelete="CASCADE"),
+ nullable=True,
+ default=None,
+ )
+
+ settings: JSON_TYPE = Column(postgresql.JSONB, nullable=False, default={})
+
+ __mapper_args__: ClassVar = {
+ "polymorphic_on": event,
+ "polymorphic_identity": "base",
+ }
+
+ @classmethod
+ def _base_select_query(
+ cls,
+ *,
+ enabled: bool | SENTINEL_TYPE = SENTINEL,
+ event: NotificationEvent | SENTINEL_TYPE = SENTINEL,
+ project_code: str | SENTINEL_TYPE = SENTINEL,
+ test_suite_id: UUID | None | SENTINEL_TYPE = SENTINEL,
+ table_group_id: UUID | None | SENTINEL_TYPE = SENTINEL,
+ score_definition_id: UUID | None | SENTINEL_TYPE = SENTINEL,
+ ) -> Select:
+ fk_count = len([None for fk in (test_suite_id, table_group_id, score_definition_id) if fk is not SENTINEL])
+ if fk_count > 1:
+ raise ValueError("Only one foreign key can be used at a time.")
+ elif fk_count == 1 and (project_code is not SENTINEL or event is not SENTINEL):
+ raise ValueError("Filtering by project_code or event is not allowed when filtering by a foreign key.")
+
+ query = select(cls)
+ if enabled is not SENTINEL:
+ query = query.where(cls.enabled == enabled)
+ if event is not SENTINEL:
+ query = query.where(cls.event == event)
+ if project_code is not SENTINEL:
+ query = query.where(cls.project_code == project_code)
+
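+        # Matches settings scoped to the given entity plus project-wide settings (NULL foreign key) for that entity's project.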
+ def _subquery_clauses(entity, rel_col, id_value):
+ return and_(
+ cls.project_code.in_(select(entity.project_code).where(entity.id == id_value)),
+ or_(rel_col == id_value, rel_col.is_(None)),
+ )
+
+ if test_suite_id is not SENTINEL:
+ query = query.where(_subquery_clauses(TestSuite, cls.test_suite_id, test_suite_id))
+ elif table_group_id is not SENTINEL:
+ query = query.where(_subquery_clauses(TableGroup, cls.table_group_id, table_group_id))
+ elif score_definition_id is not SENTINEL:
+ query = query.where(_subquery_clauses(ScoreDefinition, cls.score_definition_id, score_definition_id))
+
+ return query
+
+ @classmethod
+ def select(
+ cls,
+ *,
+ enabled: bool | SENTINEL_TYPE = SENTINEL,
+ event: NotificationEvent | SENTINEL_TYPE = SENTINEL,
+ project_code: str | SENTINEL_TYPE = SENTINEL,
+ test_suite_id: UUID | None | SENTINEL_TYPE = SENTINEL,
+ table_group_id: UUID | None | SENTINEL_TYPE = SENTINEL,
+ score_definition_id: UUID | None | SENTINEL_TYPE = SENTINEL,
+ ) -> Iterable[Self]:
+ query = cls._base_select_query(
+ enabled=enabled,
+ event=event,
+ project_code=project_code,
+ test_suite_id=test_suite_id,
+ table_group_id=table_group_id,
+ score_definition_id=score_definition_id,
+ ).order_by(
+ cls.project_code, cls.event, cls.test_suite_id, cls.table_group_id, cls.score_definition_id, cls.id,
+ )
+ return get_current_session().scalars(query)
+
+ def _validate_settings(self):
+ pass
+
+ def validate(self):
+ if len(self.recipients) < 1:
+ raise NotificationSettingsValidationError("At least one recipient must be defined.")
+ for addr in self.recipients:
+ if not re.match(r"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$", addr):
+ raise NotificationSettingsValidationError(f"Invalid email address: {addr}.")
+ self._validate_settings()
+
+ def save(self) -> None:
+ self.validate()
+ super().save()
+
+
+class RunNotificationSettings(NotificationSettings, Generic[TriggerT]):
+ __abstract__ = True
+ trigger_enum: ClassVar[type[TriggerT]]
+
+ @property
+ def trigger(self) -> TriggerT | None:
+ return self.trigger_enum(self.settings["trigger"]) if "trigger" in self.settings else None
+
+ @trigger.setter
+ def trigger(self, trigger: TriggerT) -> None:
+ self.settings = {"trigger": trigger.value}
+
+ def _validate_settings(self):
+ if not isinstance(self.trigger, self.trigger_enum):
+ raise NotificationSettingsValidationError("Invalid notification trigger.")
+
+
+class TestRunNotificationSettings(RunNotificationSettings[TestRunNotificationTrigger]):
+
+ __mapper_args__: ClassVar = {
+ "polymorphic_identity": NotificationEvent.test_run,
+ }
+ trigger_enum = TestRunNotificationTrigger
+
+ @classmethod
+ def create(
+ cls,
+ project_code: str,
+ test_suite_id: UUID | None,
+ recipients: list[str],
+ trigger: TestRunNotificationTrigger,
+ ) -> Self:
+ ns = cls(
+ event=NotificationEvent.test_run,
+ project_code=project_code,
+ test_suite_id=test_suite_id,
+ recipients=recipients,
+ settings={"trigger": trigger.value}
+ )
+ ns.save()
+ return ns
+
+
+class ProfilingRunNotificationSettings(RunNotificationSettings[ProfilingRunNotificationTrigger]):
+
+ __mapper_args__: ClassVar = {
+ "polymorphic_identity": NotificationEvent.profiling_run,
+ }
+ trigger_enum = ProfilingRunNotificationTrigger
+
+ @classmethod
+ def create(
+ cls,
+ project_code: str,
+ table_group_id: UUID | None,
+ recipients: list[str],
+ trigger: ProfilingRunNotificationTrigger,
+ ) -> Self:
+ ns = cls(
+ event=NotificationEvent.profiling_run,
+ project_code=project_code,
+ table_group_id=table_group_id,
+ recipients=recipients,
+ settings={"trigger": trigger.value}
+ )
+ ns.save()
+ return ns
+
+
+class ScoreDropNotificationSettings(NotificationSettings):
+
+ __mapper_args__: ClassVar = {
+ "polymorphic_identity": NotificationEvent.score_drop,
+ }
+
+ @staticmethod
+ def _value_to_threshold(value: Decimal | float | None):
+ return str(Decimal(value).quantize(Decimal("0.1"))) if value is not None else None
+
+ @property
+ def total_score_threshold(self) -> Decimal | None:
+ return Decimal(self.settings["total_threshold"]) if self.settings.get("total_threshold") else None
+
+ @total_score_threshold.setter
+ def total_score_threshold(self, value: Decimal | float | None) -> None:
+ self.settings = {**self.settings, "total_threshold": self._value_to_threshold(value)}
+
+ @property
+ def cde_score_threshold(self) -> Decimal | None:
+ return Decimal(self.settings["cde_threshold"]) if self.settings.get("cde_threshold") else None
+
+ @cde_score_threshold.setter
+ def cde_score_threshold(self, value: Decimal | float | None) -> None:
+ self.settings = {**self.settings, "cde_threshold": self._value_to_threshold(value)}
+
+ def _validate_settings(self):
+ if not (self.total_score_threshold or self.cde_score_threshold):
+ raise NotificationSettingsValidationError("At least one score threshold must be set.")
+ for score, label in ((self.total_score_threshold, "Total"), (self.cde_score_threshold, "CDE")):
+ if score is not None and not 0 <= score <= 100:
+ raise NotificationSettingsValidationError(f"The {label} score threshold must be between 0 and 100")
+
+ @classmethod
+ def create(
+ cls,
+ project_code: str,
+ score_definition_id: UUID | None,
+ recipients: list[str],
+ total_score_threshold: float | Decimal | None,
+ cde_score_threshold: float | Decimal | None,
+ ) -> Self:
+ ns = cls(
+ event=NotificationEvent.score_drop,
+ project_code=project_code,
+ score_definition_id=score_definition_id,
+ recipients=recipients,
+ settings={
+ "total_threshold": cls._value_to_threshold(total_score_threshold),
+ "cde_threshold": cls._value_to_threshold(cde_score_threshold),
+ },
+ )
+ ns.save()
+ return ns
diff --git a/testgen/common/models/profiling_run.py b/testgen/common/models/profiling_run.py
index 0343b99f..b7059a6d 100644
--- a/testgen/common/models/profiling_run.py
+++ b/testgen/common/models/profiling_run.py
@@ -1,7 +1,7 @@
from collections.abc import Iterable
from dataclasses import dataclass
from datetime import UTC, datetime
-from typing import Literal, NamedTuple, TypedDict
+from typing import Literal, NamedTuple, Self, TypedDict
from uuid import UUID, uuid4
import streamlit as st
@@ -46,7 +46,7 @@ class ProfilingRunSummary(EntityMinimal):
profiling_endtime: datetime
table_groups_name: str
status: ProfilingRunStatus
- progress: list[ProgressStep]
+ progress: list[ProgressStep]
process_id: int
log_message: str
table_group_schema: str
@@ -148,7 +148,10 @@ def select_minimal_where(
@classmethod
@st.cache_data(show_spinner=False)
def select_summary(
- cls, project_code: str, table_group_id: str | UUID | None = None, profiling_run_ids: list[str] | None = None
+ cls,
+ project_code: str,
+ table_group_id: str | UUID | None = None,
+        profiling_run_ids: list[str | UUID] | None = None,
) -> Iterable[ProfilingRunSummary]:
if (table_group_id and not is_uuid4(table_group_id)) or (
profiling_run_ids and not all(is_uuid4(run_id) for run_id in profiling_run_ids)
@@ -234,14 +237,18 @@ def has_running_process(cls, ids: list[str]) -> bool:
return process_count > 0
@classmethod
- def cancel_all_running(cls) -> None:
+ def cancel_all_running(cls) -> list[UUID]:
query = (
- update(cls).where(cls.status == "Running").values(status="Cancelled", profiling_endtime=datetime.now(UTC))
+ update(cls)
+ .where(cls.status == "Running")
+ .values(status="Cancelled", profiling_endtime=datetime.now(UTC))
+ .returning(cls.id)
)
db_session = get_current_session()
- db_session.execute(query)
+ rows = db_session.execute(query)
db_session.commit()
cls.clear_cache()
+ return [r.id for r in rows]
@classmethod
def cancel_run(cls, run_id: str | UUID) -> None:
@@ -294,3 +301,16 @@ def set_progress(self, key: ProgressKey, status: ProgressStatus, detail: str | N
self.progress = list(self._progress.values())
flag_modified(self, "progress")
+
+ def get_previous(self) -> Self | None:
+ query = (
+ select(ProfilingRun)
+ .where(
+ ProfilingRun.table_groups_id == self.table_groups_id,
+ ProfilingRun.status == "Complete",
+ ProfilingRun.profiling_starttime < self.profiling_starttime,
+ )
+ .order_by(desc(ProfilingRun.profiling_starttime))
+ .limit(1)
+ )
+ return get_current_session().scalar(query)
diff --git a/testgen/common/models/test_result.py b/testgen/common/models/test_result.py
new file mode 100644
index 00000000..de96c97e
--- /dev/null
+++ b/testgen/common/models/test_result.py
@@ -0,0 +1,80 @@
+import enum
+from collections import defaultdict
+from uuid import UUID, uuid4
+
+from sqlalchemy import Boolean, Column, Enum, ForeignKey, String, and_, or_, select
+from sqlalchemy.dialects import postgresql
+from sqlalchemy.orm import aliased
+
+from testgen.common.models import get_current_session
+from testgen.common.models.entity import Entity
+
+
+class TestResultStatus(enum.Enum):
+ Error = "Error"
+ Log = "Log"
+ Passed = "Passed"
+ Warning = "Warning"
+ Failed = "Failed"
+
+
+TestResultDiffType = tuple[TestResultStatus, TestResultStatus, list[UUID]]
+
+
+class TestResult(Entity):
+ __tablename__ = "test_results"
+
+ id: UUID = Column(postgresql.UUID(as_uuid=True), primary_key=True, nullable=False, default=uuid4)
+
+ test_suite_id: UUID = Column(postgresql.UUID(as_uuid=True), ForeignKey("test_suites.id"), nullable=False)
+ test_run_id: UUID = Column(postgresql.UUID(as_uuid=True), ForeignKey("test_runs.id"), nullable=False)
+
+ test_definition_id: UUID = Column(postgresql.UUID(as_uuid=True), ForeignKey("test_definitions.id"), nullable=False)
+ test_type: str = Column(String, ForeignKey("test_types.test_type"), nullable=False)
+ auto_gen: bool = Column(Boolean)
+
+ schema_name: str = Column(String, nullable=False)
+ table_name: str = Column(String)
+ column_names: str = Column(String)
+
+ status: TestResultStatus = Column("result_status", Enum(TestResultStatus))
+ message: str = Column("result_message", String)
+
+ # Note: not all table columns are implemented by this entity
+
+ @classmethod
+ def diff(cls, test_run_id_a: UUID, test_run_id_b: UUID) -> list[TestResultDiffType]:
+ alias_a = aliased(cls)
+ alias_b = aliased(cls)
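+        # Full outer join pairs results across the two runs: auto-generated tests match on suite/table/column/type,
+        # manually defined tests match on test definition id.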
+ query = select(
+ alias_a.status, alias_b.status, alias_b.test_definition_id,
+ ).join(
+ alias_b,
+ or_(
+ and_(
+ alias_a.auto_gen.is_(True),
+ alias_b.auto_gen.is_(True),
+ alias_a.test_suite_id == alias_b.test_suite_id,
+ alias_a.schema_name == alias_b.schema_name,
+ alias_a.table_name.isnot_distinct_from(alias_b.table_name),
+ alias_a.column_names.isnot_distinct_from(alias_b.column_names),
+ alias_a.test_type == alias_b.test_type,
+ ),
+ and_(
+ alias_a.auto_gen.isnot(True),
+ alias_b.auto_gen.isnot(True),
+ alias_a.test_definition_id == alias_b.test_definition_id,
+ ),
+ ),
+ full=True,
+ ).where(
+ or_(alias_a.test_run_id == test_run_id_a, alias_a.test_run_id.is_(None)),
+ or_(alias_b.test_run_id == test_run_id_b, alias_b.test_run_id.is_(None)),
+            alias_a.status.is_distinct_from(alias_b.status),
+ )
+
+ diff = defaultdict(list)
+        for run_a_status, run_b_status, test_definition_id in get_current_session().execute(query):
+            diff[(run_a_status, run_b_status)].append(test_definition_id)
+
+ return [(*statuses, id_list) for statuses, id_list in diff.items()]
diff --git a/testgen/common/models/test_run.py b/testgen/common/models/test_run.py
index 7c0701a8..01671315 100644
--- a/testgen/common/models/test_run.py
+++ b/testgen/common/models/test_run.py
@@ -1,7 +1,7 @@
from collections.abc import Iterable
from dataclasses import dataclass
from datetime import UTC, datetime
-from typing import Literal, NamedTuple, TypedDict
+from typing import Literal, NamedTuple, Self, TypedDict
from uuid import UUID, uuid4
import streamlit as st
@@ -12,6 +12,7 @@
from testgen.common.models import get_current_session
from testgen.common.models.entity import Entity, EntityMinimal
+from testgen.common.models.test_result import TestResultStatus
from testgen.common.models.test_suite import TestSuite
from testgen.utils import is_uuid4
@@ -46,6 +47,7 @@ class TestRunSummary(EntityMinimal):
test_endtime: datetime
table_groups_name: str
test_suite: str
+ project_name: str
status: TestRunStatus
progress: list[ProgressStep]
process_id: int
@@ -59,7 +61,6 @@ class TestRunSummary(EntityMinimal):
dismissed_ct: int
dq_score_testing: float
-
class LatestTestRun(NamedTuple):
id: str
run_time: datetime
@@ -129,11 +130,35 @@ def get_latest_run(cls, project_code: str) -> LatestTestRun | None:
return LatestTestRun(str(result["id"]), result["test_starttime"])
return None
+ def get_previous(self) -> Self | None:
+ query = (
+ select(TestRun)
+ .join(TestSuite)
+ .where(
+ TestRun.test_suite_id == self.test_suite_id,
+ TestRun.status == "Complete",
+ TestRun.test_starttime < self.test_starttime,
+ )
+ .order_by(desc(TestRun.test_starttime))
+ .limit(1)
+ )
+ return get_current_session().scalar(query)
+
+ @property
+ def ct_by_status(self):
+ return {
+ TestResultStatus.Error: self.error_ct,
+ TestResultStatus.Failed: self.failed_ct,
+ TestResultStatus.Warning: self.warning_ct,
+ TestResultStatus.Log: self.log_ct,
+ TestResultStatus.Passed: self.passed_ct,
+ }
+
@classmethod
@st.cache_data(show_spinner=False)
def select_summary(
cls,
- project_code: str,
+ project_code: str | None = None,
table_group_id: str | None = None,
test_suite_id: str | None = None,
test_run_ids: list[str] | None = None,
@@ -197,6 +222,7 @@ def select_summary(
test_runs.test_endtime,
table_groups.table_groups_name,
test_suites.test_suite,
+ projects.project_name,
test_runs.status,
test_runs.progress,
test_runs.process_id,
@@ -214,8 +240,9 @@ def select_summary(
INNER JOIN test_suites ON (test_runs.test_suite_id = test_suites.id)
INNER JOIN table_groups ON (test_suites.table_groups_id = table_groups.id)
INNER JOIN projects ON (test_suites.project_code = projects.project_code)
- WHERE test_suites.project_code = :project_code
- {"AND test_suites.table_groups_id = :table_group_id" if table_group_id else ""}
+ WHERE TRUE
+ {" AND test_suites.project_code = :project_code" if project_code else ""}
+ {" AND test_suites.table_groups_id = :table_group_id" if table_group_id else ""}
{" AND test_suites.id = :test_suite_id" if test_suite_id else ""}
{" AND test_runs.id IN :test_run_ids" if test_run_ids else ""}
ORDER BY test_runs.test_starttime DESC;
@@ -237,12 +264,18 @@ def has_running_process(cls, ids: list[str]) -> bool:
return process_count > 0
@classmethod
- def cancel_all_running(cls) -> None:
- query = update(cls).where(cls.status == "Running").values(status="Cancelled", test_endtime=datetime.now(UTC))
+ def cancel_all_running(cls) -> list[UUID]:
+ query = (
+ update(cls)
+ .where(cls.status == "Running")
+ .values(status="Cancelled", test_endtime=datetime.now(UTC))
+ .returning(cls.id)
+ )
db_session = get_current_session()
- db_session.execute(query)
+ rows = db_session.execute(query)
db_session.commit()
cls.clear_cache()
+ return [r.id for r in rows]
@classmethod
def cancel_run(cls, run_id: str | UUID) -> None:
diff --git a/testgen/common/notifications/__init__.py b/testgen/common/notifications/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/testgen/common/email.py b/testgen/common/notifications/base.py
similarity index 66%
rename from testgen/common/email.py
rename to testgen/common/notifications/base.py
index 1579ce05..1ff02e32 100644
--- a/testgen/common/email.py
+++ b/testgen/common/notifications/base.py
@@ -1,4 +1,8 @@
+import functools
+import inspect
import logging
+import operator
+import re
import smtplib
import ssl
from collections.abc import Mapping
@@ -20,6 +24,10 @@
)
+def smtp_configured() -> bool:
+ return all(getattr(settings, setting_name) is not None for setting_name in MANDATORY_SETTINGS)
+
+
class EmailTemplateException(Exception):
pass
@@ -28,8 +36,31 @@ class BaseEmailTemplate:
def __init__(self):
compiler = Compiler()
- self.compiled_subject = compiler.compile(self.get_subject_template())
- self.compiled_body = compiler.compile(self.get_body_template())
+ partials = {}
+
+ def op_helper(op, _, *args):
+ return getattr(operator, op)(*args)
+
+ helpers = {
+ op.replace("_", ""): functools.partial(op_helper, op)
+ for op in (
+ "eq", "ge", "gt", "le", "lt", "add", "sub", "and_", "or_", "contains",
+ )
+ }
+ helpers["len"] = lambda _, *args: len(*args)
+
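+        # Register get_<name>_template() methods as Handlebars partials and <name>_helper() methods as helpers.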
+ for name, func in inspect.getmembers(self.__class__, predicate=callable):
+ if (match := re.match(r"get_(\w+)_template", name)) and match.group(1) not in ("subject", "body"):
+ partials[match.group(1)] = compiler.compile(func(self))
+ if match := re.match(r"(\w+)_helper", name):
+ helpers[match.group(1)] = func
+
+ self.compiled_subject = functools.partial(
+ compiler.compile(self.get_subject_template()), partials=partials, helpers=helpers,
+ )
+ self.compiled_body = functools.partial(
+ compiler.compile(self.get_body_template()), partials=partials, helpers=helpers,
+ )
def validate_settings(self):
missing_settings = [
@@ -71,7 +102,8 @@ def send_mime_message(self, recipients: list[str], message: MIMEMultipart) -> di
smtp_server.login(settings.SMTP_USERNAME, settings.SMTP_PASSWORD)
response = smtp_server.sendmail(settings.EMAIL_FROM_ADDRESS, recipients, message.as_string())
except Exception as e:
- LOG.error("Template '%s' failed to send email with: %s", self.__class__.__name__, e) # noqa: TRY400
+ LOG.error("Template '%s' failed to send email with: %s", self.__class__.__name__, e) # noqa: TRY400
+ raise EmailTemplateException("Failed sending email notifications") from e
else:
return response
diff --git a/testgen/common/notifications/notifications.py b/testgen/common/notifications/notifications.py
new file mode 100644
index 00000000..e26fdab9
--- /dev/null
+++ b/testgen/common/notifications/notifications.py
@@ -0,0 +1,411 @@
+import math
+from datetime import datetime
+
+from testgen.common.models.profiling_run import ProfilingRunStatus
+from testgen.common.models.test_definition import TestRunStatus
+from testgen.common.notifications.base import BaseEmailTemplate
+from testgen.utils import friendly_score
+
+
+class BaseNotificationTemplate(BaseEmailTemplate):
+
+ def pluralize_helper(self, count: int, singular: str, plural: str) -> str:
+ return singular if count == 1 else plural
+
+ def format_number_helper(self, number: int) -> str:
+ return "" if number is None else f"{number:,}"
+
+ def format_dt_helper(self, dt: datetime) -> str:
+ return "" if dt is None else dt.strftime("%b %d, %-I:%M %p UTC")
+
+ def format_duration_helper(self, start_time: datetime, end_time: datetime) -> str:
+ total_seconds = abs(end_time - start_time).total_seconds()
+ units = [
+ (math.floor(total_seconds / (3600 * 24)), "d"),
+ (math.floor((total_seconds % (3600 * 24)) / 3600), "h"),
+ (math.floor((total_seconds % 3600) / 60), "m"),
+ (round(total_seconds % 60), "s"),
+ ]
+        formatted = " ".join(f"{value}{suffix}" for value, suffix in units if value)
+ return formatted.strip() or "< 1s"
+
+ def format_status_helper(self, status: TestRunStatus | ProfilingRunStatus) -> str:
+ return {
+ "Complete": "Completed",
+ "Cancelled": "Canceled",
+ }.get(status, status)
+
+ def format_score_helper(self, score: float) -> str:
+ return friendly_score(score)
+
+ def percentage_helper(self, value: int, total: int) -> int:
+ return round((value * 100) / total)
+
+ def truncate_helper(self, length: int, text: str | None) -> str:
+ if not text:
+ return "-"
+ return text if len(text) <= length else f"{text[:length-1]}…"
+
+ def get_main_content_template(self) -> str:
+ raise NotImplementedError
+
+ def get_extra_css_template(self) -> str:
+ return ""
+
+ def get_body_template(self) -> str:
+ return """
+
+
+
+
+
+ DataKitchen
+
+
+
+
+
+
+
+
+
+
+
+
+
+ |
+
+ |
+
+
+
+ |
+
+ {{>main_content}}
+ |
+
+
+
+
+
+
+
+
+ |
+
+
+
+
+"""
diff --git a/testgen/common/notifications/profiling_run.py b/testgen/common/notifications/profiling_run.py
new file mode 100644
index 00000000..e97e494b
--- /dev/null
+++ b/testgen/common/notifications/profiling_run.py
@@ -0,0 +1,327 @@
+import logging
+from urllib.parse import quote
+
+from sqlalchemy import select
+
+from testgen.common.models import get_current_session, with_database_session
+from testgen.common.models.hygiene_issue import HygieneIssue
+from testgen.common.models.notification_settings import (
+ ProfilingRunNotificationSettings,
+ ProfilingRunNotificationTrigger,
+)
+from testgen.common.models.profiling_run import ProfilingRun
+from testgen.common.models.project import Project
+from testgen.common.models.settings import PersistedSetting
+from testgen.common.models.table_group import TableGroup
+from testgen.common.notifications.notifications import BaseNotificationTemplate
+from testgen.utils import log_and_swallow_exception
+
+LOG = logging.getLogger("testgen")
+
+
+class ProfilingRunEmailTemplate(BaseNotificationTemplate):
+
+ def get_subject_template(self) -> str:
+ return (
+ "[TestGen] Profiling Run {{format_status profiling_run.status}}: {{table_groups_name}}"
+ "{{#if issue_count}}"
+ ' | {{format_number issue_count}} hygiene {{pluralize issue_count "issue" "issues"}}'
+ "{{/if}}"
+ )
+
+ def get_title_template(self):
+ return """
+ TestGen Profiling Run - {{format_status profiling_run.status}}
+ """
+
+ def get_main_content_template(self):
+ return """
+
+
+
+ | Project |
+ {{project_name}} |
+ Schema |
+ {{table_group_schema}} |
+
+ View results on TestGen >
+ |
+
+
+
+ | Table Group |
+ {{table_groups_name}} |
+ Tables |
+ {{format_number profiling_run.table_ct}} |
+
+
+ | Start Time |
+ {{format_dt profiling_run.start_time}} |
+ Columns |
+ {{format_number profiling_run.column_ct}} |
+
+
+ | Duration |
+ {{format_duration profiling_run.start_time profiling_run.end_time}} |
+
+
+
+
+
+
+ | Issues Summary |
+ {{#if (eq profiling_run.status 'Complete')}}
+
+ View {{format_number issue_count}} issues >
+ |
+ {{/if}}
+
+
+ |
+ {{#if (eq profiling_run.status 'Complete')}}
+ {{#if (eq notification_trigger 'on_changes')}}
+ Profiling run detected new hygiene issues.
+ {{/if}}
+ {{#if (eq notification_trigger 'always')}}
+ {{#if issue_count}}
+ Profiling run detected hygiene issues.
+ {{/if}}
+ {{/if}}
+ {{/if}}
+ {{#if (eq profiling_run.status 'Error')}}
+ Profiling encountered an error.
+ {{/if}}
+ {{#if (eq profiling_run.status 'Cancelled')}}
+ Profiling run was canceled.
+ {{/if}}
+ |
+
+ {{#if (eq profiling_run.status 'Complete')}}
+
+
+
+
+
+ {{#each hygiene_issues_summary}}
+ |
+ {{priority}}
+ {{format_number count.active}}
+ |
+ {{/each}}
+
+
+ |
+
+ {{/if}}
+ {{#if (eq profiling_run.status 'Error')}}
+
+ {{profiling_run.log_message}} |
+
+ {{/if}}
+
+
+ {{#each hygiene_issues_summary}}
+ {{>result_table .}}
+ {{/each}}
+ """
+
+ def get_result_table_template(self):
+ return """
+ {{#if count.total}}
+
+
+
+ |
+ {{label}} |
+
+
+ View {{format_number count.total}} {{label}} >
+
+ |
+
+ {{#if (len issues)}}
+
+ |
+ Table |
+ Columns |
+ Issue |
+ Details |
+
+ {{#each issues}}
+
+ | {{#if is_new}}●{{/if}} |
+ {{truncate 30 table_name}} |
+ {{truncate 30 column_name}} |
+ {{issue_name}} |
+ {{truncate 50 detail}} |
+
+ {{/each}}
+
+ |
+
+ {{#if truncated}}
+ + {{truncated}} more
+ {{/if}}
+ |
+
+ ●
+ indicates new issues
+ |
+
+ {{/if}}
+
+
+ {{/if}}
+ """
+
+ def get_extra_css_template(self) -> str:
+ return """
+ .tg-summary-header td {
+ padding: 10px 0 10px 0;
+ font-size: 14px;
+ text-color: rgba(0, 0, 0, 0.54);
+ }
+ .tg-summary-counts td {
+ height: 32px;
+ border-left-width: 4px;
+ border-left-style: solid;
+ padding-left: 8px;
+ padding-right: 24px;
+ line-height: 1.2;
+ }
+ .tg-summary-counts-label {
+ font-size: 12px;
+ text-color: rgba(0, 0, 0, 0.54);
+ }
+ .tg-summary-counts-count {
+ font-size: 16px;
+ }
+ """
+
+
+@log_and_swallow_exception
+@with_database_session
+def send_profiling_run_notifications(profiling_run: ProfilingRun, result_list_ct=20):
+ notifications = list(
+ ProfilingRunNotificationSettings.select(enabled=True, table_group_id=profiling_run.table_groups_id)
+ )
+ if not notifications:
+ return
+
+ previous_run = profiling_run.get_previous()
+ issues = list(
+ HygieneIssue.select_with_diff(
+ profiling_run.id,
+ previous_run.id if previous_run else None,
+ limit=result_list_ct,
+ )
+ )
+
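+    # "on_changes" fires when the run errored or was cancelled, or when any issue is new
+    # (or of unknown newness because there is no previous run to compare against).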
+ triggers = {ProfilingRunNotificationTrigger.always}
+ if profiling_run.status in ("Error", "Cancelled") or {None, True} & {is_new for _, is_new in issues}:
+ triggers.add(ProfilingRunNotificationTrigger.on_changes)
+
+ notifications = [ns for ns in notifications if ns.trigger in triggers]
+ if not notifications:
+ return
+
+ profiling_run_issues_url = "".join(
+ (PersistedSetting.get("BASE_URL", ""), "/profiling-runs:hygiene?run_id=", str(profiling_run.id))
+ )
+
+ hygiene_issues_summary = []
+ counts = HygieneIssue.select_count_by_priority(profiling_run.id)
+ for priority, likelihood, label in (
+ ("Definite", "Definite", "definite issues"),
+ ("Likely", "Likely", "likely issues"),
+ ("Possible", "Possible", "possible issues"),
+ ("High", "Potential PII", "potential PII - high risk"),
+ ("Moderate", "Potential PII", "potential PII - moderate risk"),
+ ):
+ context_issues = [
+ {
+ "is_new": is_new,
+ "detail": issue.detail,
+ "table_name": issue.table_name,
+ "column_name": issue.column_name,
+ "issue_name": issue.type_.name,
+ }
+ for issue, is_new in issues
+ if issue.priority == priority
+ ]
+
+ hygiene_issues_summary.append(
+ {
+ "label": label,
+ "priority": priority,
+ "url": f"{profiling_run_issues_url}&likelihood={quote(likelihood)}",
+ "count": counts[priority],
+ "issues": context_issues,
+ "truncated": counts[priority].active - len(context_issues),
+ }
+ )
+
+ labels_query = (
+ select(Project.project_name, TableGroup.table_groups_name, TableGroup.table_group_schema)
+ .select_from(TableGroup)
+ .join(Project)
+ .where(TableGroup.id == profiling_run.table_groups_id)
+ )
+ context = {
+ "profiling_run": {
+ "id": str(profiling_run.id),
+ "issues_url": profiling_run_issues_url,
+ "results_url": "".join(
+ (PersistedSetting.get("BASE_URL", ""), "/profiling-runs:results?run_id=", str(profiling_run.id))
+ ),
+ "start_time": profiling_run.profiling_starttime,
+ "end_time": profiling_run.profiling_endtime,
+ "status": profiling_run.status,
+ "log_message": profiling_run.log_message,
+ "table_ct": profiling_run.table_ct,
+ "column_ct": profiling_run.column_ct,
+ },
+ "issue_count": sum(c.total for c in counts.values()),
+ "hygiene_issues_summary": hygiene_issues_summary,
+ **dict(get_current_session().execute(labels_query).one()),
+ }
+
+ for ns in notifications:
+ try:
+ ProfilingRunEmailTemplate().send(
+ ns.recipients, {**context, "notification_trigger": ns.trigger.value}
+ )
+ except Exception:
+            LOG.exception("Failed sending profiling run email notifications")
diff --git a/testgen/common/notifications/score_drop.py b/testgen/common/notifications/score_drop.py
new file mode 100644
index 00000000..62dfe817
--- /dev/null
+++ b/testgen/common/notifications/score_drop.py
@@ -0,0 +1,206 @@
+import logging
+from collections import defaultdict
+
+from sqlalchemy import select
+
+from testgen.common.models import get_current_session, with_database_session
+from testgen.common.models.notification_settings import ScoreDropNotificationSettings
+from testgen.common.models.project import Project
+from testgen.common.models.scores import ScoreDefinition
+from testgen.common.models.settings import PersistedSetting
+from testgen.common.notifications.notifications import BaseNotificationTemplate
+from testgen.utils import log_and_swallow_exception
+
+LOG = logging.getLogger("testgen")
+
+
+class ScoreDropEmailTemplate(BaseNotificationTemplate):
+
+ def score_color_helper(self, score: float) -> str:
+ if score >= 0.96:
+ return "green"
+ if score >= 0.91:
+ return "yellow"
+ if score >= 0.86:
+ return "orange"
+ return "red"
+
+ def get_subject_template(self) -> str:
+ return (
+ "[TestGen] Quality Score Dropped: {{ definition.name }}"
+ "{{#each diff}}{{#if notify}} | {{ label }}: {{ format_score current }}{{/if}}{{/each}}"
+ )
+
+ def get_title_template(self):
+ return "Quality Score dropped below threshold"
+
+ def get_main_content_template(self):
+ return """
+
+
+
+ | Project |
+ {{project_name}} |
+
+ View on TestGen >
+ |
+
+
+ | Scorecard |
+ {{definition.name}} |
+
+
+ |
+ {{#each diff}}
+ {{#if notify}}
+ {{label}} score dropped below {{threshold}}.
+ {{/if}}
+ {{/each}}
+ |
+
+
+
+
+ {{#each diff}}
+ |
+ {{format_score current}}
+ {{label}} Score
+ {{#if decrease}}
+ ↓ {{format_score decrease}}
+ {{/if}}
+ {{#if increase}}
+ ↑ {{format_score increase}}
+ {{/if}}
+ |
+ |
+ {{/each}}
+
+
+
"""
+
+ def get_extra_css_template(self) -> str:
+ return """
+ .score {
+ display: block;
+ width: 100px;
+ height: 100px;
+ border-radius: 50%;
+ border-width: 4px;
+ border-style: solid;
+ text-align: center;
+ font-size: 14px;
+ }
+
+ .score__value {
+ margin-top: 22px;
+ margin-bottom: 2px;
+ font-size: 18px;
+ }
+
+ .score__label {
+ font-size: 14px;
+ color: rgba(0, 0, 0, 0.6);
+ }
+ """
+
+
+@log_and_swallow_exception
+@with_database_session
+def send_score_drop_notifications(notification_data: list[tuple[ScoreDefinition, str, float, float]]):
+
+ if not notification_data:
+ return
+
+ query = select(
+ ScoreDropNotificationSettings,
+ Project.project_name,
+ ).join(
+ Project, ScoreDropNotificationSettings.project_code == Project.project_code
+ ).where(
+ ScoreDropNotificationSettings.enabled.is_(True),
+ ScoreDropNotificationSettings.score_definition_id.in_({d.id for d, *_ in notification_data}),
+ )
+ ns_per_score_id = defaultdict(list)
+ project_name = None
+    for ns, project_name in get_current_session().execute(query).fetchall():
+        ns_per_score_id[ns.score_definition_id].append(ns)
+
+ diff_per_score_id = defaultdict(list)
+ for definition, *data in notification_data:
+ diff_per_score_id[definition.id].append((definition, *data))
+
+ for score_id in diff_per_score_id.keys() & ns_per_score_id.keys():
+ score_diff = diff_per_score_id[score_id]
+ definition = score_diff[0][0]
+ diff_per_cat = {cat: (prev, curr) for _, cat, prev, curr in score_diff}
+
+ for ns in ns_per_score_id[score_id]:
+
+ threshold_by_cat = {
+ "score": ns.total_score_threshold,
+ "cde_score": ns.cde_score_threshold,
+ }
+
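+            # A category is flagged for notification only when its score dropped and the new value is below the configured threshold.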
+ context_diff = [
+ {
+ "category": cat,
+ "label": {"score": "Total", "cde_score": "CDE"}[cat],
+ "prev": diff[0],
+ "current": diff[1],
+ "threshold": threshold_by_cat[cat],
+ "decrease": max(diff[0] - diff[1], 0),
+ "increase": max(diff[1] - diff[0], 0),
+ "notify": (
+ diff[0] > diff[1]
+ and threshold_by_cat[cat] is not None
+ and diff[1] * 100 < threshold_by_cat[cat]
+ ),
+ }
+ for cat, diff in diff_per_cat.items()
+ ]
+
+ if not any(d["notify"] for d in context_diff):
+ continue
+
+ context = {
+ "project_name": project_name,
+ "definition": definition,
+ "scorecard_url": "".join(
+ (
+ PersistedSetting.get("BASE_URL", ""),
+ "/quality-dashboard:score-details?definition_id=",
+ str(definition.id),
+ )
+ ),
+ "diff": context_diff,
+ }
+
+ try:
+ ScoreDropEmailTemplate().send(ns.recipients, context)
+ except Exception:
+                LOG.exception("Failed sending score drop email notifications")
+
+
+@log_and_swallow_exception
+def collect_score_notification_data(
+ notification_data: list[tuple[ScoreDefinition, str, float, float]],
+ definition: ScoreDefinition,
+ fresh_score_card: dict,
+) -> None:
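+    # Capture (definition, category, previous score, fresh score) tuples before the stored results
+    # are replaced by the refresh.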
+ notification_data.extend(
+ [
+ (definition, r.category, r.score, fresh_score_card[r.category])
+ for r in definition.results
+ if r.category in ("score", "cde_score") and r.score is not None and fresh_score_card[r.category] is not None
+ ]
+ )
diff --git a/testgen/common/notifications/test_run.py b/testgen/common/notifications/test_run.py
new file mode 100644
index 00000000..7a24578f
--- /dev/null
+++ b/testgen/common/notifications/test_run.py
@@ -0,0 +1,344 @@
+import logging
+
+from sqlalchemy import case, literal, select
+
+from testgen.common.models import get_current_session, with_database_session
+from testgen.common.models.notification_settings import TestRunNotificationSettings, TestRunNotificationTrigger
+from testgen.common.models.settings import PersistedSetting
+from testgen.common.models.test_definition import TestType
+from testgen.common.models.test_result import TestResult, TestResultStatus
+from testgen.common.models.test_run import TestRun
+from testgen.common.notifications.notifications import BaseNotificationTemplate
+from testgen.utils import log_and_swallow_exception
+
+LOG = logging.getLogger("testgen")
+
+
+class TestRunEmailTemplate(BaseNotificationTemplate):
+
+ def get_subject_template(self) -> str:
+ return (
+ "[TestGen] Test Run {{format_status test_run.status}}: {{test_run.test_suite}}"
+ "{{#with test_run}}"
+ '{{#if failed_ct}} | {{format_number failed_ct}} {{pluralize failed_ct "failure" "failures"}}{{/if}}'
+ '{{#if warning_ct}} | {{format_number warning_ct}} {{pluralize warning_ct "warning" "warnings"}}{{/if}}'
+ '{{#if error_ct}} | {{format_number error_ct}} {{pluralize error_ct "error" "errors"}}{{/if}}'
+ "{{/with}}"
+ )
+
+ def get_title_template(self):
+ return """
+ TestGen Test Run - {{format_status test_run.status}}
+ """
+
+ def get_main_content_template(self):
+ return """
+
+
+
+ | Project |
+ {{test_run.project_name}} |
+
+
+ | Table Group |
+ {{test_run.table_groups_name}} |
+
+
+ | Test Suite |
+ {{test_run.test_suite}} |
+
+
+ | Start Time |
+ {{format_dt test_run.test_starttime}} |
+
+
+ | Duration |
+ {{format_duration test_run.test_starttime test_run.test_endtime}} |
+
+
+
+
+
+
+ | Results Summary |
+ {{#if (eq test_run.status 'Complete')}}
+
+ View on TestGen >
+ |
+ {{/if}}
+
+
+ |
+ {{#if (eq test_run.status 'Complete')}}
+ {{#if (eq notification_trigger 'on_changes')}}
+ Test run has new failures, warnings, or errors.
+ {{/if}}
+ {{#if (eq notification_trigger 'on_failures')}}
+ Test run has failures or errors.
+ {{/if}}
+ {{#if (eq notification_trigger 'on_warnings')}}
+ Test run has failures, warnings, or errors.
+ {{/if}}
+ {{/if}}
+ {{#if (eq test_run.status 'Error')}}
+ Test execution encountered an error.
+ {{/if}}
+ {{#if (eq test_run.status 'Cancelled')}}
+ Test run was canceled.
+ {{/if}}
+ |
+
+ {{#if (eq test_run.status 'Complete')}}
+
+ |
+
+
+
+ ●
+ Passed: {{format_number test_run.passed_ct}}
+
+
+ ●
+ Warning: {{format_number test_run.warning_ct}}
+
+
+ ●
+ Failed: {{format_number test_run.failed_ct}}
+
+
+ ●
+ Error: {{format_number test_run.error_ct}}
+
+
+ ●
+ Log: {{format_number test_run.log_ct}}
+
+
+ |
+
+ {{/if}}
+ {{#if (eq test_run.status 'Error')}}
+
+ {{test_run.log_message}} |
+
+ {{/if}}
+
+
+ {{#each test_result_summary}}
+ {{>result_table .}}
+ {{/each}}
+ """
+
+ def get_result_table_template(self):
+ return """
+ {{#if total}}
+
+
+
+ |
+ {{label}} |
+
+
+ View {{format_number total}} {{label}} >
+
+ |
+
+
+ |
+ Table |
+ Columns/Focus |
+ Test Type |
+ Details |
+
+ {{#each result_list}}
+
+ | {{#if is_new}}●{{/if}} |
+ {{truncate 30 table_name}} |
+ {{truncate 30 column_names}} |
+ {{test_type}} |
+ {{truncate 50 message}} |
+
+ {{/each}}
+
+ |
+
+ {{#if truncated}}
+ + {{truncated}} more
+ {{/if}}
+ |
+
+ ●
+ indicates new {{label}}
+ |
+
+
+
+ {{/if}}
+ """
+
+ def get_extra_css_template(self) -> str:
+ return """
+ .tg-summary-bar {
+ width: 350px;
+ border-radius: 4px;
+ overflow: hidden;
+ }
+
+ .tg-summary-bar td {
+ height: 10px;
+ padding: 0;
+ line-height: 10px;
+ font-size: 0;
+ }
+
+ .tg-summary-bar--caption {
+ margin-top: 4px;
+ color: var(--caption-text-color);
+ font-size: 13px;
+ font-style: italic;
+ line-height: 1;
+ }
+
+ .tg-summary-bar--legend {
+ width: auto;
+ margin-right: 8px;
+ }
+
+ .tg-summary-bar--legend-dot {
+ margin-right: 2px;
+ font-style: normal;
+ }
+ """
+
+
+@log_and_swallow_exception
+@with_database_session
+def send_test_run_notifications(test_run: TestRun, result_list_ct=20, result_status_min=5):
+
+ notifications = list(TestRunNotificationSettings.select(enabled=True, test_suite_id=test_run.test_suite_id))
+ if not notifications:
+ return
+
+ changed_td_id_list = []
+ if previous_run := test_run.get_previous():
+ for _, status, td_id_list in TestResult.diff(previous_run.id, test_run.id):
+ if status in (TestResultStatus.Failed, TestResultStatus.Warning, TestResultStatus.Error):
+ changed_td_id_list.extend(td_id_list)
+
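+    # Determine which triggers this run satisfies; errored or cancelled runs notify all subscriptions.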
+ triggers = {TestRunNotificationTrigger.always}
+ if test_run.status in ("Error", "Cancelled"):
+ triggers.update(TestRunNotificationTrigger)
+ else:
+ if test_run.error_ct + test_run.failed_ct:
+ triggers.update({TestRunNotificationTrigger.on_failures, TestRunNotificationTrigger.on_warnings})
+ elif test_run.warning_ct:
+ triggers.add(TestRunNotificationTrigger.on_warnings)
+ if changed_td_id_list:
+ triggers.add(TestRunNotificationTrigger.on_changes)
+
+ notifications = [ns for ns in notifications if ns.trigger in triggers]
+ if not notifications:
+ return
+
+ result_list_by_status = {}
+ summary_statuses = (
+ (TestResultStatus.Failed, "failures"),
+ (TestResultStatus.Warning, "warnings"),
+ (TestResultStatus.Error, "errors"),
+ )
+
+ changed_case = case(
+ (TestResult.test_definition_id.in_(changed_td_id_list), literal(True)),
+ else_=literal(False),
+ )
+ result_count_by_status = {
+ status: min(result_status_min, test_run.ct_by_status[status])
+ for status, _ in summary_statuses
+ }
+
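+    # Each status gets at least result_status_min example rows; the remaining budget
+    # (up to result_list_ct rows overall) is handed out in Failed, Warning, Error order.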
+ for status, _ in summary_statuses:
+ result_count_by_status[status] += min(
+ (
+ result_list_ct - sum(result_count_by_status.values()),
+ test_run.ct_by_status[status] - result_count_by_status[status],
+ )
+ )
+
+ if not result_count_by_status[status]:
+ continue
+
+ query = (
+ select(
+ TestResult.table_name,
+ TestResult.column_names,
+ TestResult.message,
+ changed_case.label("is_new"),
+ TestType.test_name_short.label("test_type"),
+ )
+ .join(TestType, TestType.test_type == TestResult.test_type)
+ .where(TestResult.test_run_id == test_run.id, TestResult.status == status)
+ .order_by(changed_case.desc())
+ .limit(result_count_by_status[status])
+ )
+
+ result_list_by_status[status] = [{**r} for r in get_current_session().execute(query)]
+
+ tr_summary, = TestRun.select_summary(test_run_ids=[test_run.id])
+
+ context = {
+ "test_run": tr_summary,
+ "test_run_url": "".join(
+ (
+ PersistedSetting.get("BASE_URL", ""),
+ "/test-runs:results?run_id=",
+ str(test_run.id),
+ )
+ ),
+ "test_run_id": str(test_run.id),
+ "test_result_summary": [
+ {
+ "status": status.value,
+ "label": label,
+ "total": test_run.ct_by_status[status],
+ "truncated": test_run.ct_by_status[status] - len(result_list),
+ "result_list": result_list,
+ }
+ for status, label in summary_statuses
+ if (result_list := result_list_by_status.get(status, None))
+ ]
+ }
+
+ for ns in notifications:
+ try:
+ TestRunEmailTemplate().send(
+ ns.recipients, {**context, "notification_trigger": ns.trigger.value}
+ )
+ except Exception:
+ LOG.exception("Failed sending test run email notifications")
diff --git a/testgen/template/dbsetup/030_initialize_new_schema_structure.sql b/testgen/template/dbsetup/030_initialize_new_schema_structure.sql
index 2df5365e..1880e323 100644
--- a/testgen/template/dbsetup/030_initialize_new_schema_structure.sql
+++ b/testgen/template/dbsetup/030_initialize_new_schema_structure.sql
@@ -863,3 +863,33 @@ CREATE TABLE settings (
INSERT INTO tg_revision (component, revision)
VALUES ('metadata_db', 0);
+
+CREATE TABLE notification_settings (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ project_code VARCHAR(30) NOT NULL,
+
+ event VARCHAR(20) NOT NULL,
+ enabled BOOLEAN NOT NULL DEFAULT TRUE,
+ recipients JSONB NOT NULL DEFAULT '[]'::jsonb,
+
+ test_suite_id UUID NULL DEFAULT NULL,
+ table_group_id UUID NULL DEFAULT NULL,
+ score_definition_id UUID NULL DEFAULT NULL,
+
+ settings JSONB NOT NULL DEFAULT '{}'::jsonb,
+
+ CONSTRAINT fk_notification_settings_test_suite
+ FOREIGN KEY (test_suite_id)
+ REFERENCES test_suites (id)
+ ON DELETE CASCADE,
+
+ CONSTRAINT fk_notification_settings_table_group
+ FOREIGN KEY (table_group_id)
+ REFERENCES table_groups (id)
+ ON DELETE CASCADE,
+
+ CONSTRAINT fk_notification_settings_score_definition
+ FOREIGN KEY (score_definition_id)
+ REFERENCES score_definitions (id)
+ ON DELETE CASCADE
+);
diff --git a/testgen/template/dbsetup_test_types/test_types_CUSTOM.yaml b/testgen/template/dbsetup_test_types/test_types_CUSTOM.yaml
index bdbbc883..9c404a15 100644
--- a/testgen/template/dbsetup_test_types/test_types_CUSTOM.yaml
+++ b/testgen/template/dbsetup_test_types/test_types_CUSTOM.yaml
@@ -23,7 +23,7 @@ test_types:
default_parm_prompts: |-
Custom SQL Query Returning Error Records
default_parm_help: |-
- Query should return records indicating one or more errors. The test passes if no records are returned. Results of the query will be shown when you click `Review Source Data` for a failed test, so be sure to include enough data in your results to follow-up. \n\nA query can refer to any tables in the database. You must hard-code the schema or use `{DATA_SCHEMA}` to represent the schema defined for the Table Group.
+    Query should return records indicating one or more errors. The test passes if no records are returned. Results of the query will be shown when you click `Review Source Data` for a failed test, so be sure to include enough data in your results to follow up. A query can refer to any tables in the database. You must hard-code the schema or use `{DATA_SCHEMA}` to represent the schema defined for the Table Group.
default_severity: Fail
run_type: QUERY
test_scope: custom
diff --git a/testgen/template/dbupgrade/0161_incremental_upgrade.sql b/testgen/template/dbupgrade/0161_incremental_upgrade.sql
new file mode 100644
index 00000000..d520d324
--- /dev/null
+++ b/testgen/template/dbupgrade/0161_incremental_upgrade.sql
@@ -0,0 +1,31 @@
+SET SEARCH_PATH TO {SCHEMA_NAME};
+
+CREATE TABLE notification_settings (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ project_code VARCHAR(30) NOT NULL,
+
+ event VARCHAR(20) NOT NULL,
+ enabled BOOLEAN NOT NULL DEFAULT TRUE,
+ recipients JSONB NOT NULL DEFAULT '[]'::jsonb,
+
+ test_suite_id UUID NULL DEFAULT NULL,
+ table_group_id UUID NULL DEFAULT NULL,
+ score_definition_id UUID NULL DEFAULT NULL,
+
+ settings JSONB NOT NULL DEFAULT '{}'::jsonb,
+
+ CONSTRAINT fk_notification_settings_test_suite
+ FOREIGN KEY (test_suite_id)
+ REFERENCES test_suites (id)
+ ON DELETE CASCADE,
+
+ CONSTRAINT fk_notification_settings_table_group
+ FOREIGN KEY (table_group_id)
+ REFERENCES table_groups (id)
+ ON DELETE CASCADE,
+
+ CONSTRAINT fk_notification_settings_score_definition
+ FOREIGN KEY (score_definition_id)
+ REFERENCES score_definitions (id)
+ ON DELETE CASCADE
+);
diff --git a/testgen/template/execution/update_historic_thresholds.sql b/testgen/template/execution/update_historic_thresholds.sql
index 51d4340a..4d3fbbeb 100644
--- a/testgen/template/execution/update_historic_thresholds.sql
+++ b/testgen/template/execution/update_historic_thresholds.sql
@@ -1,27 +1,71 @@
-WITH stats AS (
- SELECT
- d.id AS test_definition_id,
- COALESCE(
- MIN(r.result_signal) FILTER (WHERE d.history_calculation = 'Value'),
- MIN(r.result_signal::NUMERIC) FILTER (WHERE d.history_calculation = 'Minimum')::VARCHAR,
- MAX(r.result_signal::NUMERIC) FILTER (WHERE d.history_calculation = 'Maximum')::VARCHAR,
- SUM(r.result_signal::NUMERIC) FILTER (WHERE d.history_calculation = 'Sum')::VARCHAR,
- AVG(r.result_signal::NUMERIC) FILTER (WHERE d.history_calculation = 'Average')::VARCHAR
- ) as calc_signal
- FROM test_definitions d
- INNER JOIN LATERAL (
- SELECT result_signal
- FROM test_results tr
- WHERE tr.test_definition_id = d.id
- ORDER BY tr.test_time DESC
- LIMIT CASE WHEN d.history_calculation = 'Value' THEN 1 ELSE d.history_lookback END
- ) AS r ON TRUE
- WHERE d.test_suite_id = :TEST_SUITE_ID
- AND d.test_active = 'Y'
- AND d.history_lookback IS NOT NULL
- GROUP BY d.id, d.history_calculation, d.history_lookback
+WITH filtered_defs AS (
+ -- Step 1: Filter definitions first to minimize join surface area
+ SELECT id,
+ test_suite_id,
+ schema_name,
+ table_name,
+ column_name,
+ test_type,
+ history_calculation,
+ CASE WHEN history_calculation = 'Value' THEN 1 ELSE COALESCE(history_lookback, 1) END AS lookback
+ FROM test_definitions
+ WHERE test_suite_id = :TEST_SUITE_ID
+ AND test_active = 'Y'
+ AND history_calculation IS NOT NULL
+ AND history_lookback IS NOT NULL
+),
+normalized_results AS (
+ -- Step 2: Normalize definition IDs for autogenerated tests
+ SELECT CASE
+ WHEN r.auto_gen THEN d.id
+ ELSE r.test_definition_id
+ END AS test_definition_id,
+ r.test_time,
+ r.result_signal
+ FROM test_results r
+ LEFT JOIN filtered_defs d ON r.auto_gen = TRUE
+ AND r.test_suite_id = d.test_suite_id
+ AND r.schema_name = d.schema_name
+ AND r.table_name IS NOT DISTINCT FROM d.table_name
+ AND r.column_names IS NOT DISTINCT FROM d.column_name
+ AND r.test_type = d.test_type
+ WHERE r.test_suite_id = :TEST_SUITE_ID
+),
+ranked_results AS (
+ -- Step 3: Use a Window Function to get the N most recent results
+ SELECT n.test_definition_id,
+ n.result_signal,
+ CASE
+ WHEN n.result_signal ~ '^-?[0-9]*\.?[0-9]+$' THEN n.result_signal::NUMERIC
+ ELSE NULL
+ END AS signal_numeric,
+ ROW_NUMBER() OVER (PARTITION BY n.test_definition_id ORDER BY n.test_time DESC) AS rank
+ FROM normalized_results n
+ WHERE n.test_definition_id IN (SELECT id FROM filtered_defs)
+),
+stats AS (
+ -- Step 4: Aggregate only the rows within the lookback range
+ SELECT d.id AS test_definition_id,
+ d.history_calculation,
+ MAX(CASE WHEN rr.rank = 1 THEN rr.result_signal END) AS val,
+ MIN(rr.signal_numeric) AS min,
+ MAX(rr.signal_numeric) AS max,
+ SUM(rr.signal_numeric) AS sum,
+ AVG(rr.signal_numeric) AS avg
+ FROM filtered_defs d
+ JOIN ranked_results rr ON d.id = rr.test_definition_id
+ WHERE rr.rank <= d.lookback
+ GROUP BY d.id,
+ d.history_calculation
)
UPDATE test_definitions t
-SET baseline_value = s.calc_signal
+SET baseline_value = CASE
+ WHEN s.history_calculation = 'Value' THEN s.val
+ WHEN s.history_calculation = 'Minimum' THEN s.min::VARCHAR
+ WHEN s.history_calculation = 'Maximum' THEN s.max::VARCHAR
+ WHEN s.history_calculation = 'Sum' THEN s.sum::VARCHAR
+ WHEN s.history_calculation = 'Average' THEN s.avg::VARCHAR
+ ELSE NULL
+ END
FROM stats s
-WHERE t.id = s.test_definition_id;
\ No newline at end of file
+WHERE t.id = s.test_definition_id;
diff --git a/testgen/ui/components/frontend/js/components/empty_state.js b/testgen/ui/components/frontend/js/components/empty_state.js
index 67d7b677..1ac1f55e 100644
--- a/testgen/ui/components/frontend/js/components/empty_state.js
+++ b/testgen/ui/components/frontend/js/components/empty_state.js
@@ -54,6 +54,10 @@ const EMPTY_STATE_MESSAGE = {
line1: 'Track data quality scores',
line2: 'Filter or select columns to assess the quality of your data assets across different categories.',
},
+ notifications: {
+ line1: '',
+ line2: 'Configure an SMTP email server for TestGen to get alerts on profiling runs, test runs, and quality scorecards.',
+ },
};
const EmptyState = (/** @type Properties */ props) => {
diff --git a/testgen/ui/components/frontend/js/components/expansion_panel.js b/testgen/ui/components/frontend/js/components/expansion_panel.js
index 777cdf0d..40f38bf5 100644
--- a/testgen/ui/components/frontend/js/components/expansion_panel.js
+++ b/testgen/ui/components/frontend/js/components/expansion_panel.js
@@ -3,6 +3,7 @@
* @type {object}
* @property {string} title
* @property {string?} testId
+ * @property {bool} expanded
*/
import van from '../van.min.js';
@@ -12,14 +13,14 @@ import { Icon } from './icon.js';
const { div, span } = van.tags;
/**
- *
+ *
* @param {Options} options
- * @param {...HTMLElement} children
+ * @param {...HTMLElement} children
*/
const ExpansionPanel = (options, ...children) => {
loadStylesheet('expansion-panel', stylesheet);
- const expanded = van.state(false);
+ const expanded = van.state(options.expanded ?? false);
const icon = van.derive(() => expanded.val ? 'keyboard_arrow_up' : 'keyboard_arrow_down');
const expansionClass = van.derive(() => expanded.val ? '' : 'collapsed');
diff --git a/testgen/ui/components/frontend/js/components/input.js b/testgen/ui/components/frontend/js/components/input.js
index 5b77975f..130aba5c 100644
--- a/testgen/ui/components/frontend/js/components/input.js
+++ b/testgen/ui/components/frontend/js/components/input.js
@@ -1,12 +1,12 @@
/**
* @import { Properties as TooltipProperties } from './tooltip.js';
* @import { Validator } from '../form_validators.js';
- *
+ *
* @typedef InputState
* @type {object}
* @property {boolean} valid
* @property {string[]} errors
- *
+ *
* @typedef Properties
* @type {object}
* @property {string?} id
@@ -32,6 +32,7 @@
* @property {string?} class
* @property {string?} testId
* @property {any?} prefix
+ * @property {number} step
* @property {Array?} validators
*/
import van from '../van.min.js';
@@ -136,6 +137,7 @@ const Input = (/** @type Properties */ props) => {
name: props.name ?? '',
type: inputType,
disabled: props.disabled,
+ ...(inputType.val === 'number' ? {step: getValue(props.step)} : {}),
...(props.readonly ? {readonly: true} : {}),
...(props.passwordSuggestions ?? true ? {} : {autocomplete: 'off', 'data-op-ignore': true}),
placeholder: () => getValue(props.placeholder) ?? '',
@@ -187,7 +189,7 @@ const Input = (/** @type Properties */ props) => {
);
},
),
- () =>
+ () =>
isDirty.val && firstError.val
? small({ class: 'tg-input--error' }, firstError)
: '',
diff --git a/testgen/ui/components/frontend/js/components/textarea.js b/testgen/ui/components/frontend/js/components/textarea.js
index 18165312..828d8c86 100644
--- a/testgen/ui/components/frontend/js/components/textarea.js
+++ b/testgen/ui/components/frontend/js/components/textarea.js
@@ -13,6 +13,8 @@
* @property {function(string, InputState)?} onChange
* @property {string?} style
* @property {string?} class
+ * @property {number?} width
+ * @property {number?} height
* @property {string?} testId
*/
import van from '../van.min.js';
diff --git a/testgen/ui/components/frontend/js/components/tooltip.js b/testgen/ui/components/frontend/js/components/tooltip.js
index 6663afb1..e3b23a39 100644
--- a/testgen/ui/components/frontend/js/components/tooltip.js
+++ b/testgen/ui/components/frontend/js/components/tooltip.js
@@ -3,11 +3,13 @@
// https://cdn.jsdelivr.net/npm/vanjs-ui@0.10.0/dist/van-ui.nomodule.js
/**
+ * @typedef {'top-left' | 'top' | 'top-right' | 'right' | 'bottom-right' | 'bottom' | 'bottom-left' | 'left'} TooltipPosition
+ *
* @typedef Properties
* @type {object}
* @property {string} text
* @property {boolean} show
- * @property {('top-left' | 'top' | 'top-right' | 'right' | 'bottom-right' | 'bottom' | 'bottom-left' | 'left')?} position
+ * @property {TooltipPosition?} position
* @property {number} width
* @property {string?} style
*/
diff --git a/testgen/ui/components/frontend/js/components/truncated_text.js b/testgen/ui/components/frontend/js/components/truncated_text.js
new file mode 100644
index 00000000..c5d50241
--- /dev/null
+++ b/testgen/ui/components/frontend/js/components/truncated_text.js
@@ -0,0 +1,39 @@
+/**
+ * @import { TooltipPosition } from './tooltip.js';
+ *
+ * @typedef TruncatedTextOptions
+ * @type {object}
+ * @property {number} max
+ * @property {string?} class
+ * @property {TooltipPosition?} tooltipPosition
+ */
+import van from '../van.min.js';
+import { withTooltip } from './tooltip.js';
+import { caseInsensitiveSort } from '../display_utils.js';
+
+const { div, span, i } = van.tags;
+
+/**
+ * @param {TruncatedTextOptions} options
+ * @param {string[]} children
+ */
+const TruncatedText = ({ max, ...options }, ...children) => {
+ const sortedChildren = [...children.sort((a, b) => a.length - b.length)];
+ const tooltipText = children.sort(caseInsensitiveSort).join(', ');
+
+ return div(
+ { class: () => `${options.class ?? ''}`, style: 'position: relative;' },
+ span(sortedChildren.slice(0, max).join(', ')),
+ sortedChildren.length > max
+ ? withTooltip(
+ i({class: 'text-caption'}, ` + ${sortedChildren.length - max} more`),
+ {
+ text: tooltipText,
+ position: options.tooltipPosition,
+ }
+ )
+ : '',
+ );
+};
+
+export { TruncatedText };
diff --git a/testgen/ui/components/frontend/js/form_validators.js b/testgen/ui/components/frontend/js/form_validators.js
index 0d9d78ad..635b8b6a 100644
--- a/testgen/ui/components/frontend/js/form_validators.js
+++ b/testgen/ui/components/frontend/js/form_validators.js
@@ -14,7 +14,7 @@ function required(value) {
}
/**
- * @param {(v: any) => bool} condition
+ * @param {(v: any) => bool} condition
* @returns {Validator}
*/
function requiredIf(condition) {
@@ -37,8 +37,8 @@ function noSpaces(value) {
}
/**
- *
- * @param {number} min
+ *
+ * @param {number} min
* @returns {Validator}
*/
function minLength(min) {
@@ -51,8 +51,8 @@ function minLength(min) {
}
/**
- *
- * @param {number} max
+ *
+ * @param {number} max
* @returns {Validator}
*/
function maxLength(max) {
@@ -64,10 +64,43 @@ function maxLength(max) {
};
}
+/**
+ * @param {number} min
+ * @param {number} max
+ * @param {number} [precision]
+ * @returns {Validator}
+ */
+function numberBetween(min, max, precision = null) {
+ return (value) => {
+ const valueNumber = parseFloat(value);
+ if (isNaN(valueNumber)) {
+ return 'Value must be a numeric type.';
+ }
+
+ if (valueNumber < min || valueNumber > max) {
+ return `Value must be between ${min} and ${max}.`;
+ }
+
+ if (precision !== null) {
+ const strValue = value.toString();
+ const decimalPart = strValue.includes('.') ? strValue.split('.')[1] : '';
+
+ if (decimalPart.length > precision) {
+ if (precision === 0) {
+ return 'Value must be an integer.';
+ } else {
+ return `Value must have at most ${precision} digits after the decimal point.`;
+ }
+ }
+ }
+ };
+}
+
+
/**
* To use with FileInput, enforce a cap on file size
* allowed to upload.
- *
+ *
* @param {number} limit
* @returns {Validator}
*/
@@ -90,6 +123,7 @@ function sizeLimit(limit) {
export {
maxLength,
minLength,
+ numberBetween,
noSpaces,
required,
requiredIf,
diff --git a/testgen/ui/components/frontend/js/main.js b/testgen/ui/components/frontend/js/main.js
index 3fb3bca0..0cf78e3a 100644
--- a/testgen/ui/components/frontend/js/main.js
+++ b/testgen/ui/components/frontend/js/main.js
@@ -35,6 +35,7 @@ import { TableGroupDeleteConfirmation } from './pages/table_group_delete_confirm
import { RunProfilingDialog } from './pages/run_profiling_dialog.js';
import { ConfirmationDialog } from './pages/confirmation_dialog.js';
import { TestDefinitionSummary } from './pages/test_definition_summary.js';
+import { NotificationSettings } from './pages/notification_settings.js';
let currentWindowVan = van;
let topWindowVan = window.top.van;
@@ -68,6 +69,7 @@ const TestGenComponent = (/** @type {string} */ id, /** @type {object} */ props)
run_profiling_dialog: RunProfilingDialog,
confirm_dialog: ConfirmationDialog,
test_definition_summary: TestDefinitionSummary,
+ notification_settings: NotificationSettings,
};
if (Object.keys(window.testgen.plugins).includes(id)) {
diff --git a/testgen/ui/components/frontend/js/pages/notification_settings.js b/testgen/ui/components/frontend/js/pages/notification_settings.js
new file mode 100644
index 00000000..77c1374b
--- /dev/null
+++ b/testgen/ui/components/frontend/js/pages/notification_settings.js
@@ -0,0 +1,365 @@
+/**
+ * @typedef NotificationItem
+ * @type {object}
+ * @property {String?} scope
+ * @property {String?} total_score_threshold
+ * @property {String?} cde_score_threshold
+ * @property {string[]} recipients
+ * @property {string} trigger
+ * @property {boolean} enabled
+ *
+ * @typedef Permissions
+ * @type {object}
+ * @property {boolean} can_edit
+ *
+ * @typedef Result
+ * @type {object}
+ * @property {boolean} success
+ * @property {string} message
+ *
+ * @typedef Properties
+ * @type {object}
+ * @property {Boolean} smtp_configured
+ * @property {String} event
+ * @property {NotificationItem[]} items
+ * @property {Permissions} permissions
+ * @property {String} scope_label
+ * @property {import('../components/select.js').Option[]} scope_options
+ * @property {import('../components/select.js').Option[]} trigger_options
+ * @property {Boolean} cde_enabled
+ * @property {Boolean} total_enabled
+ * @property {Result?} result
+ */
+import van from '../van.min.js';
+import { Button } from '../components/button.js';
+import { Streamlit } from '../streamlit.js';
+import { emitEvent, getValue, loadStylesheet } from '../utils.js';
+import { ExpansionPanel } from '../components/expansion_panel.js';
+import { Select } from '../components/select.js';
+import { Alert } from '../components/alert.js';
+import { Textarea } from '../components/textarea.js';
+import { Icon } from '../components/icon.js';
+import { TruncatedText } from '../components/truncated_text.js';
+import { Input } from '../components/input.js';
+import { numberBetween } from '../form_validators.js';
+import { EmptyState, EMPTY_STATE_MESSAGE } from '../components/empty_state.js';
+
+const minHeight = 500;
+const { div, span, b } = van.tags;
+
+const NotificationSettings = (/** @type Properties */ props) => {
+ loadStylesheet('notification-settings', stylesheet);
+ window.testgen.isPage = true;
+
+ if (!getValue(props.smtp_configured)) {
+ Streamlit.setFrameHeight(400);
+ return EmptyState({
+ label: 'Email server not configured.',
+ message: EMPTY_STATE_MESSAGE.notifications,
+ class: 'notifications--empty',
+ link: {
+ label: 'View documentation',
+ href: 'https://docs.datakitchen.io/articles/dataops-testgen-help/configure-email-server',
+ open_new: true,
+ },
+ });
+ }
+
+ const nsItems = van.derive(() => {
+ const items = getValue(props.items);
+ Streamlit.setFrameHeight(Math.max(minHeight, 70 * items.length || 150));
+ return items;
+ });
+
+ const event = getValue(props.event);
+ const cdeScoreEnabled = getValue(props.cde_enabled);
+ const totalScoreEnabled = getValue(props.total_enabled);
+ const scopeOptions = getValue(props.scope_options);
+
+ const scopeLabel = (scope) => {
+ const match = scopeOptions.find(([key]) => key === scope);
+ return match ? match[1] : '';
+ }
+
+ const triggerOptions = getValue(props.trigger_options);
+
+ const triggerLabel = (trigger) => {
+ const match = triggerOptions.find(([key]) => key === trigger);
+ return match ? match[1] : '';
+ }
+
+ const newNotificationItemForm = {
+ id: van.state(null),
+ scope: van.state(null),
+ recipientsString: van.state(''),
+ trigger: van.state(triggerOptions ? triggerOptions[0][0] : null),
+ totalScoreThreshold: van.state(0),
+ cdeScoreThreshold: van.state(0),
+ isEdit: van.state(false),
+ };
+
+ const resetForm = () => {
+ newNotificationItemForm.id.val = null;
+ newNotificationItemForm.scope.val = null;
+ newNotificationItemForm.recipientsString.val = '';
+ newNotificationItemForm.trigger.val = triggerOptions ? triggerOptions[0][0] : null;
+ newNotificationItemForm.totalScoreThreshold.val = 0;
+ newNotificationItemForm.cdeScoreThreshold.val = 0;
+ newNotificationItemForm.isEdit.val = false;
+ }
+
+ van.derive(() => {
+ if (getValue(props.result)?.success) {
+ resetForm();
+ }
+ });
+
+ const NotificationItem = (
+ /** @type NotificationItem */ item,
+ /** @type number[] */ columns,
+ /** @type Permissions */ permissions,
+ ) => {
+ const showTotalScore = totalScoreEnabled && item.total_score_threshold !== '0.0';
+ const showCdeScore = cdeScoreEnabled && item.cde_score_threshold !== '0.0';
+ return div(
+ { class: () => `table-row flex-row ${newNotificationItemForm.isEdit.val && newNotificationItemForm.id.val === item.id ? 'notifications--editing-row' : ''}` },
+ event === 'score_drop'
+ ? div(
+ { style: `flex: ${columns[0]}%`, class: 'flex-column fx-gap-1 score-threshold' },
+ showTotalScore ? div('Total score: ', b(item.total_score_threshold)) : '',
+ showCdeScore ? div(`${showTotalScore ? 'or ' : ''}CDE score: `, b(item.cde_score_threshold)) : '',
+ )
+ : div(
+ { style: `flex: ${columns[0]}%` },
+ div(scopeLabel(item.scope)),
+ div({ class: 'text-caption mt-1' }, triggerLabel(item.trigger)),
+ ),
+ div(
+ { style: `flex: ${columns[1]}%` },
+ TruncatedText({ max: 6 }, ...item.recipients),
+ ),
+ div(
+ { class: 'flex-row fx-gap-2', style: `flex: ${columns[2]}%` },
+ permissions.can_edit
+ ? (newNotificationItemForm.isEdit.val && newNotificationItemForm.id.val === item.id
+ ? div(
+ { class: 'flex-row fx-gap-1' },
+ Icon({ size: 18, classes: 'notifications--editing' }, 'edit'),
+ span({ class: 'notifications--editing' }, 'Editing'),
+ )
+ : [
+ item.enabled
+ ? Button({
+ type: 'stroked',
+ icon: 'pause',
+ tooltip: 'Pause notification',
+ style: 'height: 32px;',
+ onclick: () => emitEvent('PauseNotification', { payload: item }),
+ })
+ : Button({
+ type: 'stroked',
+ icon: 'play_arrow',
+ tooltip: 'Resume notification',
+ style: 'height: 32px;',
+ onclick: () => emitEvent('ResumeNotification', { payload: item }),
+ }),
+ Button({
+ type: 'stroked',
+ icon: 'edit',
+ tooltip: 'Edit notification',
+ style: 'height: 32px;',
+ onclick: () => {
+ newNotificationItemForm.isEdit.val = true;
+ newNotificationItemForm.id.val = item.id;
+ newNotificationItemForm.recipientsString.val = item.recipients.join(', ');
+ if (event === 'score_drop') {
+ newNotificationItemForm.totalScoreThreshold.val = item.total_score_threshold;
+ newNotificationItemForm.cdeScoreThreshold.val = item.cde_score_threshold;
+ } else {
+ newNotificationItemForm.scope.val = item.scope;
+ newNotificationItemForm.trigger.val = item.trigger;
+ }
+ },
+ }),
+ Button({
+ type: 'stroked',
+ icon: 'delete',
+ tooltip: 'Delete notification',
+ tooltipPosition: 'top-left',
+ style: 'height: 32px;',
+ onclick: () => emitEvent('DeleteNotification', { payload: item }),
+ }),
+ ]) : null,
+ ),
+ );
+ }
+
+ const columns = [30, 50, 20];
+ const domId = 'notifications-table';
+
+ return div(
+ { id: domId, class: 'flex-column fx-gap-2', style: 'height: 100%; overflow-y: auto;' },
+ () => ExpansionPanel(
+ {
+ title: newNotificationItemForm.isEdit.val
+ ? span({ class: 'notifications--editing' }, 'Edit Notification')
+ : 'Add Notification',
+ testId: 'notification-item-editor',
+ expanded: newNotificationItemForm.isEdit.val,
+ },
+ div(
+ { class: 'flex-row fx-gap-4 fx-align-flex-start' },
+ div(
+ { class: 'flex-column fx-gap-2', style: 'flex: 40%' },
+ ...(event === 'score_drop' ? [
+ () => totalScoreEnabled
+ ? Input({
+ label: 'When total score drops below',
+ value: newNotificationItemForm.totalScoreThreshold,
+ type: 'number',
+ step: 0.1,
+ onChange: (value) => newNotificationItemForm.totalScoreThreshold.val = value,
+ validators: [
+ numberBetween(0, 100, 1),
+ ],
+ })
+ : '',
+ () => cdeScoreEnabled
+ ? Input({
+ label: `${totalScoreEnabled ? 'or w' : 'W'}hen CDE score drops below`,
+ value: newNotificationItemForm.cdeScoreThreshold,
+ type: 'number',
+ step: 0.1,
+ onChange: (value) => newNotificationItemForm.cdeScoreThreshold.val = value,
+ validators: [
+ numberBetween(0, 100, 1),
+ ],
+ })
+ : '',
+ ] : [
+ () => Select({
+ label: getValue(props.scope_label),
+ options: scopeOptions.map(([value, label]) => ({
+ label: label, value: value
+ })),
+ value: newNotificationItemForm.scope,
+ onChange: (value) => newNotificationItemForm.scope.val = value,
+ portalClass: 'short-select-portal',
+ }),
+ () => Select({
+ label: 'When',
+ options: triggerOptions.map(([value, label]) => ({
+ label: label, value: value
+ })),
+ value: newNotificationItemForm.trigger,
+ onChange: (value) => newNotificationItemForm.trigger.val = value,
+ portalClass: 'short-select-portal',
+ }),
+ ]),
+ ),
+ div(
+ { style: 'flex: 60%; height: 100%' },
+ () => Textarea({
+ label: 'Recipients',
+ help: 'List of email addresses, separated by commas or newlines',
+ placeholder: 'Email addresses separated by commas or newlines',
+ height: 100,
+ value: newNotificationItemForm.recipientsString,
+ onChange: (value) => newNotificationItemForm.recipientsString.val = value,
+ }),
+ ),
+ ),
+ div(
+ { class: 'flex-row fx-justify-content-flex-end fx-gap-2 mt-3' },
+ () => newNotificationItemForm.isEdit.val
+ ? Button({
+ type: 'stroked',
+ label: 'Cancel',
+ width: 'auto',
+ onclick: resetForm,
+ })
+ : '',
+ Button({
+ type: 'stroked',
+ label: newNotificationItemForm.isEdit.val ? 'Save Changes' : 'Add Notification',
+ width: 'auto',
+ onclick: () => emitEvent(
+ newNotificationItemForm.isEdit.val ? 'UpdateNotification' : 'AddNotification',
+ {
+ payload: {
+ id: newNotificationItemForm.isEdit.val ? newNotificationItemForm.id.val : null,
+ scope: newNotificationItemForm.scope.val,
+ recipients: [...new Set(newNotificationItemForm.recipientsString.val.split(/[,;\n ]+/).filter(s => s.length > 0))],
+ ...(event === 'score_drop' ?
+ {
+ total_score_threshold: newNotificationItemForm.totalScoreThreshold.val,
+ cde_score_threshold: newNotificationItemForm.cdeScoreThreshold.val,
+ } : {
+ trigger: newNotificationItemForm.trigger.val,
+ }
+ ),
+ }
+ }
+ ),
+ }),
+ ),
+ ),
+ () => {
+ const result = getValue(props.result);
+ return result?.message
+ ? div( // Wrapper div needed, otherwise new Alert does not appear after closing previous one
+ Alert({
+ type: result.success ? 'success' : 'error',
+ class: 'mt-3',
+ closeable: true,
+ timeout: result.success ? 2000 : 5000,
+ }, result.message)
+ )
+ : '';
+ },
+ div(
+ { class: 'table fx-flex' },
+ div(
+ { class: 'table-header flex-row' },
+ span(
+ { style: `flex: ${columns[0]}%` },
+ event === 'score_drop' ? 'Score Drop Threshold' : `${props.scope_label.val} | Trigger`,
+ ),
+ span(
+ { style: `flex: ${columns[1]}%` },
+ 'Recipients',
+ ),
+ span(
+ { style: `flex: ${columns[2]}%` },
+ 'Actions',
+ ),
+ ),
+ () => nsItems.val?.length
+ ? div(
+ nsItems.val.map(item => NotificationItem(item, columns, getValue(props.permissions))),
+ )
+ : div({ class: 'mt-5 mb-3 ml-3 text-secondary', style: 'text-align: center;' }, 'No notifications defined yet.'),
+ ),
+ );
+}
+
+const stylesheet = new CSSStyleSheet();
+stylesheet.replace(`
+.notifications--empty.tg-empty-state {
+ margin-top: 0;
+}
+.notifications--editing-row {
+ background-color: var(--select-hover-background);
+}
+.notifications--editing {
+ color: var(--purple);
+}
+.short-select-portal {
+ max-height: 250px !important;
+}
+.score-threshold b {
+ font-weight: 500;
+}
+`);
+
+export { NotificationSettings };
diff --git a/testgen/ui/components/frontend/js/pages/profiling_runs.js b/testgen/ui/components/frontend/js/pages/profiling_runs.js
index 0b4e2948..5d39c061 100644
--- a/testgen/ui/components/frontend/js/pages/profiling_runs.js
+++ b/testgen/ui/components/frontend/js/pages/profiling_runs.js
@@ -1,13 +1,13 @@
/**
- * @import { FilterOption, ProjectSummary } from '../types.js'; *
- *
+ * @import { FilterOption, ProjectSummary } from '../types.js';
+ *
* @typedef ProgressStep
* @type {object}
* @property {'data_chars'|'col_profiling'|'freq_analysis'|'hygiene_issues'} key
* @property {'Pending'|'Running'|'Completed'|'Warning'} status
* @property {string} label
* @property {string} detail
- *
+ *
* @typedef ProfilingRun
* @type {object}
* @property {string} id
@@ -29,7 +29,7 @@
* @property {number} anomalies_possible_ct
* @property {number} anomalies_dismissed_ct
* @property {string} dq_score_profiling
- *
+ *
* @typedef Permissions
* @type {object}
* @property {boolean} can_edit
@@ -72,7 +72,7 @@ const ProfilingRuns = (/** @type Properties */ props) => {
Streamlit.setFrameHeight(1);
window.testgen.isPage = true;
- const columns = ['5%', '15%', '20%', '20%', '30%', '10%'];
+ const columns = ['5%', '20%', '15%', '20%', '30%', '10%'];
const userCanEdit = getValue(props.permissions)?.can_edit ?? false;
const pageIndex = van.state(0);
@@ -118,7 +118,7 @@ const ProfilingRuns = (/** @type Properties */ props) => {
() => profilingRuns.val.length
? div(
div(
- { class: 'table pb-0' },
+ { class: 'table pb-0', style: 'overflow-y: auto;' },
() => {
const selectedItems = profilingRuns.val.filter(i => selectedRuns[i.id]?.val ?? false);
const someRunSelected = selectedItems.length > 0;
@@ -157,7 +157,7 @@ const ProfilingRuns = (/** @type Properties */ props) => {
}
return span(
- { style: `flex: ${columns[0]}` },
+ { style: `flex: 0 0 ${columns[0]}` },
userCanEdit
? Checkbox({
checked: allSelected,
@@ -169,23 +169,23 @@ const ProfilingRuns = (/** @type Properties */ props) => {
);
},
span(
- { style: `flex: ${columns[1]}` },
+ { style: `flex: 0 0 ${columns[1]}` },
'Start Time | Table Group',
),
span(
- { style: `flex: ${columns[2]}` },
+ { style: `flex: 0 0 ${columns[2]}` },
'Status | Duration',
),
span(
- { style: `flex: ${columns[3]}` },
+ { style: `flex: 0 0 ${columns[3]}` },
'Schema',
),
span(
- { style: `flex: ${columns[4]}`, class: 'tg-profiling-runs--issues' },
+ { style: `flex: 0 0 ${columns[4]}`, class: 'tg-profiling-runs--issues' },
'Hygiene Issues',
),
span(
- { style: `flex: ${columns[5]}` },
+ { style: `flex: 0 0 ${columns[5]}` },
'Profiling Score',
),
),
@@ -232,10 +232,20 @@ const Toolbar = (
}),
div(
{ class: 'flex-row fx-gap-4' },
+ Button({
+ icon: 'notifications',
+ type: 'stroked',
+ label: 'Notifications',
+ tooltip: 'Configure email notifications for profiling runs',
+ tooltipPosition: 'bottom',
+ width: 'fit-content',
+ style: 'background: var(--dk-card-background);',
+ onclick: () => emitEvent('RunNotificationsClicked', {}),
+ }),
Button({
icon: 'today',
type: 'stroked',
- label: 'Profiling Schedules',
+ label: 'Schedules',
tooltip: 'Manage when profiling should run for table groups',
tooltipPosition: 'bottom',
width: 'fit-content',
@@ -277,7 +287,7 @@ const ProfilingRunItem = (
{ class: 'table-row flex-row', 'data-testid': 'profiling-run-item' },
userCanEdit
? div(
- { style: `flex: ${columns[0]}; font-size: 16px;` },
+ { style: `flex: 0 0 ${columns[0]}; font-size: 16px;` },
Checkbox({
checked: selected,
onChange: (checked) => selected.val = checked,
@@ -286,7 +296,7 @@ const ProfilingRunItem = (
)
: '',
div(
- { style: `flex: ${columns[1]}` },
+ { style: `flex: 0 0 ${columns[1]}; max-width: ${columns[1]}; word-wrap: break-word;` },
div({ 'data-testid': 'profiling-run-item-starttime' }, formatTimestamp(item.profiling_starttime)),
div(
{ class: 'text-caption mt-1', 'data-testid': 'profiling-run-item-tablegroup' },
@@ -294,7 +304,7 @@ const ProfilingRunItem = (
),
),
div(
- { style: `flex: ${columns[2]}` },
+ { style: `flex: 0 0 ${columns[2]}; max-width: ${columns[2]};` },
div(
{ class: 'flex-row' },
ProfilingRunStatus(item),
@@ -305,14 +315,14 @@ const ProfilingRunItem = (
onclick: () => emitEvent('RunCanceled', { payload: item }),
}) : null,
),
- item.profiling_endtime
+ item.profiling_endtime
? div(
{ class: 'text-caption mt-1', 'data-testid': 'profiling-run-item-duration' },
formatDuration(item.profiling_starttime, item.profiling_endtime),
- )
+ )
: div(
{ class: 'text-caption mt-1' },
- item.status === 'Running' && runningStep
+ item.status === 'Running' && runningStep
? [
div(
runningStep.label,
@@ -327,7 +337,7 @@ const ProfilingRunItem = (
),
),
div(
- { style: `flex: ${columns[3]}` },
+ { style: `flex: 0 0 ${columns[3]}; max-width: ${columns[3]};` },
div({ 'data-testid': 'profiling-run-item-schema' }, item.table_group_schema),
div(
{
@@ -335,7 +345,7 @@ const ProfilingRunItem = (
style: item.status === 'Complete' && !item.column_ct ? 'color: var(--red);' : '',
'data-testid': 'profiling-run-item-counts',
},
- item.column_ct !== null
+ item.column_ct !== null
? div(
`${formatNumber(item.table_ct || 0)} tables, ${formatNumber(item.column_ct || 0)} columns`,
item.record_ct !== null ?
@@ -359,7 +369,7 @@ const ProfilingRunItem = (
}) : null,
),
div(
- { class: 'pr-3 tg-profiling-runs--issues', style: `flex: ${columns[4]}` },
+ { class: 'pr-3 tg-profiling-runs--issues', style: `flex: 0 0 ${columns[4]}; max-width: ${columns[4]};` },
item.anomaly_ct ? SummaryCounts({
items: [
{ label: 'Definite', value: item.anomalies_definite_ct, color: 'red' },
@@ -379,7 +389,7 @@ const ProfilingRunItem = (
}) : null,
),
div(
- { style: `flex: ${columns[5]}; font-size: 16px;` },
+ { style: `flex: 0 0 ${columns[5]}; max-width: ${columns[5]}; font-size: 16px;` },
item.column_ct && item.dq_score_profiling
? item.dq_score_profiling
: '--',
diff --git a/testgen/ui/components/frontend/js/pages/score_details.js b/testgen/ui/components/frontend/js/pages/score_details.js
index 1b5e9a1c..d80e7290 100644
--- a/testgen/ui/components/frontend/js/pages/score_details.js
+++ b/testgen/ui/components/frontend/js/pages/score_details.js
@@ -61,6 +61,7 @@ const ScoreDetails = (/** @type {Properties} */ props) => {
const score = getValue(props.score);
return userCanEdit ? div(
{ class: 'flex-row tg-test-suites--card-actions' },
+ Button({ type: 'icon', icon: 'notifications', tooltip: 'Configure Notifications', onclick: () => emitEvent('EditNotifications', {}) }),
Button({ type: 'icon', icon: 'edit', tooltip: 'Edit Scorecard', onclick: () => emitEvent('LinkClicked', { href: 'quality-dashboard:explorer', params: { definition_id: score.id } }) }),
Button({ type: 'icon', icon: 'delete', tooltip: 'Delete Scorecard', onclick: () => emitEvent('DeleteScoreRequested', { payload: score.id }) }),
) : '';
diff --git a/testgen/ui/components/frontend/js/pages/table_group_list.js b/testgen/ui/components/frontend/js/pages/table_group_list.js
index 7ee53555..e23212fe 100644
--- a/testgen/ui/components/frontend/js/pages/table_group_list.js
+++ b/testgen/ui/components/frontend/js/pages/table_group_list.js
@@ -2,11 +2,11 @@
* @import { ProjectSummary } from '../types.js';
* @import { TableGroup } from '../components/table_group_form.js';
* @import { Connection } from '../components/connection_form.js';
- *
+ *
* @typedef Permissions
* @type {object}
* @property {boolean} can_edit
- *
+ *
* @typedef Properties
* @type {object}
* @property {ProjectSummary} project_summary
@@ -26,11 +26,10 @@ import { getValue, emitEvent, loadStylesheet, resizeFrameHeightToElement, resize
import { EMPTY_STATE_MESSAGE, EmptyState } from '../components/empty_state.js';
import { Select } from '../components/select.js';
import { Icon } from '../components/icon.js';
-import { withTooltip } from '../components/tooltip.js';
import { Input } from '../components/input.js';
-import { caseInsensitiveSort } from '../display_utils.js';
+import { TruncatedText } from '../components/truncated_text.js';
-const { div, h4, i, span } = van.tags;
+const { div, h4, span } = van.tags;
/**
* @param {Properties} props
@@ -114,7 +113,7 @@ const TableGroupList = (props) => {
Caption({content: 'Explicit Table List', style: 'margin-bottom: 4px;'}),
tableGroup.profiling_table_set
? TruncatedText(
- {max: 3},
+ {max: 3, tooltipPosition: 'top-right'},
...tableGroup.profiling_table_set.split(',').map(t => t.trim())
)
: '--',
@@ -211,12 +210,12 @@ const TableGroupList = (props) => {
}
/**
- *
+ *
* @param {Permissions} permissions
* @param {Connection[]} connections
* @param {string?} selectedConnection
* @param {string?} tableGroupNameFilter
- * @returns
+ * @returns
*/
const Toolbar = (permissions, connections, selectedConnection, tableGroupNameFilter) => {
const connection = van.state(selectedConnection || null);
@@ -258,10 +257,20 @@ const Toolbar = (permissions, connections, selectedConnection, tableGroupNameFil
),
div(
{ class: 'flex-row fx-gap-4' },
+ Button({
+ icon: 'notifications',
+ type: 'stroked',
+ label: 'Notifications',
+ tooltip: 'Configure email notifications for profiling runs',
+ tooltipPosition: 'bottom',
+ width: 'fit-content',
+ style: 'background: var(--dk-card-background);',
+ onclick: () => emitEvent('RunNotificationsClicked', {}),
+ }),
Button({
icon: 'today',
type: 'stroked',
- label: 'Profiling Schedules',
+ label: 'Schedules',
tooltip: 'Manage when profiling should run for table groups',
tooltipPosition: 'bottom',
width: 'fit-content',
@@ -282,34 +291,6 @@ const Toolbar = (permissions, connections, selectedConnection, tableGroupNameFil
);
}
-/**
- * @typedef TruncatedTextOptions
- * @type {object}
- * @property {number} max
- * @property {string?} class
- *
- * @param {TruncatedTextOptions} options
- * @param {string[]} children
- */
-const TruncatedText = ({ max, ...options }, ...children) => {
- const sortedChildren = [...children.sort((a, b) => a.length - b.length)];
- const tooltipText = children.sort(caseInsensitiveSort).join(', ');
-
- return div(
- { class: () => `${options.class ?? ''}`, style: 'position: relative;' },
- span(sortedChildren.slice(0, max).join(', ')),
- sortedChildren.length > max
- ? withTooltip(
- i({class: 'text-caption'}, ` + ${sortedChildren.length - max} more`),
- {
- text: tooltipText,
- position: 'top-right',
- }
- )
- : '',
- );
-};
-
const stylesheet = new CSSStyleSheet();
stylesheet.replace(`
.tg-tablegroups {
diff --git a/testgen/ui/components/frontend/js/pages/test_runs.js b/testgen/ui/components/frontend/js/pages/test_runs.js
index 31aa7413..7f53a6e1 100644
--- a/testgen/ui/components/frontend/js/pages/test_runs.js
+++ b/testgen/ui/components/frontend/js/pages/test_runs.js
@@ -1,13 +1,13 @@
/**
- * @import { FilterOption, ProjectSummary } from '../types.js';
- *
+ * @import { FilterOption, ProjectSummary } from '../types.js';
+ *
* @typedef ProgressStep
* @type {object}
* @property {'data_chars'|'validation'|'QUERY'|'CAT'|'METADATA'} key
* @property {'Pending'|'Running'|'Completed'|'Warning'} status
* @property {string} label
* @property {string} detail
- *
+ *
* @typedef TestRun
* @type {object}
* @property {string} test_run_id
@@ -239,10 +239,20 @@ const Toolbar = (
),
div(
{ class: 'flex-row fx-gap-4' },
+ Button({
+ icon: 'notifications',
+ type: 'stroked',
+ label: 'Notifications',
+ tooltip: 'Configure email notifications for test runs',
+ tooltipPosition: 'bottom',
+ width: 'fit-content',
+ style: 'background: var(--dk-card-background);',
+ onclick: () => emitEvent('RunNotificationsClicked', {}),
+ }),
Button({
icon: 'today',
type: 'stroked',
- label: 'Test Run Schedules',
+ label: 'Schedules',
tooltip: 'Manage when test suites should run',
tooltipPosition: 'bottom',
width: 'fit-content',
@@ -324,7 +334,7 @@ const TestRunItem = (
)
: div(
{ class: 'text-caption mt-1' },
- item.status === 'Running' && runningStep
+ item.status === 'Running' && runningStep
? [
div(
runningStep.label,
diff --git a/testgen/ui/components/frontend/js/pages/test_suites.js b/testgen/ui/components/frontend/js/pages/test_suites.js
index 85e84abf..3fa1468f 100644
--- a/testgen/ui/components/frontend/js/pages/test_suites.js
+++ b/testgen/ui/components/frontend/js/pages/test_suites.js
@@ -59,10 +59,20 @@ const TestSuites = (/** @type Properties */ props) => {
}),
div(
{ class: 'flex-row fx-gap-4' },
+ Button({
+ icon: 'notifications',
+ type: 'stroked',
+ label: 'Notifications',
+ tooltip: 'Configure email notifications for test runs',
+ tooltipPosition: 'bottom',
+ width: 'fit-content',
+ style: 'background: var(--dk-card-background);',
+ onclick: () => emitEvent('RunNotificationsClicked', {}),
+ }),
Button({
icon: 'today',
type: 'stroked',
- label: 'Test Run Schedules',
+ label: 'Schedules',
tooltip: 'Manage when test suites should run',
tooltipPosition: 'bottom',
width: 'fit-content',
diff --git a/testgen/ui/components/widgets/testgen_component.py b/testgen/ui/components/widgets/testgen_component.py
index 7ab385ec..8161a0b7 100644
--- a/testgen/ui/components/widgets/testgen_component.py
+++ b/testgen/ui/components/widgets/testgen_component.py
@@ -21,6 +21,7 @@
"connections",
"table_group_wizard",
"help_menu",
+ "notification_settings",
]
diff --git a/testgen/ui/navigation/router.py b/testgen/ui/navigation/router.py
index 243b6916..15c7813d 100644
--- a/testgen/ui/navigation/router.py
+++ b/testgen/ui/navigation/router.py
@@ -2,12 +2,14 @@
import logging
import time
+from urllib.parse import urlparse
import streamlit as st
import testgen.ui.navigation.page
from testgen.common.mixpanel_service import MixpanelService
from testgen.common.models.project import Project
+from testgen.common.models.settings import PersistedSetting
from testgen.ui.session import session
from testgen.utils.singleton import Singleton
@@ -25,14 +27,23 @@ def __init__(
self._routes = {route.path: route(self) for route in routes} if routes else {}
self._pending_navigation: dict | None = None
+ def _init_session(self):
+ # Clear cache on initial load or page refresh
+ st.cache_data.clear()
+
+ try:
+ parsed_url = urlparse(st.context.url)
+ PersistedSetting.set("BASE_URL", f"{parsed_url.scheme}://{parsed_url.netloc}")
+ except Exception:
+ LOG.exception("Error capturing the base URL")
+
def run(self) -> None:
streamlit_pages = [route.streamlit_page for route in self._routes.values()]
current_page = st.navigation(streamlit_pages, position="hidden")
if not session.initialized:
- # Clear cache on initial load or page refresh
- st.cache_data.clear()
+ self._init_session()
session.initialized = True
# This hack is needed because the auth cookie is not set if navigation happens immediately after login
diff --git a/testgen/ui/pdf/hygiene_issue_report.py b/testgen/ui/pdf/hygiene_issue_report.py
index ba5d97a1..05d03058 100644
--- a/testgen/ui/pdf/hygiene_issue_report.py
+++ b/testgen/ui/pdf/hygiene_issue_report.py
@@ -4,6 +4,7 @@
from reportlab.lib.styles import ParagraphStyle
from reportlab.platypus import CondPageBreak, KeepTogether, Paragraph, Table, TableStyle
+from testgen.common.models.settings import PersistedSetting
from testgen.settings import ISSUE_REPORT_SOURCE_DATA_LOOKUP_LIMIT
from testgen.ui.pdf.dataframe_table import DataFrameTableBuilder
from testgen.ui.pdf.style import (
@@ -22,7 +23,6 @@
)
from testgen.ui.pdf.templates import DatakitchenTemplate
from testgen.ui.queries.source_data_queries import get_hygiene_issue_source_data
-from testgen.utils import get_base_url
SECTION_MIN_AVAILABLE_HEIGHT = 120
@@ -139,7 +139,7 @@ def build_summary_table(document, hi_data):
),
(
Paragraph(
- f"""
+ f"""
View on TestGen >
""",
style=PARA_STYLE_LINK,
diff --git a/testgen/ui/pdf/test_result_report.py b/testgen/ui/pdf/test_result_report.py
index 9fd471f5..a621ee7c 100644
--- a/testgen/ui/pdf/test_result_report.py
+++ b/testgen/ui/pdf/test_result_report.py
@@ -10,6 +10,7 @@
TableStyle,
)
+from testgen.common.models.settings import PersistedSetting
from testgen.settings import ISSUE_REPORT_SOURCE_DATA_LOOKUP_LIMIT
from testgen.ui.pdf.dataframe_table import TABLE_STYLE_DATA, DataFrameTableBuilder
from testgen.ui.pdf.style import (
@@ -31,7 +32,6 @@
from testgen.ui.queries.test_result_queries import (
get_test_result_history,
)
-from testgen.utils import get_base_url
SECTION_MIN_AVAILABLE_HEIGHT = 120
@@ -152,7 +152,7 @@ def build_summary_table(document, tr_data):
),
(
Paragraph(
- f"""
+ f"""
View on TestGen >
""",
style=PARA_STYLE_LINK,
diff --git a/testgen/ui/views/dialogs/manage_notifications.py b/testgen/ui/views/dialogs/manage_notifications.py
new file mode 100644
index 00000000..d830a7c3
--- /dev/null
+++ b/testgen/ui/views/dialogs/manage_notifications.py
@@ -0,0 +1,142 @@
+import logging
+from functools import wraps
+from itertools import count
+from typing import Any
+from uuid import UUID
+
+import streamlit as st
+
+from testgen.common.models import with_database_session
+from testgen.common.models.notification_settings import NotificationSettings, NotificationSettingsValidationError
+from testgen.common.models.settings import PersistedSetting
+from testgen.ui.components import widgets
+from testgen.ui.session import session, temp_value
+
+LOG = logging.getLogger("testgen")
+
+
+class NotificationSettingsDialogBase:
+
+ title: str = "Manage Email Notifications"
+
+ def __init__(self,
+ ns_class: type[NotificationSettings],
+ ns_attrs: dict[str, Any] | None = None,
+ component_props: dict[str, Any] | None = None,
+ ):
+ self.ns_class = ns_class
+ self.ns_attrs = ns_attrs or {}
+ self.component_props = component_props or {}
+ self.get_result, self.set_result = temp_value("notification_settings_dialog:result")
+ self._result_idx = iter(count())
+
+ def open(self) -> None:
+ return st.dialog(title=self.title)(self.render)()
+
+ @staticmethod
+ def event_handler(*, success_message=None, error_message="Something went wrong."):
+
+ def decorator(method):
+
+ @wraps(method)
+ def wrapper(self, *args, **kwargs):
+ try:
+ with_database_session(method)(self, *args, **kwargs)
+ except NotificationSettingsValidationError as e:
+ success = False
+ message = str(e)
+ except Exception:
+ LOG.exception("Action %s failed with:", method.__name__)
+ success = False
+ message = error_message
+ else:
+ success = True
+ message = success_message
+
+ # The ever-changing "idx" is useful to force refreshing the component
+ self.set_result({"success": success, "message": message, "idx": next(self._result_idx)})
+ st.rerun(scope="fragment")
+
+ return wrapper
+ return decorator
+
+ @event_handler(success_message="Notification deleted")
+ def on_delete_item(self, item):
+ if ns := self.ns_class.get(item["id"]):
+ ns.delete()
+
+ def _update_item(self, item_id: UUID | str, item_data: dict[str, Any]):
+ ns = self.ns_class.get(item_id)
+ for key, value in item_data.items():
+ if key != "id" and value != getattr(ns, key):
+ setattr(ns, key, value)
+ ns.save()
+
+ def _item_to_model_attrs(self, item: dict[str, Any]) -> dict[str, Any]:
+ raise NotImplementedError
+
+ def _model_to_item_attrs(self, model: NotificationSettings) -> dict[str, Any]:
+ raise NotImplementedError
+
+ @event_handler(success_message="Notification added")
+ def on_add_item(self, item):
+ attrs = self._item_to_model_attrs(item)
+ self.ns_class.create(**self.ns_attrs, recipients=item["recipients"], **attrs)
+
+ @event_handler(success_message="Notification updated")
+ def on_update_item(self, item):
+ self._update_item(item["id"], {"recipients": item["recipients"], **self._item_to_model_attrs(item)})
+
+ @event_handler()
+ def on_pause_item(self, item):
+ self._update_item(item["id"], {"enabled": False})
+
+ @event_handler()
+ def on_resume_item(self, item):
+ self._update_item(item["id"], {"enabled": True})
+
+ def _get_component_props(self) -> dict[str, Any]:
+ raise NotImplementedError
+
+ @with_database_session
+ def render(self) -> None:
+ user_can_edit = session.auth.user_has_permission("edit")
+ result = self.get_result()
+
+ ns_json_list = []
+ select_col = [ # noqa: RUF015
+ attr
+ for attr in ("score_definition_id", "test_suite_id", "table_group_id", "project_code")
+ if attr in self.ns_attrs
+ ][0]
+ for ns in self.ns_class.select(**{select_col: self.ns_attrs[select_col]}):
+ ns_json = {
+ "id": str(ns.id),
+ "enabled": ns.enabled,
+ "recipients": ns.recipients,
+ **self._model_to_item_attrs(ns),
+ }
+ ns_json_list.append(ns_json)
+
+ widgets.css_class("m-dialog")
+ widgets.testgen_component(
+ "notification_settings",
+ props={
+ "smtp_configured": PersistedSetting.get("SMTP_CONFIGURED"),
+ "items": ns_json_list,
+ "event": self.ns_class.__mapper_args__["polymorphic_identity"].value,
+ "permissions": {"can_edit": user_can_edit},
+ "result": result,
+ "scope_options": [],
+ "scope_label": None,
+ **self.component_props,
+ **self._get_component_props(),
+ },
+ event_handlers={
+ "AddNotification": self.on_add_item,
+ "UpdateNotification": self.on_update_item,
+ "DeleteNotification": self.on_delete_item,
+ "PauseNotification": self.on_pause_item,
+ "ResumeNotification": self.on_resume_item,
+ },
+ )
diff --git a/testgen/ui/views/hygiene_issues.py b/testgen/ui/views/hygiene_issues.py
index 8eb1f533..a452be7a 100644
--- a/testgen/ui/views/hygiene_issues.py
+++ b/testgen/ui/views/hygiene_issues.py
@@ -10,6 +10,7 @@
from testgen.common import date_service
from testgen.common.mixpanel_service import MixpanelService
from testgen.common.models import with_database_session
+from testgen.common.models.hygiene_issue import HygieneIssue
from testgen.common.models.profiling_run import ProfilingRun
from testgen.ui.components import widgets as testgen
from testgen.ui.components.widgets.download_dialog import (
@@ -26,7 +27,6 @@
from testgen.ui.services.database_service import (
execute_db_query,
fetch_df_from_db,
- fetch_one_from_db,
)
from testgen.ui.session import session
from testgen.ui.views.dialogs.profiling_results_dialog import view_profiling_button
@@ -490,39 +490,26 @@ def get_anomaly_disposition(profile_run_id: str) -> pd.DataFrame:
@st.cache_data(show_spinner=False)
def get_profiling_anomaly_summary(profile_run_id: str) -> list[dict]:
- query = """
- SELECT
- schema_name,
- COUNT(DISTINCT s.table_name) as table_ct,
- COUNT(DISTINCT s.column_name) as column_ct,
- COUNT(*) as issue_ct,
- SUM(CASE WHEN COALESCE(s.disposition, 'Confirmed') = 'Confirmed'
- AND t.issue_likelihood = 'Definite' THEN 1 ELSE 0 END) as definite_ct,
- SUM(CASE WHEN COALESCE(s.disposition, 'Confirmed') = 'Confirmed'
- AND t.issue_likelihood = 'Likely' THEN 1 ELSE 0 END) as likely_ct,
- SUM(CASE WHEN COALESCE(s.disposition, 'Confirmed') = 'Confirmed'
- AND t.issue_likelihood = 'Possible' THEN 1 ELSE 0 END) as possible_ct,
- SUM(CASE WHEN COALESCE(s.disposition, 'Confirmed')
- IN ('Dismissed', 'Inactive')
- AND t.issue_likelihood <> 'Potential PII' THEN 1 ELSE 0 END) as dismissed_ct,
- SUM(CASE WHEN COALESCE(s.disposition, 'Confirmed') = 'Confirmed' AND t.issue_likelihood = 'Potential PII' AND s.detail LIKE 'Risk: HIGH%%' THEN 1 ELSE 0 END) as pii_high_ct,
- SUM(CASE WHEN COALESCE(s.disposition, 'Confirmed') = 'Confirmed' AND t.issue_likelihood = 'Potential PII' AND s.detail LIKE 'Risk: MODERATE%%' THEN 1 ELSE 0 END) as pii_moderate_ct,
- SUM(CASE WHEN COALESCE(s.disposition, 'Confirmed') IN ('Dismissed', 'Inactive') AND t.issue_likelihood = 'Potential PII' THEN 1 ELSE 0 END) as pii_dismissed_ct
- FROM profile_anomaly_results s
- LEFT JOIN profile_anomaly_types t ON (s.anomaly_id = t.id)
- WHERE s.profile_run_id = :profile_run_id
- GROUP BY schema_name;
- """
- result = fetch_one_from_db(query, {"profile_run_id": profile_run_id})
+
+ count_by_priority = HygieneIssue.select_count_by_priority(profile_run_id)
return [
- { "label": "Definite", "value": result.definite_ct, "color": "red" },
- { "label": "Likely", "value": result.likely_ct, "color": "orange" },
- { "label": "Possible", "value": result.possible_ct, "color": "yellow" },
- { "label": "Dismissed", "value": result.dismissed_ct, "color": "grey" },
- { "label": "High", "value": result.pii_high_ct, "color": "red", "type": "PII" },
- { "label": "Moderate", "value": result.pii_moderate_ct, "color": "orange", "type": "PII" },
- { "label": "Dismissed", "value": result.pii_dismissed_ct, "color": "grey", "type": "PII" },
+ {"label": "Definite", "value": count_by_priority["Definite"].active, "color": "red"},
+ {"label": "Likely", "value": count_by_priority["Likely"].active, "color": "orange"},
+ {"label": "Possible", "value": count_by_priority["Possible"].active, "color": "yellow"},
+ {
+ "label": "Dismissed",
+ "value": sum(count_by_priority[p].inactive for p in ("Definite", "Likely", "Possible")),
+ "color": "grey",
+ },
+ {"label": "High", "value": count_by_priority["High"].active, "color": "red", "type": "PII"},
+ {"label": "Moderate", "value": count_by_priority["Moderate"].active, "color": "orange", "type": "PII"},
+ {
+ "label": "Dismissed",
+ "value": sum(count_by_priority[p].inactive for p in ("High", "Moderate")),
+ "color": "grey",
+ "type": "PII",
+ },
]
@@ -564,7 +551,7 @@ def source_data_dialog(selected_row):
st.markdown(f"#### {selected_row['anomaly_name']}")
st.caption(selected_row["anomaly_description"])
-
+
st.markdown("#### Hygiene Issue Detail")
st.caption(selected_row["detail"])
diff --git a/testgen/ui/views/profiling_runs.py b/testgen/ui/views/profiling_runs.py
index f0b442bb..583ddb4c 100644
--- a/testgen/ui/views/profiling_runs.py
+++ b/testgen/ui/views/profiling_runs.py
@@ -8,16 +8,22 @@
import testgen.common.process_service as process_service
import testgen.ui.services.form_service as fm
from testgen.common.models import with_database_session
+from testgen.common.models.notification_settings import (
+ ProfilingRunNotificationSettings,
+ ProfilingRunNotificationTrigger,
+)
from testgen.common.models.profiling_run import ProfilingRun
from testgen.common.models.project import Project
from testgen.common.models.scheduler import RUN_PROFILE_JOB_KEY
from testgen.common.models.table_group import TableGroup, TableGroupMinimal
+from testgen.common.notifications.profiling_run import send_profiling_run_notifications
from testgen.ui.components import widgets as testgen
from testgen.ui.components.widgets import testgen_component
from testgen.ui.navigation.menu import MenuItem
from testgen.ui.navigation.page import Page
from testgen.ui.navigation.router import Router
from testgen.ui.session import session, temp_value
+from testgen.ui.views.dialogs.manage_notifications import NotificationSettingsDialogBase
from testgen.ui.views.dialogs.manage_schedules import ScheduleDialog
from testgen.ui.views.dialogs.run_profiling_dialog import run_profiling_dialog
from testgen.utils import friendly_score, to_int
@@ -74,6 +80,7 @@ def render(self, project_code: str, table_group_id: str | None = None, **_kwargs
},
on_change_handlers={
"FilterApplied": on_profiling_runs_filtered,
+ "RunNotificationsClicked": manage_notifications(project_code),
"RunSchedulesClicked": lambda *_: ProfilingScheduleDialog().open(project_code),
"RunProfilingClicked": lambda *_: run_profiling_dialog(project_code, table_group_id, allow_selection=True),
"RefreshData": refresh_data,
@@ -119,10 +126,53 @@ def get_job_arguments(self, arg_value: str) -> tuple[list[typing.Any], dict[str,
return [], {"table_group_id": str(arg_value)}
+class ProfilingRunNotificationSettingsDialog(NotificationSettingsDialogBase):
+
+ title = "Profiling Notifications"
+
+ def _item_to_model_attrs(self, item: dict[str, typing.Any]) -> dict[str, typing.Any]:
+ return {
+ "trigger": ProfilingRunNotificationTrigger(item["trigger"]),
+ "table_group_id": item["scope"],
+ }
+
+ def _model_to_item_attrs(self, model: ProfilingRunNotificationSettings) -> dict[str, typing.Any]:
+ return {
+ "trigger": model.trigger.value if model.trigger else None,
+ "scope": str(model.table_group_id) if model.table_group_id else None,
+ }
+
+ def _get_component_props(self) -> dict[str, typing.Any]:
+ table_group_options = [
+ (str(tg.id), tg.table_groups_name)
+ for tg in TableGroup.select_minimal_where(TableGroup.project_code == self.ns_attrs["project_code"])
+ ]
+ table_group_options.insert(0, (None, "All Table Groups"))
+ trigger_labels = {
+ ProfilingRunNotificationTrigger.always.value: "Always",
+ ProfilingRunNotificationTrigger.on_changes.value: "On new hygiene issues",
+ }
+ trigger_options = [(t.value, trigger_labels[t.value]) for t in ProfilingRunNotificationTrigger]
+ return {
+ "scope_label": "Table Group",
+ "scope_options": table_group_options,
+ "trigger_options": trigger_options,
+ }
+
+
+def manage_notifications(project_code):
+
+ def open_dialog(*_):
+ ProfilingRunNotificationSettingsDialog(ProfilingRunNotificationSettings, {"project_code": project_code}).open()
+
+ return open_dialog
+
+
def on_cancel_run(profiling_run: dict) -> None:
process_status, process_message = process_service.kill_profile_run(to_int(profiling_run["process_id"]))
if process_status:
ProfilingRun.cancel_run(profiling_run["id"])
+ send_profiling_run_notifications(ProfilingRun.get(profiling_run["id"]))
fm.reset_post_updates(str_message=f":{'green' if process_status else 'red'}[{process_message}]", as_toast=True)
@@ -172,6 +222,7 @@ def on_delete_confirmed(*_args) -> None:
process_status, _ = process_service.kill_profile_run(to_int(profiling_run.process_id))
if process_status:
ProfilingRun.cancel_run(profiling_run.id)
+ send_profiling_run_notifications(ProfilingRun.get(profiling_run.id))
ProfilingRun.cascade_delete(profiling_run_ids)
st.rerun()
except Exception:
diff --git a/testgen/ui/views/score_details.py b/testgen/ui/views/score_details.py
index f457bc91..6362082d 100644
--- a/testgen/ui/views/score_details.py
+++ b/testgen/ui/views/score_details.py
@@ -1,7 +1,8 @@
import logging
import typing
+from decimal import Decimal
from io import BytesIO
-from typing import ClassVar
+from typing import Any, ClassVar
import pandas as pd
import streamlit as st
@@ -9,6 +10,11 @@
from testgen.commands.run_refresh_score_cards_results import run_recalculate_score_card
from testgen.common.mixpanel_service import MixpanelService
from testgen.common.models import with_database_session
+from testgen.common.models.notification_settings import (
+ NotificationEvent,
+ NotificationSettings,
+ ScoreDropNotificationSettings,
+)
from testgen.common.models.scores import (
Categories,
ScoreCategory,
@@ -24,6 +30,7 @@
from testgen.ui.pdf import hygiene_issue_report, test_result_report
from testgen.ui.queries.scoring_queries import get_all_score_cards, get_score_card_issue_reports
from testgen.ui.session import session, temp_value
+from testgen.ui.views.dialogs.manage_notifications import NotificationSettingsDialogBase
from testgen.ui.views.dialogs.profiling_results_dialog import profiling_results_dialog
from testgen.utils import format_score_card, format_score_card_breakdown, format_score_card_issues
@@ -69,7 +76,7 @@ def render(
if not category or category not in typing.get_args(Categories):
category = (
score_definition.category.value
- if score_definition.category
+ if score_definition.category
else ScoreCategory.dq_dimension.value
)
@@ -80,7 +87,6 @@ def render(
else "score"
)
- score_card = None
score_breakdown = None
issues = None
with st.spinner(text="Loading data :gray[:small[(This might take a few minutes)]] ..."):
@@ -114,6 +120,7 @@ def render(
},
event_handlers={
"DeleteScoreRequested": delete_score_card,
+ "EditNotifications": manage_notifications(score_definition),
},
on_change_handlers={
"CategoryChanged": select_category,
@@ -205,6 +212,16 @@ def delete_score_card(definition_id: str) -> None:
Router().navigate("quality-dashboard", { "project_code": score_definition.project_code })
+def manage_notifications(score_definition):
+ def open_dialog(*_):
+ ScoreDropNotificationSettingsDialog(
+ ScoreDropNotificationSettings,
+ ns_attrs={"project_code": score_definition.project_code, "score_definition_id": score_definition.id},
+ component_props={"cde_enabled": score_definition.cde_score, "total_enabled": score_definition.total_score},
+ ).open()
+ return open_dialog
+
+
def recalculate_score_history(definition_id: str) -> None:
try:
score_definition = ScoreDefinition.get(definition_id)
@@ -213,3 +230,28 @@ def recalculate_score_history(definition_id: str) -> None:
except:
LOG.exception(f"Failure recalculating history for scorecard id={definition_id}")
st.toast("Recalculating the trend failed. Try again", icon=":material/error:")
+
+
+class ScoreDropNotificationSettingsDialog(NotificationSettingsDialogBase):
+
+ title = "Scorecard Notifications"
+
+ def _item_to_model_attrs(self, item: dict[str, Any]) -> dict[str, Any]:
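+ # Convert threshold strings coming from the UI item into Decimal values for the model.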
+ model_data = {
+ attr: Decimal(item[attr])
+ for attr in ("total_score_threshold", "cde_score_threshold")
+ if attr in item
+ }
+ return model_data
+
+ def _model_to_item_attrs(self, model: NotificationSettings) -> dict[str, Any]:
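+ # Render the model's Decimal thresholds back to strings for the UI component.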
+ item_data = {
+ attr: str(getattr(model, attr))
+ for attr in ("total_score_threshold", "cde_score_threshold")
+ }
+ return item_data
+
+ def _get_component_props(self) -> dict[str, Any]:
+ return {
+ "event": NotificationEvent.score_drop.value,
+ }
diff --git a/testgen/ui/views/table_groups.py b/testgen/ui/views/table_groups.py
index 3bf9ca98..409a65a4 100644
--- a/testgen/ui/views/table_groups.py
+++ b/testgen/ui/views/table_groups.py
@@ -19,7 +19,7 @@
from testgen.ui.session import session, temp_value
from testgen.ui.views.connections import FLAVOR_OPTIONS, format_connection
from testgen.ui.views.dialogs.run_profiling_dialog import run_profiling_dialog
-from testgen.ui.views.profiling_runs import ProfilingScheduleDialog
+from testgen.ui.views.profiling_runs import ProfilingScheduleDialog, manage_notifications
LOG = logging.getLogger("testgen")
PAGE_TITLE = "Table Groups"
@@ -86,6 +86,7 @@ def on_edit_table_group_clicked(table_group_id: str) -> None:
},
on_change_handlers={
"RunSchedulesClicked": lambda *_: ProfilingScheduleDialog().open(project_code),
+ "RunNotificationsClicked": manage_notifications(project_code),
"AddTableGroupClicked": on_add_table_group_clicked,
"EditTableGroupClicked": on_edit_table_group_clicked,
"DeleteTableGroupClicked": partial(self.delete_table_group_dialog, project_code),
@@ -299,7 +300,7 @@ def _format_table_group_list(
"name": connection["connection_name"],
"flavor": asdict(flavor),
}
-
+
formatted_list.append(formatted_table_group)
return formatted_list
diff --git a/testgen/ui/views/test_definitions.py b/testgen/ui/views/test_definitions.py
index bfa3a2a2..ce35d955 100644
--- a/testgen/ui/views/test_definitions.py
+++ b/testgen/ui/views/test_definitions.py
@@ -483,11 +483,12 @@ def show_test_form(
if dynamic_attributes_labels_raw:
dynamic_attributes_labels = dynamic_attributes_labels_raw.split(",")
- dynamic_attributes_help_raw = selected_test_type_row["default_parm_help"]
- if not dynamic_attributes_help_raw:
- dynamic_attributes_help_raw = "No help is available"
# Split on pipe -- could contain commas
- dynamic_attributes_help = dynamic_attributes_help_raw.split("|")
+ dynamic_attributes_help = (
+ selected_test_type_row["default_parm_help"].split("|")
+ if selected_test_type_row["default_parm_help"]
+ else None
+ )
if mode == "edit":
st.text_input(label="Test Type", value=test_type_display, disabled=True),
@@ -700,7 +701,7 @@ def render_dynamic_attribute(attribute: str, container: DeltaGenerator):
help_text = (
dynamic_attributes_help[index]
if dynamic_attributes_help and len(dynamic_attributes_help) > index
- else "Help text is not available."
+ else None
)
if attribute == "custom_query":
@@ -710,7 +711,7 @@ def render_dynamic_attribute(attribute: str, container: DeltaGenerator):
elif test_type == "CUSTOM":
custom_query_placeholder = "EXAMPLE: SELECT product, SUM(qty_sold) as sum_sold, SUM(qty_shipped) as qty_shipped \n FROM {DATA_SCHEMA}.sales_history \n GROUP BY product \n HAVING SUM(qty_shipped) > SUM(qty_sold)"
- test_definition[attribute] = st.text_area(
+ test_definition[attribute] = container.text_area(
label=label_text,
value=custom_query,
placeholder=custom_query_placeholder,
@@ -766,6 +767,7 @@ def render_dynamic_attribute(attribute: str, container: DeltaGenerator):
if test_scope != "tablegroup":
st.divider()
+ mid_container = st.container()
mid_left_column, mid_right_column = st.columns([0.5, 0.5])
if has_match_attributes:
@@ -775,7 +777,7 @@ def render_dynamic_attribute(attribute: str, container: DeltaGenerator):
render_dynamic_attribute(f"match_{attribute}", mid_right_column)
if "custom_query" in dynamic_attributes:
- render_dynamic_attribute("custom_query", mid_left_column)
+ render_dynamic_attribute("custom_query", mid_container)
total_length = len(leftover_attributes)
half_length = round(total_length / 2)
@@ -953,13 +955,13 @@ def validate_form(test_scope, test_definition, column_name_label):
def prompt_for_test_type():
- col0, col1, col2, col3, col4, col5 = st.columns([0.1, 0.2, 0.2, 0.2, 0.2, 0.1])
+ col0, col1, col2, col3, col4 = st.columns([0.2, 0.2, 0.2, 0.2, 0.2])
col0.write("Show Types")
- include_referential=col1.checkbox(":green[⧉] Referential", True),
- include_table=col2.checkbox(":green[⊞] Table", True),
- include_column=col3.checkbox(":green[≣] Column", True),
- include_custom=col4.checkbox(":green[⛭] Custom", True),
+ include_referential=col1.checkbox(":green[⧉] Referential", True)
+ include_table=col2.checkbox(":green[⊞] Table", True)
+ include_column=col3.checkbox(":green[≣] Column", True)
+ include_custom=col4.checkbox(":green[⛭] Custom", True)
# always exclude tablegroup scopes from showing
include_all = not any([include_referential, include_table, include_column, include_custom])
diff --git a/testgen/ui/views/test_results.py b/testgen/ui/views/test_results.py
index f91029c4..b181ac62 100644
--- a/testgen/ui/views/test_results.py
+++ b/testgen/ui/views/test_results.py
@@ -600,7 +600,7 @@ def render_selected_details(
if selected_item["measure_uom_description"]:
st.caption(selected_item["measure_uom_description"])
if selected_item["result_message"]:
- st.caption(selected_item["result_message"])
+ st.caption(selected_item["result_message"].replace("*", "\\*"))
fm.render_grid_select(dfh, show_hist_columns, selection_mode="disabled", key="test_history")
with pg_col2:
ut_tab1, ut_tab2 = st.tabs(["History", "Test Definition"])
@@ -809,8 +809,9 @@ def source_data_dialog(selected_row):
st.markdown("#### Test Parameters")
testgen.caption(selected_row["input_parameters"], styles="max-height: 75px; overflow: auto;")
- st.markdown("#### Result Detail")
- st.caption(selected_row["result_message"])
+ if selected_row["result_message"]:
+ st.markdown("#### Result Detail")
+ st.caption(selected_row["result_message"].replace("*", "\\*"))
st.markdown("#### SQL Query")
if selected_row["test_type"] == "CUSTOM":
diff --git a/testgen/ui/views/test_runs.py b/testgen/ui/views/test_runs.py
index a1a802e3..950384a4 100644
--- a/testgen/ui/views/test_runs.py
+++ b/testgen/ui/views/test_runs.py
@@ -2,23 +2,30 @@
import typing
from collections.abc import Iterable
from functools import partial
+from typing import Any
import streamlit as st
import testgen.common.process_service as process_service
import testgen.ui.services.form_service as fm
from testgen.common.models import with_database_session
+from testgen.common.models.notification_settings import (
+ TestRunNotificationSettings,
+ TestRunNotificationTrigger,
+)
from testgen.common.models.project import Project
from testgen.common.models.scheduler import RUN_TESTS_JOB_KEY
from testgen.common.models.table_group import TableGroup
from testgen.common.models.test_run import TestRun
from testgen.common.models.test_suite import TestSuite, TestSuiteMinimal
+from testgen.common.notifications.test_run import send_test_run_notifications
from testgen.ui.components import widgets as testgen
from testgen.ui.components.widgets import testgen_component
from testgen.ui.navigation.menu import MenuItem
from testgen.ui.navigation.page import Page
from testgen.ui.navigation.router import Router
from testgen.ui.session import session, temp_value
+from testgen.ui.views.dialogs.manage_notifications import NotificationSettingsDialogBase
from testgen.ui.views.dialogs.manage_schedules import ScheduleDialog
from testgen.ui.views.dialogs.run_tests_dialog import run_tests_dialog
from testgen.utils import friendly_score, to_int
@@ -85,6 +92,7 @@ def render(self, project_code: str, table_group_id: str | None = None, test_suit
on_change_handlers={
"FilterApplied": on_test_runs_filtered,
"RunSchedulesClicked": lambda *_: TestRunScheduleDialog().open(project_code),
+ "RunNotificationsClicked": manage_notifications(project_code),
"RunTestsClicked": lambda *_: run_tests_dialog(project_code, None, test_suite_id),
"RefreshData": refresh_data,
"RunsDeleted": partial(on_delete_runs, project_code, table_group_id, test_suite_id),
@@ -107,6 +115,50 @@ def refresh_data(*_) -> None:
TestRun.select_summary.clear()
+def manage_notifications(project_code):
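+ # Factory: returns an event handler that opens the test-run notification settings dialog scoped to project_code.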
+
+ def open_dialog(*_):
+ TestRunNotificationSettingsDialog(TestRunNotificationSettings, {"project_code": project_code}).open()
+
+ return open_dialog
+
+
+class TestRunNotificationSettingsDialog(NotificationSettingsDialogBase):
+
+ title = "Test Run Notifications"
+
+ def _item_to_model_attrs(self, item: dict[str, Any]) -> dict[str, Any]:
+ return {
+ "trigger": TestRunNotificationTrigger(item["trigger"]),
+ "test_suite_id": item["scope"],
+ }
+
+ def _model_to_item_attrs(self, model: TestRunNotificationSettings) -> dict[str, Any]:
+ return {
+ "trigger": model.trigger.value if model.trigger else None,
+ "scope": str(model.test_suite_id) if model.test_suite_id else None,
+ }
+
+ def _get_component_props(self) -> dict[str, Any]:
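+ # Build the scope dropdown (every test suite in the project plus an "All Test Suites" entry) and the trigger options.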
+ test_suite_options = [
+ (str(ts.id), ts.test_suite)
+ for ts in TestSuite.select_minimal_where(TestSuite.project_code == self.ns_attrs["project_code"])
+ ]
+ test_suite_options.insert(0, (None, "All Test Suites"))
+ trigger_labels = {
+ TestRunNotificationTrigger.always.value: "Always",
+ TestRunNotificationTrigger.on_failures.value: "On test failures",
+ TestRunNotificationTrigger.on_warnings.value: "On test failures and warnings",
+ TestRunNotificationTrigger.on_changes.value: "On new test failures and warnings",
+ }
+ trigger_options = [(t.value, trigger_labels[t.value]) for t in TestRunNotificationTrigger]
+ return {
+ "scope_label": "Test Suite",
+ "scope_options": test_suite_options,
+ "trigger_options": trigger_options,
+ }
+
+
class TestRunScheduleDialog(ScheduleDialog):
title = "Test Run Schedules"
@@ -134,6 +186,7 @@ def on_cancel_run(test_run: dict) -> None:
process_status, process_message = process_service.kill_test_run(to_int(test_run["process_id"]))
if process_status:
TestRun.cancel_run(test_run["test_run_id"])
+ send_test_run_notifications(TestRun.get(test_run["test_run_id"]))
fm.reset_post_updates(str_message=f":{'green' if process_status else 'red'}[{process_message}]", as_toast=True)
@@ -182,10 +235,10 @@ def on_delete_confirmed(*_args) -> None:
process_status, _ = process_service.kill_test_run(to_int(test_run.process_id))
if process_status:
TestRun.cancel_run(test_run.test_run_id)
+ send_test_run_notifications(TestRun.get(test_run.test_run_id))
TestRun.cascade_delete(test_run_ids)
st.rerun()
except Exception:
LOG.exception("Failed to delete test run")
result = {"success": False, "message": "Unable to delete the test run, try again."}
st.rerun(scope="fragment")
-
\ No newline at end of file
diff --git a/testgen/ui/views/test_suites.py b/testgen/ui/views/test_suites.py
index 0345c42a..9a8109ea 100644
--- a/testgen/ui/views/test_suites.py
+++ b/testgen/ui/views/test_suites.py
@@ -18,7 +18,7 @@
from testgen.ui.session import session
from testgen.ui.views.dialogs.generate_tests_dialog import generate_tests_dialog
from testgen.ui.views.dialogs.run_tests_dialog import run_tests_dialog
-from testgen.ui.views.test_runs import TestRunScheduleDialog
+from testgen.ui.views.test_runs import TestRunScheduleDialog, manage_notifications
from testgen.utils import to_dataframe
PAGE_ICON = "rule"
@@ -47,7 +47,7 @@ def render(self, project_code: str, table_group_id: str | None = None, **_kwargs
user_can_edit = session.auth.user_has_permission("edit")
test_suites = TestSuite.select_summary(project_code, table_group_id)
project_summary = Project.get_summary(project_code)
-
+
testgen.testgen_component(
"test_suites",
props={
@@ -72,6 +72,7 @@ def render(self, project_code: str, table_group_id: str | None = None, **_kwargs
"EditActionClicked": partial(edit_test_suite_dialog, project_code, table_groups),
"DeleteActionClicked": delete_test_suite_dialog,
"RunTestsClicked": lambda test_suite_id: run_tests_dialog(project_code, TestSuite.get_minimal(test_suite_id)),
+ "RunNotificationsClicked": manage_notifications(project_code),
"GenerateTestsClicked": lambda test_suite_id: generate_tests_dialog(TestSuite.get_minimal(test_suite_id)),
},
)
diff --git a/testgen/utils/__init__.py b/testgen/utils/__init__.py
index 2b295ff7..4dfc9d39 100644
--- a/testgen/utils/__init__.py
+++ b/testgen/utils/__init__.py
@@ -1,23 +1,25 @@
from __future__ import annotations
+import logging
from collections.abc import Iterable
from datetime import UTC, datetime
from decimal import Decimal
+from functools import wraps
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from testgen.common.models.scores import ScoreCard
import json
-import urllib.parse
from typing import Any, TypeVar
from uuid import UUID
import pandas as pd
-import streamlit as st
T = TypeVar("T")
+LOG = logging.getLogger("testgen")
+
def to_int(value: float | int) -> int:
if pd.notnull(value):
@@ -42,9 +44,9 @@ def to_dataframe(
def is_uuid4(value: str) -> bool:
- if isinstance(value, UUID):
+ if isinstance(value, UUID):
return True
-
+
try:
uuid = UUID(value, version=4)
except Exception:
@@ -58,18 +60,12 @@ def try_json(value: str | None, default: T | None) -> T:
return json.loads(value)
except:
return default
-
+
def get_exception_message(exception: Exception) -> str:
return exception.args[0].rstrip() if exception.args and isinstance(exception.args[0], str) else str(exception)
-# https://github.com/streamlit/streamlit/issues/798#issuecomment-1647759949
-def get_base_url() -> str:
- session = st.runtime.get_instance()._session_mgr.list_active_sessions()[0]
- return urllib.parse.urlunparse([session.client.request.protocol, session.client.request.host, "", "", "", ""])
-
-
def make_json_safe(value: Any) -> str | bool | int | float | None:
if isinstance(value, UUID):
return str(value)
@@ -102,7 +98,7 @@ def chunk_queries(queries: list[str], join_string: str, max_query_length: int) -
chunked_queries.append(current_chunk)
return chunked_queries
-
+
def score(profiling_score_: float, tests_score_: float) -> float:
tests_score = _pandas_default(tests_score_, 0.0)
@@ -209,7 +205,7 @@ def friendly_score(score: float) -> str:
score = 100 * score
if score == 100:
return "100"
-
+
rounded = round(score, 1)
if rounded == 0:
return "< 0.1"
@@ -225,7 +221,7 @@ def friendly_score_impact(impact: float) -> str:
if impact == 100:
return "100"
-
+
rounded = round(impact, 2)
if rounded == 0:
return "< 0.01"
@@ -233,3 +229,15 @@ def friendly_score_impact(impact: float) -> str:
return "> 99.99"
return str(rounded)
+
+
+def log_and_swallow_exception(func):
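+ # Decorator: run the wrapped function, logging any exception instead of propagating it (the return value is discarded).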
+
+ @wraps(func)
+ def wrapped(*args, **kwargs):
+ try:
+ func(*args, **kwargs)
+ except Exception:
+ LOG.exception("Error executing '%s.%s':", func.__module__, func.__name__)
+
+ return wrapped
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
new file mode 100644
index 00000000..4f88336a
--- /dev/null
+++ b/tests/unit/conftest.py
@@ -0,0 +1,19 @@
+from unittest.mock import patch
+
+import pytest
+
+
+@pytest.fixture(autouse=True)
+def patched_settings():
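+ # Stub PersistedSetting.get so code under test resolves BASE_URL without touching the database.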
+ settings = {
+ "BASE_URL": "http://tg-base-url",
+ }
+ with patch("testgen.common.models.settings.PersistedSetting.get") as mock:
+ mock.side_effect = settings.get
+ yield mock
+
+
+@pytest.fixture(autouse=True)
+def db_session_mock():
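+ # Patch the Session factory and yield the session object the code under test obtains from the context manager.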
+ with patch("testgen.common.models.Session") as factory_mock:
+ yield factory_mock().__enter__()
diff --git a/tests/unit/test_common_email.py b/tests/unit/test_common_email.py
index f0e94cee..907a4ba2 100644
--- a/tests/unit/test_common_email.py
+++ b/tests/unit/test_common_email.py
@@ -2,7 +2,7 @@
import pytest
-from testgen.common.email import BaseEmailTemplate, EmailTemplateException
+from testgen.common.notifications.base import BaseEmailTemplate, EmailTemplateException
class TestEmailTemplate(BaseEmailTemplate):
@@ -16,13 +16,13 @@ def get_body_template(self) -> str:
@pytest.fixture
def smtp_mock():
- with patch("testgen.common.email.smtplib.SMTP_SSL") as mock:
+ with patch("testgen.common.notifications.base.smtplib.SMTP_SSL") as mock:
yield mock
@pytest.fixture
def def_settings():
- with patch("testgen.common.email.settings") as mock:
+ with patch("testgen.common.notifications.base.settings") as mock:
mock.EMAIL_FROM_ADDRESS = "from@email"
mock.SMTP_ENDPOINT = "smtp-endpoint"
mock.SMTP_PORT = 333
diff --git a/tests/unit/test_profiling_run_notifications.py b/tests/unit/test_profiling_run_notifications.py
new file mode 100644
index 00000000..b1fc911d
--- /dev/null
+++ b/tests/unit/test_profiling_run_notifications.py
@@ -0,0 +1,164 @@
+from itertools import count
+from unittest.mock import ANY, Mock, call, patch
+
+import pytest
+
+from testgen.common.models.hygiene_issue import IssueCount
+from testgen.common.models.notification_settings import (
+ ProfilingRunNotificationSettings,
+ ProfilingRunNotificationTrigger,
+)
+from testgen.common.models.profiling_run import ProfilingRun
+from testgen.common.notifications.profiling_run import send_profiling_run_notifications
+
+
+def create_ns(**kwargs):
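+ # Build a ProfilingRunNotificationSettings instance with save() patched out so nothing is persisted.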
+ with patch("testgen.common.notifications.profiling_run.ProfilingRunNotificationSettings.save"):
+ return ProfilingRunNotificationSettings.create("proj", None, **kwargs)
+
+
+@pytest.fixture
+def ns_select_result():
+ return [
+ create_ns(recipients=["always@example.com"], trigger=ProfilingRunNotificationTrigger.always),
+ create_ns(recipients=["on_changes@example.com"], trigger=ProfilingRunNotificationTrigger.on_changes),
+ ]
+
+
+@pytest.fixture
+def ns_select_patched(ns_select_result):
+ with patch("testgen.common.notifications.profiling_run.ProfilingRunNotificationSettings.select") as mock:
+ mock.return_value = ns_select_result
+ yield mock
+
+
+def create_hygiene_issue_list(length):
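+ # Build mock hygiene issues, advancing to the next priority every three issues (capped at "Moderate").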
+ priorities = ("Definite", "Likely", "Possible", "High", "Moderate")
+ tr_list = []
+ for idx in range(length):
+ hi = Mock()
+ priority = priorities[min(4, idx // 3)]
+ hi.priority = priority
+ hi.table_name = "table-name"
+ hi.column_name = "col-name"
+ hi.type_.name = "issue-type"
+ hi.detail = "issue-detail"
+ tr_list.append(hi)
+
+ return tr_list
+
+
+@pytest.fixture
+def hi_select_mock():
+ with patch("testgen.common.notifications.profiling_run.HygieneIssue.select_with_diff") as mock:
+ yield mock
+
+
+@pytest.fixture
+def hi_count_mock():
+ with patch("testgen.common.notifications.profiling_run.HygieneIssue.select_count_by_priority") as mock:
+ yield mock
+
+
+@pytest.fixture
+def send_mock():
+ with patch("testgen.common.notifications.profiling_run.ProfilingRunEmailTemplate.send") as mock:
+ yield mock
+
+
+@pytest.fixture
+def get_prev_mock():
+ with patch("testgen.common.notifications.profiling_run.ProfilingRun.get_previous") as mock:
+ yield mock
+
+
+@pytest.mark.parametrize(
+ ("profiling_run_status", "has_prev_run", "issue_count", "new_issue_count", "expected_triggers"),
+ (
+ ("Error", True, 25, 0, ("always", "on_changes")),
+ ("Error", True, 0, 0, ("always", "on_changes")),
+ ("Cancelled", True, 50, 10, ("always", "on_changes")),
+ ("Complete", True, 50, 10, ("always", "on_changes")),
+ ("Complete", True, 15, 0, ("always",)),
+ ("Complete", False, 15, 15, ("always", "on_changes")),
+ ),
+)
+def test_send_profiling_run_notification(
+ profiling_run_status,
+ has_prev_run,
+ issue_count,
+ new_issue_count,
+ expected_triggers,
+ db_session_mock,
+ ns_select_patched,
+ get_prev_mock,
+ hi_select_mock,
+ hi_count_mock,
+ send_mock,
+):
+ profiling_run = ProfilingRun(id="pr-id", table_groups_id="tg-id", status=profiling_run_status)
+ get_prev_mock.return_value = ProfilingRun(id="pr-prev-id") if has_prev_run else None
+ new_count = count()
+ priorities = ("Definite", "Likely", "Possible", "High", "Moderate")
+ hi_list = [
+ (hi, not has_prev_run or next(new_count) < new_issue_count)
+ for hi in create_hygiene_issue_list(issue_count)
+ ]
+ hi_select_mock.return_value = hi_list[:20]
+ hi_count_dict = {p: IssueCount() for p in priorities}
+ for hi, _ in hi_list:
+ hi_count_dict[hi.priority].total += 1
+ hi_count_mock.return_value = hi_count_dict
+ db_session_mock.execute().one.return_value = (
+ ("project_name", "proj-name"),
+ ("table_groups_name", "t-group-name"),
+ ("table_group_schema", "t-group-schema"),
+ )
+
+ send_profiling_run_notifications(profiling_run)
+
+ get_prev_mock.assert_called()
+ ns_select_patched.assert_called_once_with(enabled=True, table_group_id="tg-id")
+ hi_select_mock.assert_called_once_with("pr-id", "pr-prev-id" if has_prev_run else None, limit=20)
+ hi_count_mock.assert_called_once_with("pr-id")
+
+ send_mock.assert_has_calls(
+ [
+ call(
+ ANY,
+ {
+ "profiling_run": {
+ "id": "pr-id",
+ "issues_url": "http://tg-base-url/profiling-runs:hygiene?run_id=pr-id",
+ "results_url": "http://tg-base-url/profiling-runs:results?run_id=pr-id",
+ "start_time": None,
+ "end_time": None,
+ "status": profiling_run_status,
+ "log_message": None,
+ "table_ct": None,
+ "column_ct": None,
+ },
+ "new_issue_count": new_issue_count,
+ "hygiene_issues_summary": ANY,
+ "notification_trigger": trigger,
+ "project_name": "proj-name",
+ "table_groups_name": "t-group-name",
+ "table_group_schema": "t-group-schema",
+ },
+ )
+ for trigger in expected_triggers
+ ],
+ any_order=True,
+ )
+ assert send_mock.call_count == len(expected_triggers)
+
+ summary = send_mock.call_args_list[0].args[1]["hygiene_issues_summary"]
+
+ assert len(summary) == len(priorities)
+ assert sum(s["count"].total for s in summary) == issue_count
+ assert sum(s["truncated"] for s in summary) == max(0, issue_count - 20)
+ assert sum(len(s["issues"]) for s in summary) == min(issue_count, 20)
+ assert all(s.get("label") is not None for s in summary)
+ assert all(s.get("priority") in priorities for s in summary)
+ assert all(s.get("url") is not None for s in summary)
diff --git a/tests/unit/test_score_drop_notifications.py b/tests/unit/test_score_drop_notifications.py
new file mode 100644
index 00000000..796ecee6
--- /dev/null
+++ b/tests/unit/test_score_drop_notifications.py
@@ -0,0 +1,182 @@
+from itertools import chain
+from unittest.mock import ANY, call, patch
+
+import pytest
+
+from testgen.common.models.notification_settings import ScoreDropNotificationSettings
+from testgen.common.models.scores import ScoreDefinition, ScoreDefinitionResult
+from testgen.common.notifications.score_drop import collect_score_notification_data, send_score_drop_notifications
+
+
+def create_ns(**kwargs):
+ with patch("testgen.common.notifications.score_drop.ScoreDropNotificationSettings.save"):
+ return ScoreDropNotificationSettings.create("proj", **kwargs)
+
+
+@pytest.fixture
+def ns_select_result():
+ return [
+ create_ns(
+ recipients=["cde_99@example.com"],
+ score_definition_id="sd-1",
+ total_score_threshold=None,
+ cde_score_threshold=99,
+ ),
+ create_ns(
+ recipients=["total_99@example.com"],
+ score_definition_id="sd-1",
+ total_score_threshold=99,
+ cde_score_threshold=None,
+ ),
+ create_ns(
+ recipients=["both_97@example.com"],
+ score_definition_id="sd-1",
+ total_score_threshold=97,
+ cde_score_threshold=97,
+ ),
+ create_ns(
+ recipients=["both_94@example.com"],
+ score_definition_id="sd-1",
+ total_score_threshold=94,
+ cde_score_threshold=94,
+ ),
+ ]
+
+@pytest.fixture
+def send_mock():
+ with patch("testgen.common.notifications.score_drop.ScoreDropEmailTemplate.send") as mock:
+ yield mock
+
+
+@pytest.fixture
+def select_mock():
+ with patch("testgen.common.notifications.score_drop.select") as mock:
+ yield mock
+
+
+@pytest.fixture
+def score_definition():
+
+ def result_list(sd):
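+ # Previous-run results: a 0.98 score for each category, plus cde_score/score rows when enabled on the definition.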
+ return [
+ ScoreDefinitionResult(definition_id=sd.id, score=0.98, category=cat)
+ for cat in chain(
+ ("Uniqueness", "Accuracy", "Consistency"),
+ ["cde_score"] if sd.cde_score else [],
+ ["score"] if sd.total_score else [],
+ )
+ ]
+
+ with patch.object(ScoreDefinition, "results", new=property(result_list)):
+ sd = ScoreDefinition(
+ id="sd-1",
+ project_code="test-proj",
+ name="My Score",
+ cde_score=True,
+ total_score=True,
+ )
+ yield sd
+
+
+@pytest.mark.parametrize(
+ "def_total, def_cde, fresh_total, fresh_cde, expected_categories",
+ (
+ (True, True, 0.99, 0.99, ["score", "cde_score"]),
+ (True, False, 0.99, 0.99, ["score"]),
+ (True, True, None, 0.99, ["cde_score"]),
+ (True, False, None, 0.99, []),
+ ),
+)
+def test_collect_score_notification_data(
+ def_total, def_cde, fresh_total, fresh_cde, expected_categories, score_definition
+):
+ score_definition.total_score = def_total
+ score_definition.cde_score = def_cde
+ fresh_score = {"score": fresh_total, "cde_score": fresh_cde, "Accuracy": 0.99, "SomethingElse": 0.99}
+
+ data = []
+ collect_score_notification_data(data, score_definition, fresh_score)
+
+ assert len(data) == len(expected_categories)
+ data_per_category = {d[1]: d for d in data}
+ for cat in expected_categories:
+ sd, cat_data, prev, fresh = data_per_category[cat]
+ assert sd == score_definition
+ assert cat_data == cat
+ assert prev == 0.98
+ assert fresh == 0.99
+
+
+def test_send_score_drop_notifications_no_data(select_mock):
+ send_score_drop_notifications([])
+ select_mock.assert_not_called()
+
+
+def test_send_score_drop_notifications_no_match(
+ score_definition, select_mock, db_session_mock, ns_select_result, send_mock,
+):
+ data = [
+ (score_definition, "score", 1.0, 0.1),
+ (score_definition, "cde_score", 1.0, 0.1)
+ ]
+ for ns in ns_select_result:
+ ns.score_definition_id = "sd-x"
+ db_session_mock.execute().fetchall.return_value = ns_select_result
+
+ send_score_drop_notifications(data)
+
+ send_mock.assert_not_called()
+
+
+@pytest.mark.parametrize(
+ "total_prev, total_fresh, cde_prev, cde_fresh, triggers",
+ (
+ (1.00, 0.99, 1.00, 0.99, ()),
+ (0.92, 0.99, 0.92, 0.99, ()),
+ (1.00, 1.00, 1.00, 0.98, ((False, True),)),
+ (1.00, 0.98, 1.00, 1.00, ((True, False),)),
+ (1.00, 0.98, 1.00, 0.98, ((False, True), (True, False))),
+ (1.00, 0.97, 1.00, 0.97, ((False, True), (True, False))),
+ (1.00, 0.96, 1.00, 0.96, ((False, True), (True, False), (True, True))),
+ (1.00, 0.94, 1.00, 0.94, ((False, True), (True, False), (True, True))),
+ (1.00, 0.93, 1.00, 0.93, ((False, True), (True, False), (True, True), (True, True))),
+ (0.89, 0.82, 0.87, 0.82, ((False, True), (True, False), (True, True), (True, True))),
+ )
+)
+def test_send_score_drop_notifications(
+ total_prev, total_fresh, cde_prev, cde_fresh, triggers, score_definition, db_session_mock, ns_select_result,
+ send_mock,
+):
+ data = [
+ (score_definition, "score", total_prev, total_fresh),
+ (score_definition, "cde_score", cde_prev, cde_fresh)
+ ]
+ db_session_mock.execute().fetchall.return_value = [(ns, "Test Project") for ns in ns_select_result]
+
+ send_score_drop_notifications(data)
+
+ expected_common_diff = {"threshold": ANY, "increase": ANY, "decrease": ANY}
+ expected_total_diff = {
+ "category": "score", "label": "Total", "prev": total_prev, "current": total_fresh, **expected_common_diff
+ }
+ expected_cde_diff = {
+ "category": "cde_score", "label": "CDE", "prev": cde_prev, "current": cde_fresh, **expected_common_diff
+ }
+ send_mock.assert_has_calls(
+ [
+ call(
+ [ANY],
+ {
+ "project_name": "Test Project",
+ "definition": score_definition,
+ "scorecard_url": "http://tg-base-url/quality-dashboard:score-details?definition_id=sd-1",
+ "diff": [
+ {**expected_total_diff, "notify": total_triggers},
+ {**expected_cde_diff, "notify": cde_triggers},
+ ]
+ }
+ )
+ for total_triggers, cde_triggers in triggers
+ ]
+ )
+ assert send_mock.call_count == len(triggers)
diff --git a/tests/unit/test_test_run_notifications.py b/tests/unit/test_test_run_notifications.py
new file mode 100644
index 00000000..e1817eb4
--- /dev/null
+++ b/tests/unit/test_test_run_notifications.py
@@ -0,0 +1,203 @@
+import uuid
+from unittest.mock import ANY, Mock, call, patch
+
+import pytest
+
+from testgen.common.models.notification_settings import TestRunNotificationSettings, TestRunNotificationTrigger
+from testgen.common.models.test_result import TestResultStatus
+from testgen.common.models.test_run import TestRun
+from testgen.common.notifications.test_run import send_test_run_notifications
+
+
+def create_ns(**kwargs):
+ with patch("testgen.common.notifications.test_run.TestRunNotificationSettings.save"):
+ return TestRunNotificationSettings.create("proj", None, **kwargs)
+
+
+def create_diff(failed=0, error=0, warning=0):
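+ # Build TestResult.diff-style tuples: (previous status, new status, list of result ids) for each non-zero count.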
+ return [
+ (TestResultStatus.Passed, status, [uuid.uuid4() for _ in range(count)])
+ for status, count in (
+ (TestResultStatus.Failed, failed),
+ (TestResultStatus.Warning, warning),
+ (TestResultStatus.Error, error),
+ )
+ if count > 0
+ ]
+
+
+def create_test_result_list(length):
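+ # Build test-result mocks whose _as_dict() returns a minimal test-result row.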
+ tr_list = []
+ for idx in range(length):
+ mock = Mock()
+ mock._as_dict.return_value = {
+ "table_name": "tr-table",
+ "column_names": "tr-columns",
+ "message": f"tr-message-{idx}",
+ "is_new": True,
+ "test_type": "tr-type",
+ }
+ tr_list.append(mock)
+
+ return tr_list
+
+
+@pytest.fixture
+def ns_select_result():
+ return [
+ create_ns(recipients=["always@example.com"], trigger=TestRunNotificationTrigger.always),
+ create_ns(recipients=["on_failures@example.com"], trigger=TestRunNotificationTrigger.on_failures),
+ create_ns(recipients=["on_warnings@example.com"], trigger=TestRunNotificationTrigger.on_warnings),
+ create_ns(recipients=["on_changes@example.com"], trigger=TestRunNotificationTrigger.on_changes),
+ ]
+
+
+@pytest.fixture
+def ns_select_patched(ns_select_result):
+ with patch("testgen.common.notifications.test_run.TestRunNotificationSettings.select") as mock:
+ mock.return_value = ns_select_result
+ yield mock
+
+
+@pytest.fixture
+def send_mock():
+ with patch("testgen.common.notifications.test_run.TestRunEmailTemplate.send") as mock:
+ yield mock
+
+
+@pytest.fixture
+def get_prev_mock():
+ with patch("testgen.common.notifications.test_run.TestRun.get_previous") as mock:
+ yield mock
+
+
+@pytest.fixture
+def diff_mock():
+ with patch("testgen.common.notifications.test_run.TestResult.diff") as mock:
+ yield mock
+
+
+@pytest.fixture
+def select_mock():
+ with patch("testgen.common.notifications.test_run.select") as mock:
+ yield mock
+
+
+@pytest.fixture
+def select_summary_mock():
+ with patch("testgen.common.notifications.test_run.TestRun.select_summary") as mock:
+ yield mock
+
+
+@pytest.mark.parametrize(
+ (
+ "test_run_status", "failed_ct", "warning_ct", "error_ct", "diff_mock_args",
+ "failed_expected", "warning_expected", "error_expected", "expected_triggers"
+ ),
+ [
+ ("Complete", 0, 0, 0, {}, 0, 0, 0, ["always"]),
+ ("Complete", 1, 1, 1, {}, 1, 1, 1, ["always", "on_failures", "on_warnings"]),
+ ("Complete", 50, 50, 50, {"failed": 2, "warning": 3}, 10, 5, 5, [
+ "always", "on_failures", "on_warnings", "on_changes",
+ ]),
+ ("Complete", 0, 0, 50, {"error": 50}, 0, 0, 20, ["always", "on_failures", "on_warnings", "on_changes"]),
+ ("Complete", 50, 0, 0, None, 20, 0, 0, ["always", "on_failures", "on_warnings"]),
+ ("Complete", 50, 0, 10, {"failed": 5}, 15, 0, 5, ["always", "on_failures", "on_warnings", "on_changes"]),
+ ("Error", 0, 0, 0, {}, 0, 0, 0, ["always", "on_failures", "on_warnings", "on_changes"]),
+ ("Error", 20, 10, 0, None, 15, 5, 0, ["always", "on_failures", "on_warnings", "on_changes"]),
+ ("Cancelled", 0, 0, 0, {}, 0, 0, 0, ["always", "on_failures", "on_warnings", "on_changes"]),
+ ("Cancelled", 30, 20, 0, {}, 15, 5, 0, ["always", "on_failures", "on_warnings", "on_changes"]),
+ ]
+)
+def test_send_test_run_notification(
+ test_run_status,
+ failed_ct,
+ warning_ct,
+ error_ct,
+ diff_mock_args,
+ failed_expected,
+ warning_expected,
+ error_expected,
+ expected_triggers,
+ ns_select_patched,
+ get_prev_mock,
+ diff_mock,
+ send_mock,
+ db_session_mock,
+ select_mock,
+ select_summary_mock,
+):
+
+ test_run = TestRun(
+ id="tr-id",
+ status=test_run_status,
+ test_suite_id="ts-id",
+ failed_ct=failed_ct,
+ warning_ct=warning_ct,
+ error_ct=error_ct,
+ )
+
+ db_session_mock.execute.side_effect = [
+ [{} for _ in range(ct)]
+ for ct in (failed_expected, warning_expected, error_expected)
+ if ct > 0
+ ]
+ if diff_mock_args is None:
+ get_prev_mock.return_value = None
+ else:
+ diff_mock.return_value = create_diff(**diff_mock_args)
+ get_prev_mock.return_value = TestRun(id="tr-prev-id")
+ summary = object()
+ select_summary_mock.return_value = [summary]
+
+ send_test_run_notifications(test_run)
+
+ ns_select_patched.assert_called_once_with(enabled=True, test_suite_id="ts-id")
+
+ if diff_mock_args is None:
+ diff_mock.assert_not_called()
+ else:
+ diff_mock.assert_called_once_with("tr-prev-id", "tr-id")
+
+ select_mock.assert_has_calls(
+ [
+ call().join().where().order_by().limit(ct)
+ for ct in (failed_expected, warning_expected, error_expected)
+ if ct > 0
+ ],
+ any_order=True,
+ )
+
+ expected_context = {
+ "test_run": summary,
+ "test_run_url": "http://tg-base-url/test-runs:results?run_id=tr-id",
+ "test_run_id": "tr-id",
+ "test_result_summary": ANY,
+ }
+
+ send_mock.assert_has_calls(
+ [
+ call(
+ ANY,
+ {**expected_context, "notification_trigger": trigger}
+ )
+ for trigger in expected_triggers
+ ],
+ )
+ assert send_mock.call_count == len(expected_triggers)
+
+ test_result_summary = list(send_mock.mock_calls[0].args[1]["test_result_summary"])
+ for status, total, expected in (
+ (TestResultStatus.Failed, failed_ct, failed_expected),
+ (TestResultStatus.Warning, warning_ct, warning_expected),
+ (TestResultStatus.Error, error_ct, error_expected),
+ ):
+ if expected:
+ result_list = test_result_summary.pop(0)
+ assert result_list["status"] == status.value
+ assert result_list["label"]
+ assert result_list["truncated"] == total - expected
+ assert result_list["total"] == total
+ assert len(result_list["result_list"]) == expected
+ assert not test_result_summary