diff --git a/src/snowflake/cli/_plugins/dcm/commands.py b/src/snowflake/cli/_plugins/dcm/commands.py index 9620190aa4..5754d17df3 100644 --- a/src/snowflake/cli/_plugins/dcm/commands.py +++ b/src/snowflake/cli/_plugins/dcm/commands.py @@ -11,14 +11,25 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Optional +import json +from pathlib import Path +from dataclasses import dataclass, field +from typing import Dict, List, Optional import typer +from snowflake.cli._plugins.dcm.manager import AnalysisType from snowflake.cli._plugins.dcm.manager import DCMProjectManager +from snowflake.cli._plugins.dcm.utils import ( + TestResultFormat, + export_test_results, + format_refresh_results, + format_test_failures, +) from snowflake.cli._plugins.object.command_aliases import add_object_command_aliases from snowflake.cli._plugins.object.commands import scope_option from snowflake.cli._plugins.object.manager import ObjectManager from snowflake.cli.api.commands.flags import ( + IdentifierType, IfExistsOption, IfNotExistsOption, OverrideableOption, @@ -47,6 +58,7 @@ is_hidden=FeatureFlag.ENABLE_SNOWFLAKE_PROJECTS.is_disabled, ) + dcm_identifier = identifier_argument(sf_object="DCM Project", example="MY_PROJECT") variables_flag = variables_option( 'Variables for the execution context; for example: `-D "="`.' 
@@ -112,6 +124,11 @@ def deploy( variables: Optional[List[str]] = variables_flag, configuration: Optional[str] = configuration_flag, alias: Optional[str] = alias_option, + skip_plan: bool = typer.Option( + False, + "--skip-plan", + help="Skips planning step", + ), **options, ): """ @@ -122,13 +139,15 @@ def deploy( with cli_console.spinner() as spinner: spinner.add_task(description=f"Deploying dcm project {identifier}", total=None) - result = manager.execute( + if skip_plan: + cli_console.warning("Skipping planning step") + result = manager.deploy( project_identifier=identifier, configuration=configuration, from_stage=effective_stage, variables=variables, alias=alias, - output_path=None, + skip_plan=skip_plan, ) return QueryJsonValueResult(result) @@ -152,11 +171,10 @@ def plan( with cli_console.spinner() as spinner: spinner.add_task(description=f"Planning dcm project {identifier}", total=None) - result = manager.execute( + result = manager.plan( project_identifier=identifier, configuration=configuration, from_stage=effective_stage, - dry_run=True, variables=variables, output_path=output_path, ) @@ -235,6 +253,258 @@ def drop_deployment( ) +@app.command(requires_connection=True) +def test( + identifier: FQN = dcm_identifier, + export_format: Optional[List[TestResultFormat]] = typer.Option( + None, + "--result-format", + help="Export test results in specified format(s) into directory set with `--output-path`. Can be specified multiple times for multiple formats.", + show_default=False, + ), + output_path: Optional[Path] = typer.Option( + None, + "--output-path", + help="Directory where test result files will be saved. Defaults to current directory.", + show_default=False, + ), + **options, +): + """ + Test all expectations set for tables, views and dynamic tables defined + in DCM project. 
+ """ + with cli_console.spinner() as spinner: + spinner.add_task(description=f"Testing dcm project {identifier}", total=None) + result = DCMProjectManager().test(project_identifier=identifier) + + row = result.fetchone() + if not row: + return MessageResult("No data.") + + result_data = row[0] + result_json = ( + json.loads(result_data) if isinstance(result_data, str) else result_data + ) + + expectations = result_json.get("expectations", []) + + if not expectations: + return MessageResult("No expectations defined in the project.") + + if export_format: + if output_path is None: + output_path = Path().cwd() + saved_files = export_test_results(result_json, export_format, output_path) + if saved_files: + cli_console.step(f"Test results exported to {output_path.resolve()}.") + + if result_json.get("status") == "EXPECTATION_VIOLATED": + failed_expectations = [ + exp for exp in expectations if exp.get("expectation_violated", False) + ] + total_tests = len(expectations) + failed_count = len(failed_expectations) + error_message = format_test_failures( + failed_expectations, total_tests, failed_count + ) + raise CliError(error_message) + + return MessageResult(f"All {len(expectations)} expectation(s) passed successfully.") + + +@app.command(requires_connection=True) +def refresh( + identifier: FQN = dcm_identifier, + **options, +): + """ + Refreshes dynamic tables defined in DCM project. 
+ """ + with cli_console.spinner() as spinner: + spinner.add_task(description=f"Refreshing dcm project {identifier}", total=None) + result = DCMProjectManager().refresh(project_identifier=identifier) + + row = result.fetchone() + if not row: + return MessageResult("No data.") + + result_data = row[0] + result_json = ( + json.loads(result_data) if isinstance(result_data, str) else result_data + ) + + refreshed_tables = result_json.get("refreshed_tables", []) + message = format_refresh_results(refreshed_tables) + + return MessageResult(message) + + +@app.command(requires_connection=True) +def preview( + identifier: FQN = dcm_identifier, + object_identifier: FQN = typer.Option( + ..., + "--object", + help="FQN of table/view/etc to be previewed.", + show_default=False, + click_type=IdentifierType(), + ), + from_location: Optional[str] = from_option, + variables: Optional[List[str]] = variables_flag, + configuration: Optional[str] = configuration_flag, + limit: Optional[int] = typer.Option( + None, + "--limit", + help="The maximum number of rows to be returned.", + show_default=False, + ), + **options, +): + """ + Returns rows from any table, view, dynamic table. 
+ + Examples: + \nsnow dcm preview MY_PROJECT --configuration DEV --object MY_DB.PUBLIC.MY_VIEW --limit 2 + """ + manager = DCMProjectManager() + effective_stage = _get_effective_stage(identifier, from_location) + + with cli_console.spinner() as spinner: + spinner.add_task( + description=f"Previewing {object_identifier}.", + total=None, + ) + result = manager.preview( + project_identifier=identifier, + object_identifier=object_identifier, + configuration=configuration, + from_stage=effective_stage, + variables=variables, + limit=limit, + ) + + return QueryResult(result) + + +@app.command(requires_connection=True) +def analyze( + identifier: FQN = dcm_identifier, + from_location: Optional[str] = from_option, + variables: Optional[List[str]] = variables_flag, + configuration: Optional[str] = configuration_flag, + analysis_type: Optional[AnalysisType] = typer.Option( + None, + "--type", + help="Type of analysis to perform.", + show_default=False, + case_sensitive=False, + ), + output_path: Optional[str] = output_path_option( + help="Path where the analysis result will be stored. Can be a stage path (starting with '@') or a local directory path." + ), + **options, +): + """ + Analyzes a DCM Project. 
+ """ + manager = DCMProjectManager() + effective_stage = _get_effective_stage(identifier, from_location) + + with cli_console.spinner() as spinner: + spinner.add_task(description=f"Analyzing dcm project {identifier}", total=None) + result = manager.analyze( + project_identifier=identifier, + configuration=configuration, + from_stage=effective_stage, + variables=variables, + analysis_type=analysis_type, + output_path=output_path, + ) + + row = result.fetchone() + if not row: + return MessageResult("No data.") + + result_data = row[0] + result_json = ( + json.loads(result_data) if isinstance(result_data, str) else result_data + ) + + summary = _analyze_result_summary(result_json) + + if summary.has_errors: + error_message = _format_error_message(summary) + raise CliError(error_message) + + return MessageResult( + f"✓ Analysis complete: {summary.total_files} file(s) analyzed, " + f"{summary.total_definitions} definition(s) found. No errors detected." + ) + + +@dataclass +class AnalysisSummary: + total_files: int = 0 + total_definitions: int = 0 + files_with_errors: int = 0 + total_errors: int = 0 + errors_by_file: Dict[str, List[str]] = field(default_factory=dict) + has_errors: bool = False + + +def _analyze_result_summary(result_json) -> AnalysisSummary: + summary = AnalysisSummary() + + if not isinstance(result_json, dict): + return summary + + files = result_json.get("files", []) + summary.total_files = len(files) + + for file_info in files: + source_path = file_info.get("sourcePath", "unknown") + file_errors = [] + + # Check file-level errors + for error in file_info.get("errors", []): + error_msg = error.get("message", "Unknown error") + file_errors.append(error_msg) + summary.total_errors += 1 + + # Check definition-level errors + definitions = file_info.get("definitions", []) + summary.total_definitions += len(definitions) + + for definition in definitions: + for error in definition.get("errors", []): + error_msg = error.get("message", "Unknown error") + 
file_errors.append(error_msg) + summary.total_errors += 1 + + if file_errors: + summary.errors_by_file[source_path] = file_errors + summary.files_with_errors += 1 + summary.has_errors = True + + return summary + + +def _format_error_message(summary: AnalysisSummary) -> str: + lines = [ + f"Analysis found {summary.total_errors} error(s) in {summary.files_with_errors} file(s):", + "", + ] + + for file_path, errors in summary.errors_by_file.items(): + lines.append(f" {file_path}:") + for error in errors: + lines.append(f" • {error}") + lines.append("") + + return "\n".join(lines).rstrip() + + def _get_effective_stage(identifier: FQN, from_location: Optional[str]): manager = DCMProjectManager() if not from_location: diff --git a/src/snowflake/cli/_plugins/dcm/manager.py b/src/snowflake/cli/_plugins/dcm/manager.py index 717239b4cf..1217cacac5 100644 --- a/src/snowflake/cli/_plugins/dcm/manager.py +++ b/src/snowflake/cli/_plugins/dcm/manager.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
from contextlib import contextmanager, nullcontext +from enum import Enum from pathlib import Path from typing import Generator, List @@ -38,6 +39,10 @@ DCM_PROJECT_TYPE = "dcm_project" +class AnalysisType(str, Enum): + DEPENDENCIES = "dependencies" + + class DCMProjectManager(SqlExecutionMixin): @contextmanager def _collect_output( @@ -77,36 +82,40 @@ def _collect_output( else: cli_console.step(f"Plan output saved to: {output_path}") - def execute( + def deploy( self, project_identifier: FQN, from_stage: str, configuration: str | None = None, variables: List[str] | None = None, - dry_run: bool = False, alias: str | None = None, + skip_plan: bool = False, + ): + query = f"EXECUTE DCM PROJECT {project_identifier.sql_identifier} DEPLOY" + if alias: + query += f' AS "{alias}"' + query += self._get_configuration_and_variables_query(configuration, variables) + query += self._get_from_stage_query(from_stage) + if skip_plan: + query += f" SKIP PLAN" + return self.execute_query(query=query) + + def plan( + self, + project_identifier: FQN, + from_stage: str, + configuration: str | None = None, + variables: List[str] | None = None, output_path: str | None = None, ): - with self._collect_output(project_identifier, output_path) if ( - output_path and dry_run - ) else nullcontext() as output_stage: - query = f"EXECUTE DCM PROJECT {project_identifier.sql_identifier}" - if dry_run: - query += " PLAN" - else: - query += " DEPLOY" - if alias: - query += f' AS "{alias}"' - if configuration or variables: - query += f" USING" - if configuration: - query += f" CONFIGURATION {configuration}" - if variables: - query += StageManager.parse_execute_variables( - parse_key_value_variables(variables) - ).removeprefix(" using") - stage_path = StagePath.from_stage_str(from_stage) - query += f" FROM {stage_path.absolute_path()}" + with self._collect_output( + project_identifier, output_path + ) if output_path else nullcontext() as output_stage: + query = f"EXECUTE DCM PROJECT 
{project_identifier.sql_identifier} PLAN" + query += self._get_configuration_and_variables_query( + configuration, variables + ) + query += self._get_from_stage_query(from_stage) if output_stage is not None: query += f" OUTPUT_PATH {output_stage}" result = self.execute_query(query=query) @@ -136,6 +145,75 @@ def drop_deployment( query += f' "{deployment_name}"' return self.execute_query(query=query) + def test(self, project_identifier: FQN): + query = f"EXECUTE DCM PROJECT {project_identifier.sql_identifier} TEST ALL" + return self.execute_query(query=query) + + def refresh(self, project_identifier: FQN): + query = f"EXECUTE DCM PROJECT {project_identifier.sql_identifier} REFRESH ALL" + return self.execute_query(query=query) + + def preview( + self, + project_identifier: FQN, + object_identifier: FQN, + from_stage: str, + configuration: str | None = None, + variables: List[str] | None = None, + limit: int | None = None, + ): + query = f"EXECUTE DCM PROJECT {project_identifier.sql_identifier} PREVIEW {object_identifier.sql_identifier}" + query += self._get_configuration_and_variables_query(configuration, variables) + query += self._get_from_stage_query(from_stage) + if limit is not None: + query += f" LIMIT {limit}" + return self.execute_query(query=query) + + def analyze( + self, + project_identifier: FQN, + from_stage: str, + configuration: str | None = None, + variables: List[str] | None = None, + analysis_type: AnalysisType | None = None, + output_path: str | None = None, + ): + with self._collect_output( + project_identifier, output_path + ) if output_path else nullcontext() as output_stage: + query = f"EXECUTE DCM PROJECT {project_identifier.sql_identifier} ANALYZE" + if analysis_type: + query += f" {analysis_type.value.upper()}" + query += self._get_configuration_and_variables_query( + configuration, variables + ) + query += self._get_from_stage_query(from_stage) + if output_stage is not None: + query += f" OUTPUT_PATH {output_stage}" + result = 
self.execute_query(query=query) + + return result + + @staticmethod + def _get_from_stage_query(from_stage: str) -> str: + stage_path = StagePath.from_stage_str(from_stage) + return f" FROM {stage_path.absolute_path()}" + + @staticmethod + def _get_configuration_and_variables_query( + configuration: str | None, variables: List[str] | None + ) -> str: + query = "" + if configuration or variables: + query += f" USING" + if configuration: + query += f" CONFIGURATION {configuration}" + if variables: + query += StageManager.parse_execute_variables( + parse_key_value_variables(variables) + ).removeprefix(" using") + return query + @staticmethod def sync_local_files( project_identifier: FQN, source_directory: str | None = None diff --git a/src/snowflake/cli/_plugins/dcm/utils.py b/src/snowflake/cli/_plugins/dcm/utils.py new file mode 100644 index 0000000000..fbfbc0ddc9 --- /dev/null +++ b/src/snowflake/cli/_plugins/dcm/utils.py @@ -0,0 +1,240 @@ +# Copyright (c) 2024 Snowflake Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +from enum import Enum +from pathlib import Path +from typing import Any, Dict, List +from xml.etree import ElementTree + + +class TestResultFormat(str, Enum): + JSON = "json" + JUNIT = "junit" + TAP = "tap" + + +def format_test_failures( + failed_expectations: list, total_tests: int, failed_count: int +) -> str: + """Format test failures into a nice error message.""" + lines = [ + "Failed expectations:", + ] + + for failed in failed_expectations: + table_name = failed.get("table_name", "Unknown") + expectation_name = failed.get("expectation_name", "Unknown") + metric_name = failed.get("metric_name", "Unknown") + expectation_expr = failed.get("expectation_expression", "N/A") + value = failed.get("value", "N/A") + + lines.append(f" Table: {table_name}") + lines.append(f" Expectation: {expectation_name}") + lines.append(f" Metric: {metric_name}") + lines.append(f" Expression: {expectation_expr}") + lines.append(f" Actual value: {value}") + lines.append("") + + passed_tests = total_tests - failed_count + lines.append( + f"Tests completed: {passed_tests} passed, {failed_count} failed out of {total_tests} total." 
+ ) + + return "\n".join(lines) + + +def _normalize_table_name(table_name: str) -> str: + """Normalize table name to lowercase with hyphens for file naming.""" + return table_name.lower().replace(".", "-").replace("_", "-") + + +def _group_expectations_by_table( + expectations: List[Dict[str, Any]] +) -> Dict[str, List[Dict[str, Any]]]: + """Group expectations by table name.""" + grouped: dict[str, list[dict[str, Any]]] = {} + for expectation in expectations: + table_name = expectation.get("table_name", "unknown") + if table_name not in grouped: + grouped[table_name] = [] + grouped[table_name].append(expectation) + return grouped + + +def export_test_results_as_json(result_data: Dict[str, Any], output_path: Path) -> None: + """Export test results as JSON format.""" + with open(output_path, "w") as f: + json.dump(result_data, f, indent=2) + + +def export_test_results_as_junit( + result_data: Dict[str, Any], output_dir: Path +) -> List[Path]: + """Export test results as JUnit XML format, one file per table.""" + expectations = result_data.get("expectations", []) + grouped = _group_expectations_by_table(expectations) + + junit_dir = output_dir / "junit" + junit_dir.mkdir(parents=True, exist_ok=True) + + saved_files = [] + + for table_name, table_expectations in grouped.items(): + normalized_name = _normalize_table_name(table_name) + output_path = junit_dir / f"{normalized_name}.xml" + + testsuites = ElementTree.Element("testsuites") + testsuite = ElementTree.SubElement( + testsuites, + "testsuite", + name=f"DCM Tests - {table_name}", + tests=str(len(table_expectations)), + failures=str( + sum( + 1 + for e in table_expectations + if e.get("expectation_violated", False) + ) + ), + errors="0", + skipped="0", + ) + + for expectation in table_expectations: + expectation_name = expectation.get("expectation_name", "Unknown") + metric_name = expectation.get("metric_name", "Unknown") + + testcase = ElementTree.SubElement( + testsuite, + "testcase", + name=expectation_name, + 
classname=table_name, + ) + + if expectation.get("expectation_violated", False): + failure = ElementTree.SubElement( + testcase, + "failure", + message=f"Expectation '{expectation_name}' violated", + type="AssertionError", + ) + expectation_expr = expectation.get("expectation_expression", "N/A") + value = expectation.get("value", "N/A") + failure.text = ( + f"Metric: {metric_name}\n" + f"Expression: {expectation_expr}\n" + f"Actual value: {value}" + ) + + tree = ElementTree.ElementTree(testsuites) + ElementTree.indent(tree, space=" ") + tree.write(output_path, encoding="utf-8", xml_declaration=True) + saved_files.append(output_path) + + return saved_files + + +def export_test_results_as_tap( + result_data: Dict[str, Any], output_dir: Path +) -> List[Path]: + """Export test results as TAP (Test Anything Protocol) format, one file per table.""" + expectations = result_data.get("expectations", []) + grouped = _group_expectations_by_table(expectations) + + tap_dir = output_dir / "tap" + tap_dir.mkdir(parents=True, exist_ok=True) + + saved_files = [] + + for table_name, table_expectations in grouped.items(): + normalized_name = _normalize_table_name(table_name) + output_path = tap_dir / f"{normalized_name}.tap" + + lines = [f"1..{len(table_expectations)}"] + + for idx, expectation in enumerate(table_expectations, start=1): + expectation_name = expectation.get("expectation_name", "Unknown") + metric_name = expectation.get("metric_name", "Unknown") + + if expectation.get("expectation_violated", False): + lines.append(f"not ok {idx} - {expectation_name}") + lines.append(f" ---") + lines.append(f" message: Expectation '{expectation_name}' violated") + lines.append(f" severity: fail") + lines.append(f" data:") + lines.append(f" table: {table_name}") + lines.append(f" metric: {metric_name}") + lines.append( + f" expression: {expectation.get('expectation_expression', 'N/A')}" + ) + lines.append(f" actual_value: {expectation.get('value', 'N/A')}") + lines.append(f" ...") + 
else: + lines.append(f"ok {idx} - {expectation_name}") + + with open(output_path, "w") as f: + f.write("\n".join(lines) + "\n") + + saved_files.append(output_path) + + return saved_files + + +def export_test_results( + result_data: Dict[str, Any], + formats: List[TestResultFormat], + output_dir: Path, +) -> List[Path]: + """ + Export test results in multiple formats. + + Args: + result_data: The test result data from the backend + formats: List of formats to export to + output_dir: Directory to save the results + + Returns: + List of paths where results were saved + """ + output_dir.mkdir(parents=True, exist_ok=True) + saved_files = [] + + for format_type in formats: + if format_type == TestResultFormat.JSON: + output_path = output_dir / "test_result.json" + export_test_results_as_json(result_data, output_path) + saved_files.append(output_path) + elif format_type == TestResultFormat.JUNIT: + files = export_test_results_as_junit(result_data, output_dir) + saved_files.extend(files) + elif format_type == TestResultFormat.TAP: + files = export_test_results_as_tap(result_data, output_dir) + saved_files.extend(files) + + return saved_files + + +def format_refresh_results(refreshed_tables: list) -> str: + """Format refresh results into a concise user-friendly message.""" + if not refreshed_tables: + return "No dynamic tables found in the project." + + total_tables = len(refreshed_tables) + refreshed_count = sum( + 1 for table in refreshed_tables if table.get("refreshed_dt_count", 0) > 0 + ) + up_to_date_count = total_tables - refreshed_count + + return f"{refreshed_count} dynamic table(s) refreshed. {up_to_date_count} dynamic table(s) up-to-date." 
diff --git a/src/snowflake/cli/api/output/types.py b/src/snowflake/cli/api/output/types.py index de3d461595..55470cc143 100644 --- a/src/snowflake/cli/api/output/types.py +++ b/src/snowflake/cli/api/output/types.py @@ -146,9 +146,13 @@ def __init__(self, cursor: SnowflakeCursor): def _prepare_payload(self, cursor): results = list(QueryResult(cursor).result) if results: - # Return value of the first tuple - return json.loads(list(results[0].items())[0][1]) - return None + # Parse the JSON value from the first tuple + parsed_value = json.loads(list(results[0].items())[0][1]) + # If it's a list, yield each element; if it's a dict, yield the single dict + if isinstance(parsed_value, list): + yield from parsed_value + else: + yield parsed_value class MessageResult(CommandResult): diff --git a/tests/__snapshots__/test_help_messages.ambr b/tests/__snapshots__/test_help_messages.ambr index 443db9fe70..be02453ee2 100644 --- a/tests/__snapshots__/test_help_messages.ambr +++ b/tests/__snapshots__/test_help_messages.ambr @@ -7191,6 +7191,162 @@ +------------------------------------------------------------------------------+ + ''' +# --- +# name: test_help_messages[dcm.analyze] + ''' + + Usage: root dcm analyze [OPTIONS] IDENTIFIER + + Analyzes a DCM Project. + + +- Arguments ------------------------------------------------------------------+ + | * identifier TEXT Identifier of the DCM Project; for example: | + | MY_PROJECT | + | [required] | + +------------------------------------------------------------------------------+ + +- Options --------------------------------------------------------------------+ + | --from TEXT Source location: stage path | + | (starting with '@') or local | + | directory path. Omit to use current | + | directory. | + | --variable -D TEXT Variables for the execution | + | context; for example: -D | + | "=". | + | --configuration TEXT Configuration of the DCM Project to | + | use. If not specified default | + | configuration is used. 
| + | --type [dependencies] Type of analysis to perform. | + | --output-path TEXT Path where the analysis result will | + | be stored. Can be a stage path | + | (starting with '@') or a local | + | directory path. | + | --help -h Show this message and exit. | + +------------------------------------------------------------------------------+ + +- Connection configuration ---------------------------------------------------+ + | --connection,--environment -c TEXT Name of the connection, as | + | defined in your config.toml | + | file. Default: default. | + | --host TEXT Host address for the | + | connection. Overrides the | + | value specified for the | + | connection. | + | --port INTEGER Port for the connection. | + | Overrides the value specified | + | for the connection. | + | --account,--accountname TEXT Name assigned to your | + | Snowflake account. Overrides | + | the value specified for the | + | connection. | + | --user,--username TEXT Username to connect to | + | Snowflake. Overrides the | + | value specified for the | + | connection. | + | --password TEXT Snowflake password. Overrides | + | the value specified for the | + | connection. | + | --authenticator TEXT Snowflake authenticator. | + | Overrides the value specified | + | for the connection. | + | --workload-identity-provider TEXT Workload identity provider | + | (AWS, AZURE, GCP, OIDC). | + | Overrides the value specified | + | for the connection | + | --private-key-file,--privat… TEXT Snowflake private key file | + | path. Overrides the value | + | specified for the connection. | + | --token TEXT OAuth token to use when | + | connecting to Snowflake. | + | --token-file-path TEXT Path to file with an OAuth | + | token to use when connecting | + | to Snowflake. | + | --database,--dbname TEXT Database to use. Overrides | + | the value specified for the | + | connection. | + | --schema,--schemaname TEXT Database schema to use. | + | Overrides the value specified | + | for the connection. 
| + | --role,--rolename TEXT Role to use. Overrides the | + | value specified for the | + | connection. | + | --warehouse TEXT Warehouse to use. Overrides | + | the value specified for the | + | connection. | + | --temporary-connection -x Uses a connection defined | + | with command line parameters, | + | instead of one defined in | + | config | + | --mfa-passcode TEXT Token to use for multi-factor | + | authentication (MFA) | + | --enable-diag Whether to generate a | + | connection diagnostic report. | + | --diag-log-path TEXT Path for the generated | + | report. Defaults to system | + | temporary directory. | + | --diag-allowlist-path TEXT Path to a JSON file that | + | contains allowlist | + | parameters. | + | --oauth-client-id TEXT Value of client id provided | + | by the Identity Provider for | + | Snowflake integration. | + | --oauth-client-secret TEXT Value of the client secret | + | provided by the Identity | + | Provider for Snowflake | + | integration. | + | --oauth-authorization-url TEXT Identity Provider endpoint | + | supplying the authorization | + | code to the driver. | + | --oauth-token-request-url TEXT Identity Provider endpoint | + | supplying the access tokens | + | to the driver. | + | --oauth-redirect-uri TEXT URI to use for authorization | + | code redirection. | + | --oauth-scope TEXT Scope requested in the | + | Identity Provider | + | authorization request. | + | --oauth-disable-pkce Disables Proof Key for Code | + | Exchange (PKCE). Default: | + | False. | + | --oauth-enable-refresh-toke… Enables a silent | + | re-authentication when the | + | actual access token becomes | + | outdated. Default: False. | + | --oauth-enable-single-use-r… Whether to opt-in to | + | single-use refresh token | + | semantics. Default: False. | + | --client-store-temporary-cr… Store the temporary | + | credential. 
| + +------------------------------------------------------------------------------+ + +- Global configuration -------------------------------------------------------+ + | --format [TABLE|JSON|JSON_EXT| Specifies the output | + | CSV] format. | + | [default: TABLE] | + | --verbose -v Displays log entries | + | for log levels info | + | and higher. | + | --debug Displays log entries | + | for log levels debug | + | and higher; debug logs | + | contain additional | + | information. | + | --silent Turns off intermediate | + | output to console. | + | --enhanced-exit-codes Differentiate exit | + | error codes based on | + | failure type. | + | [env var: | + | SNOWFLAKE_ENHANCED_EX… | + | --decimal-precision INTEGER Number of decimal | + | places to display for | + | decimal values. Uses | + | Python's default | + | precision if not | + | specified. | + | [env var: | + | SNOWFLAKE_DECIMAL_PRE… | + +------------------------------------------------------------------------------+ + + ''' # --- # name: test_help_messages[dcm.create] @@ -7356,6 +7512,7 @@ | --configuration TEXT Configuration of the DCM Project to use. If | | not specified default configuration is used. | | --alias TEXT Alias for the deployment. | + | --skip-plan Skips planning step | | --help -h Show this message and exit. | +------------------------------------------------------------------------------+ +- Connection configuration ---------------------------------------------------+ @@ -8349,6 +8506,455 @@ +------------------------------------------------------------------------------+ + ''' +# --- +# name: test_help_messages[dcm.preview] + ''' + + Usage: root dcm preview [OPTIONS] IDENTIFIER + + Returns rows from any table, view, dynamic table. 
+ + Examples: + + snow dcm preview MY_PROJECT --configuration DEV --object MY_DB.PUBLIC.MY_VIEW + --limit 2 + + +- Arguments ------------------------------------------------------------------+ + | * identifier TEXT Identifier of the DCM Project; for example: | + | MY_PROJECT | + | [required] | + +------------------------------------------------------------------------------+ + +- Options --------------------------------------------------------------------+ + | * --object TEXT FQN of table/view/etc to be previewed. | + | [required] | + | --from TEXT Source location: stage path (starting | + | with '@') or local directory path. Omit | + | to use current directory. | + | --variable -D TEXT Variables for the execution context; | + | for example: -D "=". | + | --configuration TEXT Configuration of the DCM Project to | + | use. If not specified default | + | configuration is used. | + | --limit INTEGER The maximum number of rows to be | + | returned. | + | --help -h Show this message and exit. | + +------------------------------------------------------------------------------+ + +- Connection configuration ---------------------------------------------------+ + | --connection,--environment -c TEXT Name of the connection, as | + | defined in your config.toml | + | file. Default: default. | + | --host TEXT Host address for the | + | connection. Overrides the | + | value specified for the | + | connection. | + | --port INTEGER Port for the connection. | + | Overrides the value specified | + | for the connection. | + | --account,--accountname TEXT Name assigned to your | + | Snowflake account. Overrides | + | the value specified for the | + | connection. | + | --user,--username TEXT Username to connect to | + | Snowflake. Overrides the | + | value specified for the | + | connection. | + | --password TEXT Snowflake password. Overrides | + | the value specified for the | + | connection. | + | --authenticator TEXT Snowflake authenticator. 
| + | Overrides the value specified | + | for the connection. | + | --workload-identity-provider TEXT Workload identity provider | + | (AWS, AZURE, GCP, OIDC). | + | Overrides the value specified | + | for the connection | + | --private-key-file,--privat… TEXT Snowflake private key file | + | path. Overrides the value | + | specified for the connection. | + | --token TEXT OAuth token to use when | + | connecting to Snowflake. | + | --token-file-path TEXT Path to file with an OAuth | + | token to use when connecting | + | to Snowflake. | + | --database,--dbname TEXT Database to use. Overrides | + | the value specified for the | + | connection. | + | --schema,--schemaname TEXT Database schema to use. | + | Overrides the value specified | + | for the connection. | + | --role,--rolename TEXT Role to use. Overrides the | + | value specified for the | + | connection. | + | --warehouse TEXT Warehouse to use. Overrides | + | the value specified for the | + | connection. | + | --temporary-connection -x Uses a connection defined | + | with command line parameters, | + | instead of one defined in | + | config | + | --mfa-passcode TEXT Token to use for multi-factor | + | authentication (MFA) | + | --enable-diag Whether to generate a | + | connection diagnostic report. | + | --diag-log-path TEXT Path for the generated | + | report. Defaults to system | + | temporary directory. | + | --diag-allowlist-path TEXT Path to a JSON file that | + | contains allowlist | + | parameters. | + | --oauth-client-id TEXT Value of client id provided | + | by the Identity Provider for | + | Snowflake integration. | + | --oauth-client-secret TEXT Value of the client secret | + | provided by the Identity | + | Provider for Snowflake | + | integration. | + | --oauth-authorization-url TEXT Identity Provider endpoint | + | supplying the authorization | + | code to the driver. | + | --oauth-token-request-url TEXT Identity Provider endpoint | + | supplying the access tokens | + | to the driver. 
| + | --oauth-redirect-uri TEXT URI to use for authorization | + | code redirection. | + | --oauth-scope TEXT Scope requested in the | + | Identity Provider | + | authorization request. | + | --oauth-disable-pkce Disables Proof Key for Code | + | Exchange (PKCE). Default: | + | False. | + | --oauth-enable-refresh-toke… Enables a silent | + | re-authentication when the | + | actual access token becomes | + | outdated. Default: False. | + | --oauth-enable-single-use-r… Whether to opt-in to | + | single-use refresh token | + | semantics. Default: False. | + | --client-store-temporary-cr… Store the temporary | + | credential. | + +------------------------------------------------------------------------------+ + +- Global configuration -------------------------------------------------------+ + | --format [TABLE|JSON|JSON_EXT| Specifies the output | + | CSV] format. | + | [default: TABLE] | + | --verbose -v Displays log entries | + | for log levels info | + | and higher. | + | --debug Displays log entries | + | for log levels debug | + | and higher; debug logs | + | contain additional | + | information. | + | --silent Turns off intermediate | + | output to console. | + | --enhanced-exit-codes Differentiate exit | + | error codes based on | + | failure type. | + | [env var: | + | SNOWFLAKE_ENHANCED_EX… | + | --decimal-precision INTEGER Number of decimal | + | places to display for | + | decimal values. Uses | + | Python's default | + | precision if not | + | specified. | + | [env var: | + | SNOWFLAKE_DECIMAL_PRE… | + +------------------------------------------------------------------------------+ + + + ''' +# --- +# name: test_help_messages[dcm.refresh] + ''' + + Usage: root dcm refresh [OPTIONS] IDENTIFIER + + Refreshes dynamic tables defined in DCM project. 
+ + +- Arguments ------------------------------------------------------------------+ + | * identifier TEXT Identifier of the DCM Project; for example: | + | MY_PROJECT | + | [required] | + +------------------------------------------------------------------------------+ + +- Options --------------------------------------------------------------------+ + | --help -h Show this message and exit. | + +------------------------------------------------------------------------------+ + +- Connection configuration ---------------------------------------------------+ + | --connection,--environment -c TEXT Name of the connection, as | + | defined in your config.toml | + | file. Default: default. | + | --host TEXT Host address for the | + | connection. Overrides the | + | value specified for the | + | connection. | + | --port INTEGER Port for the connection. | + | Overrides the value specified | + | for the connection. | + | --account,--accountname TEXT Name assigned to your | + | Snowflake account. Overrides | + | the value specified for the | + | connection. | + | --user,--username TEXT Username to connect to | + | Snowflake. Overrides the | + | value specified for the | + | connection. | + | --password TEXT Snowflake password. Overrides | + | the value specified for the | + | connection. | + | --authenticator TEXT Snowflake authenticator. | + | Overrides the value specified | + | for the connection. | + | --workload-identity-provider TEXT Workload identity provider | + | (AWS, AZURE, GCP, OIDC). | + | Overrides the value specified | + | for the connection | + | --private-key-file,--privat… TEXT Snowflake private key file | + | path. Overrides the value | + | specified for the connection. | + | --token TEXT OAuth token to use when | + | connecting to Snowflake. | + | --token-file-path TEXT Path to file with an OAuth | + | token to use when connecting | + | to Snowflake. | + | --database,--dbname TEXT Database to use. 
Overrides | + | the value specified for the | + | connection. | + | --schema,--schemaname TEXT Database schema to use. | + | Overrides the value specified | + | for the connection. | + | --role,--rolename TEXT Role to use. Overrides the | + | value specified for the | + | connection. | + | --warehouse TEXT Warehouse to use. Overrides | + | the value specified for the | + | connection. | + | --temporary-connection -x Uses a connection defined | + | with command line parameters, | + | instead of one defined in | + | config | + | --mfa-passcode TEXT Token to use for multi-factor | + | authentication (MFA) | + | --enable-diag Whether to generate a | + | connection diagnostic report. | + | --diag-log-path TEXT Path for the generated | + | report. Defaults to system | + | temporary directory. | + | --diag-allowlist-path TEXT Path to a JSON file that | + | contains allowlist | + | parameters. | + | --oauth-client-id TEXT Value of client id provided | + | by the Identity Provider for | + | Snowflake integration. | + | --oauth-client-secret TEXT Value of the client secret | + | provided by the Identity | + | Provider for Snowflake | + | integration. | + | --oauth-authorization-url TEXT Identity Provider endpoint | + | supplying the authorization | + | code to the driver. | + | --oauth-token-request-url TEXT Identity Provider endpoint | + | supplying the access tokens | + | to the driver. | + | --oauth-redirect-uri TEXT URI to use for authorization | + | code redirection. | + | --oauth-scope TEXT Scope requested in the | + | Identity Provider | + | authorization request. | + | --oauth-disable-pkce Disables Proof Key for Code | + | Exchange (PKCE). Default: | + | False. | + | --oauth-enable-refresh-toke… Enables a silent | + | re-authentication when the | + | actual access token becomes | + | outdated. Default: False. | + | --oauth-enable-single-use-r… Whether to opt-in to | + | single-use refresh token | + | semantics. Default: False. 
| + | --client-store-temporary-cr… Store the temporary | + | credential. | + +------------------------------------------------------------------------------+ + +- Global configuration -------------------------------------------------------+ + | --format [TABLE|JSON|JSON_EXT| Specifies the output | + | CSV] format. | + | [default: TABLE] | + | --verbose -v Displays log entries | + | for log levels info | + | and higher. | + | --debug Displays log entries | + | for log levels debug | + | and higher; debug logs | + | contain additional | + | information. | + | --silent Turns off intermediate | + | output to console. | + | --enhanced-exit-codes Differentiate exit | + | error codes based on | + | failure type. | + | [env var: | + | SNOWFLAKE_ENHANCED_EX… | + | --decimal-precision INTEGER Number of decimal | + | places to display for | + | decimal values. Uses | + | Python's default | + | precision if not | + | specified. | + | [env var: | + | SNOWFLAKE_DECIMAL_PRE… | + +------------------------------------------------------------------------------+ + + + ''' +# --- +# name: test_help_messages[dcm.test] + ''' + + Usage: root dcm test [OPTIONS] IDENTIFIER + + Test all expectations set for tables, views and dynamic tables defined in DCM + project. + + +- Arguments ------------------------------------------------------------------+ + | * identifier TEXT Identifier of the DCM Project; for example: | + | MY_PROJECT | + | [required] | + +------------------------------------------------------------------------------+ + +- Options --------------------------------------------------------------------+ + | --result-format [json|junit|tap] Export test results in specified | + | format(s) into directory set with | + | --output-path. Can be specified | + | multiple times for multiple | + | formats. | + | --output-path PATH Directory where test result files | + | will be saved. Defaults to | + | current directory. | + | --help -h Show this message and exit. 
| + +------------------------------------------------------------------------------+ + +- Connection configuration ---------------------------------------------------+ + | --connection,--environment -c TEXT Name of the connection, as | + | defined in your config.toml | + | file. Default: default. | + | --host TEXT Host address for the | + | connection. Overrides the | + | value specified for the | + | connection. | + | --port INTEGER Port for the connection. | + | Overrides the value specified | + | for the connection. | + | --account,--accountname TEXT Name assigned to your | + | Snowflake account. Overrides | + | the value specified for the | + | connection. | + | --user,--username TEXT Username to connect to | + | Snowflake. Overrides the | + | value specified for the | + | connection. | + | --password TEXT Snowflake password. Overrides | + | the value specified for the | + | connection. | + | --authenticator TEXT Snowflake authenticator. | + | Overrides the value specified | + | for the connection. | + | --workload-identity-provider TEXT Workload identity provider | + | (AWS, AZURE, GCP, OIDC). | + | Overrides the value specified | + | for the connection | + | --private-key-file,--privat… TEXT Snowflake private key file | + | path. Overrides the value | + | specified for the connection. | + | --token TEXT OAuth token to use when | + | connecting to Snowflake. | + | --token-file-path TEXT Path to file with an OAuth | + | token to use when connecting | + | to Snowflake. | + | --database,--dbname TEXT Database to use. Overrides | + | the value specified for the | + | connection. | + | --schema,--schemaname TEXT Database schema to use. | + | Overrides the value specified | + | for the connection. | + | --role,--rolename TEXT Role to use. Overrides the | + | value specified for the | + | connection. | + | --warehouse TEXT Warehouse to use. Overrides | + | the value specified for the | + | connection. 
| + | --temporary-connection -x Uses a connection defined | + | with command line parameters, | + | instead of one defined in | + | config | + | --mfa-passcode TEXT Token to use for multi-factor | + | authentication (MFA) | + | --enable-diag Whether to generate a | + | connection diagnostic report. | + | --diag-log-path TEXT Path for the generated | + | report. Defaults to system | + | temporary directory. | + | --diag-allowlist-path TEXT Path to a JSON file that | + | contains allowlist | + | parameters. | + | --oauth-client-id TEXT Value of client id provided | + | by the Identity Provider for | + | Snowflake integration. | + | --oauth-client-secret TEXT Value of the client secret | + | provided by the Identity | + | Provider for Snowflake | + | integration. | + | --oauth-authorization-url TEXT Identity Provider endpoint | + | supplying the authorization | + | code to the driver. | + | --oauth-token-request-url TEXT Identity Provider endpoint | + | supplying the access tokens | + | to the driver. | + | --oauth-redirect-uri TEXT URI to use for authorization | + | code redirection. | + | --oauth-scope TEXT Scope requested in the | + | Identity Provider | + | authorization request. | + | --oauth-disable-pkce Disables Proof Key for Code | + | Exchange (PKCE). Default: | + | False. | + | --oauth-enable-refresh-toke… Enables a silent | + | re-authentication when the | + | actual access token becomes | + | outdated. Default: False. | + | --oauth-enable-single-use-r… Whether to opt-in to | + | single-use refresh token | + | semantics. Default: False. | + | --client-store-temporary-cr… Store the temporary | + | credential. | + +------------------------------------------------------------------------------+ + +- Global configuration -------------------------------------------------------+ + | --format [TABLE|JSON|JSON_EXT| Specifies the output | + | CSV] format. | + | [default: TABLE] | + | --verbose -v Displays log entries | + | for log levels info | + | and higher. 
| + | --debug Displays log entries | + | for log levels debug | + | and higher; debug logs | + | contain additional | + | information. | + | --silent Turns off intermediate | + | output to console. | + | --enhanced-exit-codes Differentiate exit | + | error codes based on | + | failure type. | + | [env var: | + | SNOWFLAKE_ENHANCED_EX… | + | --decimal-precision INTEGER Number of decimal | + | places to display for | + | decimal values. Uses | + | Python's default | + | precision if not | + | specified. | + | [env var: | + | SNOWFLAKE_DECIMAL_PRE… | + +------------------------------------------------------------------------------+ + + ''' # --- # name: test_help_messages[dcm] @@ -8362,6 +8968,7 @@ | --help -h Show this message and exit. | +------------------------------------------------------------------------------+ +- Commands -------------------------------------------------------------------+ + | analyze Analyzes a DCM Project. | | create Creates a DCM Project in Snowflake. | | deploy Applies changes defined in DCM Project to Snowflake. | | describe Provides description of DCM Project. | @@ -8371,6 +8978,10 @@ | list-deployments Lists deployments of given DCM Project. | | plan Plans a DCM Project deployment (validates without | | executing). | + | preview Returns rows from any table, view, dynamic table. | + | refresh Refreshes dynamic tables defined in DCM project. | + | test Test all expectations set for tables, views and dynamic | + | tables defined in DCM project. | +------------------------------------------------------------------------------+ @@ -21947,6 +22558,7 @@ | --help -h Show this message and exit. | +------------------------------------------------------------------------------+ +- Commands -------------------------------------------------------------------+ + | analyze Analyzes a DCM Project. | | create Creates a DCM Project in Snowflake. | | deploy Applies changes defined in DCM Project to Snowflake. 
| | describe Provides description of DCM Project. | @@ -21956,6 +22568,10 @@ | list-deployments Lists deployments of given DCM Project. | | plan Plans a DCM Project deployment (validates without | | executing). | + | preview Returns rows from any table, view, dynamic table. | + | refresh Refreshes dynamic tables defined in DCM project. | + | test Test all expectations set for tables, views and dynamic | + | tables defined in DCM project. | +------------------------------------------------------------------------------+ diff --git a/tests/dcm/__snapshots__/test_commands.ambr b/tests/dcm/__snapshots__/test_commands.ambr new file mode 100644 index 0000000000..57bd429f98 --- /dev/null +++ b/tests/dcm/__snapshots__/test_commands.ambr @@ -0,0 +1,22 @@ +# serializer version: 1 +# name: TestDCMRefresh.test_refresh_with_fresh_tables + ''' + + 0 dynamic table(s) refreshed. 1 dynamic table(s) up-to-date. + + ''' +# --- +# name: TestDCMRefresh.test_refresh_with_no_dynamic_tables + ''' + + No dynamic tables found in the project. + + ''' +# --- +# name: TestDCMRefresh.test_refresh_with_outdated_tables + ''' + + 1 dynamic table(s) refreshed. 0 dynamic table(s) up-to-date. 
+ + ''' +# --- diff --git a/tests/dcm/test_commands.py b/tests/dcm/test_commands.py index 5bcbaddd65..2d0ca754ce 100644 --- a/tests/dcm/test_commands.py +++ b/tests/dcm/test_commands.py @@ -1,6 +1,8 @@ +import json from unittest import mock import pytest +from snowflake.cli._plugins.dcm.manager import AnalysisType from snowflake.cli.api.identifiers import FQN DCMProjectManager = "snowflake.cli._plugins.dcm.commands.DCMProjectManager" @@ -75,7 +77,7 @@ def test_deploy_project( mock_connect, mock_from_resource, ): - mock_pm().execute.return_value = mock_cursor( + mock_pm().deploy.return_value = mock_cursor( rows=[("[]",)], columns=("operations") ) mock_pm().sync_local_files.return_value = mock_from_resource() @@ -85,40 +87,40 @@ def test_deploy_project( assert result.exit_code == 0, result.output - mock_pm().execute.assert_called_once_with( + mock_pm().deploy.assert_called_once_with( project_identifier=FQN.from_string("fooBar"), configuration=None, from_stage=mock_from_resource(), variables=None, alias=None, - output_path=None, + skip_plan=False, ) @mock.patch(DCMProjectManager) def test_deploy_project_with_from_stage( self, mock_pm, runner, project_directory, mock_cursor ): - mock_pm().execute.return_value = mock_cursor( + mock_pm().deploy.return_value = mock_cursor( rows=[("[]",)], columns=("operations") ) result = runner.invoke(["dcm", "deploy", "fooBar", "--from", "@my_stage"]) assert result.exit_code == 0, result.output - mock_pm().execute.assert_called_once_with( + mock_pm().deploy.assert_called_once_with( project_identifier=FQN.from_string("fooBar"), configuration=None, from_stage="@my_stage", variables=None, alias=None, - output_path=None, + skip_plan=False, ) @mock.patch(DCMProjectManager) def test_deploy_project_with_variables( self, mock_pm, runner, project_directory, mock_cursor ): - mock_pm().execute.return_value = mock_cursor( + mock_pm().deploy.return_value = mock_cursor( rows=[("[]",)], columns=("operations") ) @@ -127,20 +129,20 @@ def 
test_deploy_project_with_variables( ) assert result.exit_code == 0, result.output - mock_pm().execute.assert_called_once_with( + mock_pm().deploy.assert_called_once_with( project_identifier=FQN.from_string("fooBar"), configuration=None, from_stage="@my_stage", variables=["key=value"], alias=None, - output_path=None, + skip_plan=False, ) @mock.patch(DCMProjectManager) def test_deploy_project_with_configuration( self, mock_pm, runner, project_directory, mock_cursor ): - mock_pm().execute.return_value = mock_cursor( + mock_pm().deploy.return_value = mock_cursor( rows=[("[]",)], columns=("operations") ) @@ -157,20 +159,20 @@ def test_deploy_project_with_configuration( ) assert result.exit_code == 0, result.output - mock_pm().execute.assert_called_once_with( + mock_pm().deploy.assert_called_once_with( project_identifier=FQN.from_string("fooBar"), configuration="some_configuration", from_stage="@my_stage", variables=None, alias=None, - output_path=None, + skip_plan=False, ) @mock.patch(DCMProjectManager) def test_deploy_project_with_alias( self, mock_pm, runner, project_directory, mock_cursor ): - mock_pm().execute.return_value = mock_cursor( + mock_pm().deploy.return_value = mock_cursor( rows=[("[]",)], columns=("operations") ) @@ -179,13 +181,13 @@ def test_deploy_project_with_alias( ) assert result.exit_code == 0, result.output - mock_pm().execute.assert_called_once_with( + mock_pm().deploy.assert_called_once_with( project_identifier=FQN.from_string("fooBar"), configuration=None, from_stage="@my_stage", variables=None, alias="my_alias", - output_path=None, + skip_plan=False, ) @mock.patch("snowflake.cli._plugins.dcm.manager.StageManager.create") @@ -200,7 +202,7 @@ def test_deploy_project_with_sync( mock_connect, ): """Test that files are synced to project stage when from_stage is not provided.""" - mock_pm().execute.return_value = mock_cursor( + mock_pm().deploy.return_value = mock_cursor( rows=[("[]",)], columns=("operations") ) 
mock_pm().sync_local_files.return_value = ( @@ -211,7 +213,7 @@ def test_deploy_project_with_sync( result = runner.invoke(["dcm", "deploy", "my_project"]) assert result.exit_code == 0, result.output - call_args = mock_pm().execute.call_args + call_args = mock_pm().deploy.call_args assert "DCM_FOOBAR" in call_args.kwargs["from_stage"] assert call_args.kwargs["from_stage"].endswith("_TMP_STAGE") @@ -225,7 +227,7 @@ def test_deploy_project_with_from_local_directory( mock_connect, tmp_path, ): - mock_pm().execute.return_value = mock_cursor( + mock_pm().deploy.return_value = mock_cursor( rows=[("[]",)], columns=("operations") ) mock_pm().sync_local_files.return_value = ( @@ -249,7 +251,7 @@ def test_deploy_project_with_from_local_directory( source_directory=str(source_dir), ) - call_args = mock_pm().execute.call_args + call_args = mock_pm().deploy.call_args assert call_args.kwargs["from_stage"].endswith("_TMP_STAGE") @@ -264,7 +266,7 @@ def test_plan_project( mock_connect, mock_from_resource, ): - mock_pm().execute.return_value = mock_cursor( + mock_pm().plan.return_value = mock_cursor( rows=[("[]",)], columns=("operations") ) mock_pm().sync_local_files.return_value = mock_from_resource() @@ -283,11 +285,10 @@ def test_plan_project( ) assert result.exit_code == 0, result.output - mock_pm().execute.assert_called_once_with( + mock_pm().plan.assert_called_once_with( project_identifier=FQN.from_string("fooBar"), configuration="some_configuration", from_stage=mock_from_resource(), - dry_run=True, variables=["key=value"], output_path=None, ) @@ -296,7 +297,7 @@ def test_plan_project( def test_plan_project_with_from_stage( self, mock_pm, runner, project_directory, mock_cursor ): - mock_pm().execute.return_value = mock_cursor( + mock_pm().plan.return_value = mock_cursor( rows=[("[]",)], columns=("operations") ) @@ -315,11 +316,10 @@ def test_plan_project_with_from_stage( ) assert result.exit_code == 0, result.output - mock_pm().execute.assert_called_once_with( + 
mock_pm().plan.assert_called_once_with( project_identifier=FQN.from_string("fooBar"), configuration="some_configuration", from_stage="@my_stage", - dry_run=True, variables=["key=value"], output_path=None, ) @@ -328,7 +328,7 @@ def test_plan_project_with_from_stage( def test_plan_project_with_output_path( self, mock_pm, runner, project_directory, mock_cursor ): - mock_pm().execute.return_value = mock_cursor( + mock_pm().plan.return_value = mock_cursor( rows=[("[]",)], columns=("operations") ) @@ -345,11 +345,10 @@ def test_plan_project_with_output_path( ) assert result.exit_code == 0, result.output - mock_pm().execute.assert_called_once_with( + mock_pm().plan.assert_called_once_with( project_identifier=FQN.from_string("fooBar"), configuration=None, from_stage="@my_stage", - dry_run=True, variables=None, output_path="@output_stage/results", ) @@ -358,7 +357,7 @@ def test_plan_project_with_output_path( def test_plan_project_with_output_path_and_configuration( self, mock_pm, runner, project_directory, mock_cursor ): - mock_pm().execute.return_value = mock_cursor( + mock_pm().plan.return_value = mock_cursor( rows=[("[]",)], columns=("operations") ) @@ -377,11 +376,10 @@ def test_plan_project_with_output_path_and_configuration( ) assert result.exit_code == 0, result.output - mock_pm().execute.assert_called_once_with( + mock_pm().plan.assert_called_once_with( project_identifier=FQN.from_string("fooBar"), configuration="some_config", from_stage="@my_stage", - dry_run=True, variables=None, output_path="@output_stage", ) @@ -398,7 +396,7 @@ def test_plan_project_with_sync( mock_connect, ): """Test that files are synced to project stage when from_stage is not provided.""" - mock_pm().execute.return_value = mock_cursor( + mock_pm().plan.return_value = mock_cursor( rows=[("[]",)], columns=("operations") ) mock_pm().sync_local_files.return_value = ( @@ -409,7 +407,7 @@ def test_plan_project_with_sync( result = runner.invoke(["dcm", "plan", "my_project"]) assert result.exit_code 
== 0, result.output - call_args = mock_pm().execute.call_args + call_args = mock_pm().plan.call_args assert "DCM_FOOBAR_" in call_args.kwargs["from_stage"] assert call_args.kwargs["from_stage"].endswith("_TMP_STAGE") @@ -423,7 +421,7 @@ def test_plan_project_with_from_local_directory( mock_connect, tmp_path, ): - mock_pm().execute.return_value = mock_cursor( + mock_pm().plan.return_value = mock_cursor( rows=[("[]",)], columns=("operations") ) mock_pm().sync_local_files.return_value = ( @@ -446,7 +444,7 @@ def test_plan_project_with_from_local_directory( source_directory=str(source_dir), ) - call_args = mock_pm().execute.call_args + call_args = mock_pm().plan.call_args assert call_args.kwargs["from_stage"].endswith("_TMP_STAGE") @@ -650,3 +648,831 @@ def test_describe_command_alias(self, mock_connect, runner): == queries[1] == "describe DCM Project IDENTIFIER('PROJECT_NAME')" ) + + +class TestDCMTest: + @mock.patch(DCMProjectManager) + def test_test_success(self, mock_pm, runner, mock_cursor): + """Test the test command when all expectations pass.""" + success_result = { + "status": "SUCCESS", + "expectations": [ + { + "table_name": "JW_DCM_TESTALL.ANALYTICS.EMPLOYEES", + "metric_database": "JW_DCM_TESTALL", + "metric_schema": "ANALYTICS", + "metric_name": "COUNT_BELOW_2", + "expectation_name": "LEVELS_MUST_BE_HIGHER_THAN_ZERO", + "expectation_expression": "value = 0", + "value": 0, + "expectation_violated": False, + }, + ], + } + mock_pm().test.return_value = mock_cursor( + rows=[(json.dumps(success_result),)], columns=("result",) + ) + + result = runner.invoke(["dcm", "test", "my_project"]) + + assert result.exit_code == 0, result.output + assert "All 1 expectation(s) passed successfully." 
in result.output + mock_pm().test.assert_called_once_with( + project_identifier=FQN.from_string("my_project") + ) + + @mock.patch(DCMProjectManager) + def test_test_with_violated_expectations(self, mock_pm, runner, mock_cursor): + """Test the test command when expectations are violated.""" + violated_result = { + "status": "EXPECTATION_VIOLATED", + "expectations": [ + { + "table_name": "JW_DCM_TESTALL.ANALYTICS.EMPLOYEES", + "metric_database": "JW_DCM_TESTALL", + "metric_schema": "ANALYTICS", + "metric_name": "COUNT_BELOW_2", + "expectation_name": "LEVELS_MUST_BE_HIGHER_THAN_ZERO", + "expectation_expression": "value = 0", + "value": 0, + "expectation_violated": False, + }, + { + "table_name": "JW_DCM_TESTALL.ANALYTICS.EMPLOYEES", + "metric_database": "JW_DCM_TESTALL", + "metric_schema": "ANALYTICS", + "metric_name": "COUNT_BELOW_1", + "expectation_name": "LEVELS_MUST_BE_HIGHER_THAN_ZERO", + "expectation_expression": "value = 0", + "value": 0, + "expectation_violated": False, + }, + { + "table_name": "JW_DCM_TESTALL.ANALYTICS.EMPLOYEES", + "metric_database": "JW_DCM_TESTALL", + "metric_schema": "ANALYTICS", + "metric_name": "COUNT_BELOW_5", + "expectation_name": "LEVELS_MUST_BE_HIGHER_THAN_ZERO", + "expectation_expression": "value = 0", + "value": 4, + "expectation_violated": True, + }, + ], + } + + mock_pm().test.return_value = mock_cursor( + rows=[(json.dumps(violated_result),)], columns=("result",) + ) + + result = runner.invoke(["dcm", "test", "my_project"]) + + assert result.exit_code == 1, result.output + assert "Tests completed: 2 passed, 1 failed out of 3 total." 
in result.output + assert "Failed expectations:" in result.output + assert "Table: JW_DCM_TESTALL.ANALYTICS.EMPLOYEES" in result.output + assert "Expectation: LEVELS_MUST_BE_HIGHER_THAN_ZERO" in result.output + assert "Metric: COUNT_BELOW_5" in result.output + mock_pm().test.assert_called_once_with( + project_identifier=FQN.from_string("my_project") + ) + + @mock.patch(DCMProjectManager) + def test_test_with_no_expectations(self, mock_pm, runner, mock_cursor): + """Test the test command when there are no expectations defined.""" + no_expectations_result = {"status": "SUCCESS", "expectations": []} + + mock_pm().test.return_value = mock_cursor( + rows=[(json.dumps(no_expectations_result),)], columns=("result",) + ) + + result = runner.invoke(["dcm", "test", "my_project"]) + + assert result.exit_code == 0, result.output + assert "No expectations defined in the project." in result.output + mock_pm().test.assert_called_once_with( + project_identifier=FQN.from_string("my_project") + ) + + @mock.patch(DCMProjectManager) + def test_test_with_multiple_failed_expectations(self, mock_pm, runner, mock_cursor): + """Test the test command with multiple failed expectations from different tables.""" + violated_result = { + "status": "EXPECTATION_VIOLATED", + "expectations": [ + { + "table_name": "DB.SCHEMA.TABLE_A", + "metric_database": "DB", + "metric_schema": "SCHEMA", + "metric_name": "ROW_COUNT", + "expectation_name": "MIN_ROWS", + "expectation_expression": "value >= 100", + "value": 50, + "expectation_violated": True, + }, + { + "table_name": "DB.SCHEMA.TABLE_B", + "metric_database": "DB", + "metric_schema": "SCHEMA", + "metric_name": "NULL_COUNT", + "expectation_name": "NO_NULLS", + "expectation_expression": "value = 0", + "value": 5, + "expectation_violated": True, + }, + ], + } + + mock_pm().test.return_value = mock_cursor( + rows=[(json.dumps(violated_result),)], columns=("result",) + ) + + result = runner.invoke(["dcm", "test", "my_project"]) + + assert result.exit_code 
== 1, result.output + assert "Tests completed: 0 passed, 2 failed out of 2 total." in result.output + assert "Failed expectations:" in result.output + assert "Table: DB.SCHEMA.TABLE_A" in result.output + assert "Table: DB.SCHEMA.TABLE_B" in result.output + mock_pm().test.assert_called_once_with( + project_identifier=FQN.from_string("my_project") + ) + + +class TestDCMRefresh: + @mock.patch(DCMProjectManager) + def test_refresh_with_outdated_tables(self, mock_pm, runner, mock_cursor, snapshot): + refresh_result = { + "refreshed_tables": [ + { + "dt_name": "JW_DCM_TESTALL.ANALYTICS.DYNAMIC_EMPLOYEES", + "refreshed_dt_count": 1, + "data_timestamp": "1760357032.175", + "statistics": '{"insertedRows":5,"copiedRows":0,"deletedRows":5}', + } + ] + } + mock_pm().refresh.return_value = mock_cursor( + rows=[(json.dumps(refresh_result),)], columns=("result",) + ) + + result = runner.invoke(["dcm", "refresh", "my_project"]) + + assert result.exit_code == 0, result.output + assert result.output == snapshot + mock_pm().refresh.assert_called_once_with( + project_identifier=FQN.from_string("my_project") + ) + + @mock.patch(DCMProjectManager) + def test_refresh_with_fresh_tables(self, mock_pm, runner, mock_cursor, snapshot): + refresh_result = { + "refreshed_tables": [ + { + "dt_name": "JW_DCM_TESTALL.ANALYTICS.DYNAMIC_EMPLOYEES", + "refreshed_dt_count": 0, + "data_timestamp": "1760356974.543", + "statistics": "No new data", + } + ] + } + mock_pm().refresh.return_value = mock_cursor( + rows=[(json.dumps(refresh_result),)], columns=("result",) + ) + + result = runner.invoke(["dcm", "refresh", "my_project"]) + + assert result.exit_code == 0, result.output + assert result.output == snapshot + mock_pm().refresh.assert_called_once_with( + project_identifier=FQN.from_string("my_project") + ) + + @mock.patch(DCMProjectManager) + def test_refresh_with_no_dynamic_tables( + self, mock_pm, runner, mock_cursor, snapshot + ): + refresh_result = {"refreshed_tables": []} + 
mock_pm().refresh.return_value = mock_cursor( + rows=[(json.dumps(refresh_result),)], columns=("result",) + ) + + result = runner.invoke(["dcm", "refresh", "my_project"]) + + assert result.exit_code == 0, result.output + assert result.output == snapshot + mock_pm().refresh.assert_called_once_with( + project_identifier=FQN.from_string("my_project") + ) + + +class TestDCMPreview: + @mock.patch(DCMProjectManager) + def test_preview_basic( + self, + mock_pm, + runner, + project_directory, + mock_cursor, + mock_connect, + mock_from_resource, + ): + mock_pm().preview.return_value = mock_cursor( + rows=[(1, "Alice", "alice@example.com"), (2, "Bob", "bob@example.com")], + columns=("id", "name", "email"), + ) + mock_pm().sync_local_files.return_value = mock_from_resource() + + with project_directory("dcm_project"): + result = runner.invoke( + ["dcm", "preview", "my_project", "--object", "my_table"] + ) + + assert result.exit_code == 0, result.output + + mock_pm().preview.assert_called_once_with( + project_identifier=FQN.from_string("my_project"), + object_identifier=FQN.from_string("my_table"), + configuration=None, + from_stage=mock_from_resource(), + variables=None, + limit=None, + ) + + @mock.patch(DCMProjectManager) + def test_preview_with_from_stage( + self, mock_pm, runner, project_directory, mock_cursor + ): + mock_pm().preview.return_value = mock_cursor( + rows=[(1, "Alice", "alice@example.com")], + columns=("id", "name", "email"), + ) + + result = runner.invoke( + [ + "dcm", + "preview", + "my_project", + "--object", + "my_table", + "--from", + "@my_stage", + ] + ) + assert result.exit_code == 0, result.output + + mock_pm().preview.assert_called_once_with( + project_identifier=FQN.from_string("my_project"), + object_identifier=FQN.from_string("my_table"), + configuration=None, + from_stage="@my_stage", + variables=None, + limit=None, + ) + + @mock.patch(DCMProjectManager) + @pytest.mark.parametrize( + "extra_args,expected_config,expected_vars,expected_limit", + [ + 
( + ["--configuration", "dev", "-D", "key=value", "--limit", "10"], + "dev", + ["key=value"], + 10, + ), + ( + ["--configuration", "prod"], + "prod", + None, + None, + ), + ( + ["-D", "var1=val1", "-D", "var2=val2", "--limit", "5"], + None, + ["var1=val1", "var2=val2"], + 5, + ), + ( + ["--limit", "100"], + None, + None, + 100, + ), + ], + ) + def test_preview_with_various_options( + self, + mock_pm, + runner, + project_directory, + mock_cursor, + extra_args, + expected_config, + expected_vars, + expected_limit, + ): + mock_pm().preview.return_value = mock_cursor( + rows=[(1, "Alice", "alice@example.com")], + columns=("id", "name", "email"), + ) + + result = runner.invoke( + [ + "dcm", + "preview", + "my_project", + "--object", + "my_table", + "--from", + "@my_stage", + ] + + extra_args + ) + assert result.exit_code == 0, result.output + + mock_pm().preview.assert_called_once_with( + project_identifier=FQN.from_string("my_project"), + object_identifier=FQN.from_string("my_table"), + configuration=expected_config, + from_stage="@my_stage", + variables=expected_vars, + limit=expected_limit, + ) + + @mock.patch("snowflake.cli._plugins.dcm.manager.StageManager.create") + @mock.patch(DCMProjectManager) + def test_preview_with_sync( + self, + mock_pm, + _mock_create, + runner, + project_directory, + mock_cursor, + mock_connect, + ): + mock_pm().preview.return_value = mock_cursor( + rows=[(1, "Alice", "alice@example.com")], + columns=("id", "name", "email"), + ) + mock_pm().sync_local_files.return_value = ( + "MockDatabase.MockSchema.DCM_FOOBAR_1234567890_TMP_STAGE" + ) + + with project_directory("dcm_project"): + result = runner.invoke( + ["dcm", "preview", "my_project", "--object", "my_table"] + ) + assert result.exit_code == 0, result.output + + call_args = mock_pm().preview.call_args + assert "DCM_FOOBAR_" in call_args.kwargs["from_stage"] + assert call_args.kwargs["from_stage"].endswith("_TMP_STAGE") + + @mock.patch(DCMProjectManager) + def 
test_preview_with_from_local_directory( + self, + mock_pm, + runner, + project_directory, + mock_cursor, + mock_connect, + tmp_path, + ): + mock_pm().preview.return_value = mock_cursor( + rows=[(1, "Alice", "alice@example.com")], + columns=("id", "name", "email"), + ) + mock_pm().sync_local_files.return_value = ( + "MockDatabase.MockSchema.DCM_FOOBAR_1234567890_TMP_STAGE" + ) + + source_dir = tmp_path / "source_project" + source_dir.mkdir() + + manifest_file = source_dir / "manifest.yml" + manifest_file.write_text("type: dcm_project\n") + + with project_directory("dcm_project"): + result = runner.invoke( + [ + "dcm", + "preview", + "my_project", + "--object", + "my_table", + "--from", + str(source_dir), + ] + ) + assert result.exit_code == 0, result.output + + mock_pm().sync_local_files.assert_called_once_with( + project_identifier=FQN.from_string("my_project"), + source_directory=str(source_dir), + ) + + call_args = mock_pm().preview.call_args + assert call_args.kwargs["from_stage"].endswith("_TMP_STAGE") + + def test_preview_without_object_fails(self, runner, project_directory): + with project_directory("dcm_project"): + result = runner.invoke(["dcm", "preview", "my_project"]) + + assert result.exit_code == 2 + assert "Missing option '--object'" in result.output + + +class TestDCMAnalyze: + @staticmethod + def _create_analyze_result(has_errors=False): + """Helper to create properly structured analyze result matching real API response.""" + result = { + "files": [ + { + "sourcePath": "definitions/test.sql", + "definitions": [ + { + "id": { + "name": "TEST_TABLE", + "schema": "PUBLIC", + "database": "TEST_DB", + "domain": "TABLE", + }, + "renderedPosition": {"line": 1, "column": 1}, + "dependencies": [], + "errors": [ + { + "sourcePosition": None, + "renderedPosition": {"line": 1, "column": 1}, + "message": "DCM project ANALYZE error: Test error message.", + "code": "001597", + "type": "syntax_error", + } + ] + if has_errors + else [], + "refinedDomain": "table", + 
} + ], + "errors": [], + } + ] + } + return result + + @mock.patch(DCMProjectManager) + def test_analyze_basic( + self, + mock_pm, + runner, + project_directory, + mock_cursor, + mock_connect, + mock_from_resource, + ): + analyze_result = self._create_analyze_result(has_errors=False) + mock_pm().analyze.return_value = mock_cursor( + rows=[(json.dumps(analyze_result),)], columns=("result",) + ) + mock_pm().sync_local_files.return_value = mock_from_resource() + + with project_directory("dcm_project"): + result = runner.invoke(["dcm", "analyze", "my_project"]) + + assert result.exit_code == 0, result.output + assert "✓ Analysis complete" in result.output + assert "1 file(s) analyzed" in result.output + assert "1 definition(s) found" in result.output + assert "No errors detected" in result.output + + mock_pm().analyze.assert_called_once_with( + project_identifier=FQN.from_string("my_project"), + configuration=None, + from_stage=mock_from_resource(), + variables=None, + analysis_type=None, + output_path=None, + ) + + @mock.patch(DCMProjectManager) + @pytest.mark.parametrize( + "type_value", ["dependencies", "DEPENDENCIES", "Dependencies"] + ) + def test_analyze_with_type( + self, + mock_pm, + runner, + project_directory, + mock_cursor, + mock_connect, + mock_from_resource, + type_value, + ): + analyze_result = self._create_analyze_result(has_errors=False) + mock_pm().analyze.return_value = mock_cursor( + rows=[(json.dumps(analyze_result),)], columns=("result",) + ) + mock_pm().sync_local_files.return_value = mock_from_resource() + + with project_directory("dcm_project"): + result = runner.invoke( + ["dcm", "analyze", "my_project", "--type", type_value] + ) + + assert result.exit_code == 0, result.output + + mock_pm().analyze.assert_called_once_with( + project_identifier=FQN.from_string("my_project"), + configuration=None, + from_stage=mock_from_resource(), + variables=None, + analysis_type=AnalysisType.DEPENDENCIES, + output_path=None, + ) + + 
@mock.patch(DCMProjectManager) + def test_analyze_with_from_stage( + self, mock_pm, runner, project_directory, mock_cursor + ): + analyze_result = self._create_analyze_result(has_errors=False) + mock_pm().analyze.return_value = mock_cursor( + rows=[(json.dumps(analyze_result),)], columns=("result",) + ) + + result = runner.invoke(["dcm", "analyze", "my_project", "--from", "@my_stage"]) + assert result.exit_code == 0, result.output + + mock_pm().analyze.assert_called_once_with( + project_identifier=FQN.from_string("my_project"), + configuration=None, + from_stage="@my_stage", + variables=None, + analysis_type=None, + output_path=None, + ) + + @mock.patch(DCMProjectManager) + def test_analyze_with_configuration_and_variables( + self, mock_pm, runner, project_directory, mock_cursor + ): + analyze_result = self._create_analyze_result(has_errors=False) + mock_pm().analyze.return_value = mock_cursor( + rows=[(json.dumps(analyze_result),)], columns=("result",) + ) + + result = runner.invoke( + [ + "dcm", + "analyze", + "my_project", + "--from", + "@my_stage", + "--configuration", + "dev", + "-D", + "key=value", + ] + ) + assert result.exit_code == 0, result.output + + mock_pm().analyze.assert_called_once_with( + project_identifier=FQN.from_string("my_project"), + configuration="dev", + from_stage="@my_stage", + variables=["key=value"], + analysis_type=None, + output_path=None, + ) + + @mock.patch(DCMProjectManager) + def test_analyze_with_output_path_stage( + self, mock_pm, runner, project_directory, mock_cursor + ): + analyze_result = self._create_analyze_result(has_errors=False) + mock_pm().analyze.return_value = mock_cursor( + rows=[(json.dumps(analyze_result),)], columns=("result",) + ) + + result = runner.invoke( + [ + "dcm", + "analyze", + "my_project", + "--from", + "@my_stage", + "--output-path", + "@output_stage/results", + ] + ) + assert result.exit_code == 0, result.output + + mock_pm().analyze.assert_called_once_with( + 
project_identifier=FQN.from_string("my_project"), + configuration=None, + from_stage="@my_stage", + variables=None, + analysis_type=None, + output_path="@output_stage/results", + ) + + @mock.patch(DCMProjectManager) + def test_analyze_with_all_options( + self, mock_pm, runner, project_directory, mock_cursor + ): + analyze_result = self._create_analyze_result(has_errors=False) + mock_pm().analyze.return_value = mock_cursor( + rows=[(json.dumps(analyze_result),)], columns=("result",) + ) + + result = runner.invoke( + [ + "dcm", + "analyze", + "my_project", + "--from", + "@my_stage", + "--type", + "dependencies", + "--configuration", + "prod", + "-D", + "var1=val1", + "-D", + "var2=val2", + "--output-path", + "@output_stage", + ] + ) + assert result.exit_code == 0, result.output + + mock_pm().analyze.assert_called_once_with( + project_identifier=FQN.from_string("my_project"), + configuration="prod", + from_stage="@my_stage", + variables=["var1=val1", "var2=val2"], + analysis_type=AnalysisType.DEPENDENCIES, + output_path="@output_stage", + ) + + @mock.patch("snowflake.cli._plugins.dcm.manager.StageManager.create") + @mock.patch(DCMProjectManager) + def test_analyze_with_sync( + self, + mock_pm, + _mock_create, + runner, + project_directory, + mock_cursor, + mock_connect, + ): + analyze_result = self._create_analyze_result(has_errors=False) + mock_pm().analyze.return_value = mock_cursor( + rows=[(json.dumps(analyze_result),)], columns=("result",) + ) + mock_pm().sync_local_files.return_value = ( + "MockDatabase.MockSchema.DCM_FOOBAR_1234567890_TMP_STAGE" + ) + + with project_directory("dcm_project"): + result = runner.invoke(["dcm", "analyze", "my_project"]) + assert result.exit_code == 0, result.output + + call_args = mock_pm().analyze.call_args + assert "DCM_FOOBAR_" in call_args.kwargs["from_stage"] + assert call_args.kwargs["from_stage"].endswith("_TMP_STAGE") + + @mock.patch(DCMProjectManager) + def test_analyze_with_from_local_directory( + self, + mock_pm, + 
runner, + project_directory, + mock_cursor, + mock_connect, + tmp_path, + ): + analyze_result = self._create_analyze_result(has_errors=False) + mock_pm().analyze.return_value = mock_cursor( + rows=[(json.dumps(analyze_result),)], columns=("result",) + ) + mock_pm().sync_local_files.return_value = ( + "MockDatabase.MockSchema.DCM_FOOBAR_1234567890_TMP_STAGE" + ) + + source_dir = tmp_path / "source_project" + source_dir.mkdir() + + manifest_file = source_dir / "manifest.yml" + manifest_file.write_text("type: dcm_project\n") + + with project_directory("dcm_project"): + result = runner.invoke( + ["dcm", "analyze", "my_project", "--from", str(source_dir)] + ) + assert result.exit_code == 0, result.output + + mock_pm().sync_local_files.assert_called_once_with( + project_identifier=FQN.from_string("my_project"), + source_directory=str(source_dir), + ) + + call_args = mock_pm().analyze.call_args + assert call_args.kwargs["from_stage"].endswith("_TMP_STAGE") + + def test_analyze_with_invalid_type(self, runner, project_directory): + with project_directory("dcm_project"): + result = runner.invoke( + ["dcm", "analyze", "my_project", "--type", "invalid"] + ) + + assert result.exit_code == 2 + assert "Invalid value for '--type'" in result.output + assert "'invalid' is not 'dependencies'" in result.output + + @mock.patch(DCMProjectManager) + def test_analyze_with_errors_exits_with_code_1( + self, + mock_pm, + runner, + project_directory, + mock_cursor, + mock_connect, + mock_from_resource, + ): + analyze_result = self._create_analyze_result(has_errors=True) + mock_pm().analyze.return_value = mock_cursor( + rows=[(json.dumps(analyze_result),)], columns=("result",) + ) + mock_pm().sync_local_files.return_value = mock_from_resource() + + with project_directory("dcm_project"): + result = runner.invoke(["dcm", "analyze", "my_project"]) + + assert result.exit_code == 1, result.output + assert "Error" in result.output + assert "Analysis found 1 error(s) in 1 file(s)" in result.output + 
assert "definitions/test.sql" in result.output + assert "DCM project ANALYZE error: Test error message" in result.output + + @mock.patch(DCMProjectManager) + def test_analyze_with_file_level_errors( + self, + mock_pm, + runner, + project_directory, + mock_cursor, + mock_connect, + mock_from_resource, + ): + analyze_result = { + "files": [ + { + "sourcePath": "definitions/test.sql", + "definitions": [], + "errors": [ + { + "message": "File-level error", + "code": "001598", + "type": "parse_error", + } + ], + } + ] + } + mock_pm().analyze.return_value = mock_cursor( + rows=[(json.dumps(analyze_result),)], columns=("result",) + ) + mock_pm().sync_local_files.return_value = mock_from_resource() + + with project_directory("dcm_project"): + result = runner.invoke(["dcm", "analyze", "my_project"]) + + assert result.exit_code == 1, result.output + assert "Error" in result.output + assert "Analysis found 1 error(s) in 1 file(s)" in result.output + assert "definitions/test.sql" in result.output + assert "File-level error" in result.output + + @mock.patch(DCMProjectManager) + def test_analyze_without_errors_exits_with_code_0( + self, + mock_pm, + runner, + project_directory, + mock_cursor, + mock_connect, + mock_from_resource, + ): + analyze_result = self._create_analyze_result(has_errors=False) + mock_pm().analyze.return_value = mock_cursor( + rows=[(json.dumps(analyze_result),)], columns=("result",) + ) + mock_pm().sync_local_files.return_value = mock_from_resource() + + with project_directory("dcm_project"): + result = runner.invoke(["dcm", "analyze", "my_project"]) + + assert result.exit_code == 0, result.output + assert "✓ Analysis complete" in result.output + assert "1 file(s) analyzed" in result.output + assert "1 definition(s) found" in result.output + assert "No errors detected" in result.output diff --git a/tests/dcm/test_manager.py b/tests/dcm/test_manager.py index 48691be27d..1b0b5ed49b 100644 --- a/tests/dcm/test_manager.py +++ b/tests/dcm/test_manager.py @@ -7,6 
+7,7 @@ from snowflake.cli._plugins.dcm.manager import ( DCM_PROJECT_TYPE, MANIFEST_FILE_NAME, + AnalysisType, DCMProjectManager, ) from snowflake.cli.api.constants import PatternMatchingType @@ -44,9 +45,9 @@ def test_create(mock_execute_query): @mock.patch(execute_queries) -def test_execute_project(mock_execute_query): +def test_deploy_project(mock_execute_query): mgr = DCMProjectManager() - mgr.execute( + mgr.deploy( project_identifier=TEST_PROJECT, from_stage="@test_stage", variables=["key=value", "aaa=bbb"], @@ -60,9 +61,9 @@ def test_execute_project(mock_execute_query): @mock.patch(execute_queries) -def test_execute_project_with_from_stage(mock_execute_query): +def test_deploy_project_with_from_stage(mock_execute_query): mgr = DCMProjectManager() - mgr.execute( + mgr.deploy( project_identifier=TEST_PROJECT, from_stage="@my_stage", variables=["key=value", "aaa=bbb"], @@ -76,9 +77,9 @@ def test_execute_project_with_from_stage(mock_execute_query): @mock.patch(execute_queries) -def test_execute_project_with_from_stage_without_prefix(mock_execute_query): +def test_deploy_project_with_from_stage_without_prefix(mock_execute_query): mgr = DCMProjectManager() - mgr.execute( + mgr.deploy( project_identifier=TEST_PROJECT, from_stage="my_stage", variables=["key=value", "aaa=bbb"], @@ -92,10 +93,10 @@ def test_execute_project_with_from_stage_without_prefix(mock_execute_query): @mock.patch(execute_queries) -def test_execute_project_with_default_deployment(mock_execute_query, project_directory): +def test_deploy_project_with_default_deployment(mock_execute_query, project_directory): mgr = DCMProjectManager() - mgr.execute(project_identifier=TEST_PROJECT, from_stage="@test_stage") + mgr.deploy(project_identifier=TEST_PROJECT, from_stage="@test_stage") mock_execute_query.assert_called_once_with( query="EXECUTE DCM PROJECT IDENTIFIER('my_project') DEPLOY FROM @test_stage" @@ -105,10 +106,9 @@ def test_execute_project_with_default_deployment(mock_execute_query, project_dir 
@mock.patch(execute_queries) def test_plan_project(mock_execute_query, project_directory): mgr = DCMProjectManager() - mgr.execute( + mgr.plan( project_identifier=TEST_PROJECT, from_stage="@test_stage", - dry_run=True, configuration="some_configuration", ) @@ -120,10 +120,9 @@ def test_plan_project(mock_execute_query, project_directory): @mock.patch(execute_queries) def test_plan_project_with_from_stage(mock_execute_query, project_directory): mgr = DCMProjectManager() - mgr.execute( + mgr.plan( project_identifier=TEST_PROJECT, from_stage="@my_stage", - dry_run=True, configuration="some_configuration", ) @@ -160,25 +159,162 @@ def test_drop_deployment(mock_execute_query, if_exists): @mock.patch(execute_queries) -def test_plan_project_with_output_path__stage(mock_execute_query, project_directory): +def test_test_project(mock_execute_query): + mgr = DCMProjectManager() + mgr.test(project_identifier=TEST_PROJECT) + + mock_execute_query.assert_called_once_with( + query="EXECUTE DCM PROJECT IDENTIFIER('my_project') TEST ALL" + ) + + +@mock.patch(execute_queries) +def test_refresh_project(mock_execute_query): + mgr = DCMProjectManager() + mgr.refresh(project_identifier=TEST_PROJECT) + + mock_execute_query.assert_called_once_with( + query="EXECUTE DCM PROJECT IDENTIFIER('my_project') REFRESH ALL" + ) + + +@mock.patch(execute_queries) +def test_preview_project_basic(mock_execute_query): mgr = DCMProjectManager() - mgr.execute( + mgr.preview( + project_identifier=TEST_PROJECT, + object_identifier=FQN.from_string("my_table"), + from_stage="@test_stage", + ) + + mock_execute_query.assert_called_once_with( + query="EXECUTE DCM PROJECT IDENTIFIER('my_project') PREVIEW IDENTIFIER('my_table') FROM @test_stage" + ) + + +@mock.patch(execute_queries) +@pytest.mark.parametrize( + "configuration,variables,limit,expected_suffix", + [ + ( + "dev", + ["key=value"], + 10, + " USING CONFIGURATION dev (key=>value) FROM @test_stage LIMIT 10", + ), + ( + "prod", + None, + None, + " USING 
CONFIGURATION prod FROM @test_stage", + ), + ( + None, + ["var1=val1", "var2=val2"], + 5, + " USING (var1=>val1, var2=>val2) FROM @test_stage LIMIT 5", + ), + ( + None, + None, + 100, + " FROM @test_stage LIMIT 100", + ), + ], +) +def test_preview_project_with_various_options( + mock_execute_query, configuration, variables, limit, expected_suffix +): + mgr = DCMProjectManager() + mgr.preview( + project_identifier=TEST_PROJECT, + object_identifier=FQN.from_string("my_view"), + from_stage="@test_stage", + configuration=configuration, + variables=variables, + limit=limit, + ) + + expected_query = ( + f"EXECUTE DCM PROJECT IDENTIFIER('my_project') PREVIEW IDENTIFIER('my_view')" + + expected_suffix + ) + mock_execute_query.assert_called_once_with(query=expected_query) + + +@mock.patch(execute_queries) +def test_analyze_project_basic(mock_execute_query): + mgr = DCMProjectManager() + mgr.analyze( + project_identifier=TEST_PROJECT, + from_stage="@test_stage", + ) + + mock_execute_query.assert_called_once_with( + query="EXECUTE DCM PROJECT IDENTIFIER('my_project') ANALYZE FROM @test_stage" + ) + + +@mock.patch(execute_queries) +def test_analyze_project_with_type(mock_execute_query): + mgr = DCMProjectManager() + mgr.analyze( + project_identifier=TEST_PROJECT, + from_stage="@test_stage", + analysis_type=AnalysisType.DEPENDENCIES, + ) + + mock_execute_query.assert_called_once_with( + query="EXECUTE DCM PROJECT IDENTIFIER('my_project') ANALYZE DEPENDENCIES FROM @test_stage" + ) + + +@mock.patch(execute_queries) +def test_analyze_project_with_configuration(mock_execute_query): + mgr = DCMProjectManager() + mgr.analyze( + project_identifier=TEST_PROJECT, + from_stage="@test_stage", + configuration="dev", + ) + + mock_execute_query.assert_called_once_with( + query="EXECUTE DCM PROJECT IDENTIFIER('my_project') ANALYZE USING CONFIGURATION dev FROM @test_stage" + ) + + +@mock.patch(execute_queries) +def test_analyze_project_with_variables(mock_execute_query): + mgr = 
DCMProjectManager() + mgr.analyze( + project_identifier=TEST_PROJECT, + from_stage="@test_stage", + variables=["key=value", "foo=bar"], + ) + + mock_execute_query.assert_called_once_with( + query="EXECUTE DCM PROJECT IDENTIFIER('my_project') ANALYZE USING (key=>value, foo=>bar) FROM @test_stage" + ) + + +@mock.patch(execute_queries) +def test_analyze_project_with_output_path_stage(mock_execute_query): + mgr = DCMProjectManager() + mgr.analyze( project_identifier=TEST_PROJECT, from_stage="@test_stage", - dry_run=True, - configuration="some_configuration", output_path="@output_stage/results", ) mock_execute_query.assert_called_once_with( - query="EXECUTE DCM PROJECT IDENTIFIER('my_project') PLAN USING CONFIGURATION some_configuration FROM @test_stage OUTPUT_PATH @output_stage/results" + query="EXECUTE DCM PROJECT IDENTIFIER('my_project') ANALYZE FROM @test_stage OUTPUT_PATH @output_stage/results" ) @mock.patch(execute_queries) @mock.patch("snowflake.cli._plugins.dbt.manager.StageManager.get_recursive") @mock.patch("snowflake.cli._plugins.dbt.manager.StageManager.create") -def test_plan_project_with_output_path__local_path( +def test_analyze_project_with_output_path_local( mock_create, mock_get_recursive, mock_execute_query, @@ -186,17 +322,15 @@ def test_plan_project_with_output_path__local_path( mock_from_resource, ): mgr = DCMProjectManager() - mgr.execute( + mgr.analyze( project_identifier=TEST_PROJECT, from_stage="@test_stage", - dry_run=True, - configuration="some_configuration", output_path="output_path/results", ) temp_stage_fqn = mock_from_resource() mock_execute_query.assert_called_once_with( - query=f"EXECUTE DCM PROJECT IDENTIFIER('my_project') PLAN USING CONFIGURATION some_configuration FROM @test_stage OUTPUT_PATH @{temp_stage_fqn}" + query=f"EXECUTE DCM PROJECT IDENTIFIER('my_project') ANALYZE FROM @test_stage OUTPUT_PATH @{temp_stage_fqn}" ) mock_create.assert_called_once_with(temp_stage_fqn, temporary=True) mock_get_recursive.assert_called_once_with( 
@@ -205,18 +339,91 @@ def test_plan_project_with_output_path__local_path( @mock.patch(execute_queries) -def test_deploy_project_with_output_path(mock_execute_query, project_directory): +@pytest.mark.parametrize( + "configuration,variables,analysis_type,expected_suffix", + [ + ( + "dev", + ["key=value"], + AnalysisType.DEPENDENCIES, + " ANALYZE DEPENDENCIES USING CONFIGURATION dev (key=>value) FROM @test_stage", + ), + ( + "prod", + None, + None, + " ANALYZE USING CONFIGURATION prod FROM @test_stage", + ), + ( + None, + ["var1=val1", "var2=val2"], + AnalysisType.DEPENDENCIES, + " ANALYZE DEPENDENCIES USING (var1=>val1, var2=>val2) FROM @test_stage", + ), + ( + None, + None, + AnalysisType.DEPENDENCIES, + " ANALYZE DEPENDENCIES FROM @test_stage", + ), + ], +) +def test_analyze_project_with_various_options( + mock_execute_query, configuration, variables, analysis_type, expected_suffix +): mgr = DCMProjectManager() - mgr.execute( + mgr.analyze( project_identifier=TEST_PROJECT, from_stage="@test_stage", - dry_run=False, - alias="v1", - output_path="@output_stage", + configuration=configuration, + variables=variables, + analysis_type=analysis_type, ) + expected_query = f"EXECUTE DCM PROJECT IDENTIFIER('my_project')" + expected_suffix + mock_execute_query.assert_called_once_with(query=expected_query) + + +@mock.patch(execute_queries) +def test_plan_project_with_output_path__stage(mock_execute_query, project_directory): + mgr = DCMProjectManager() + mgr.plan( + project_identifier=TEST_PROJECT, + from_stage="@test_stage", + configuration="some_configuration", + output_path="@output_stage/results", + ) + + mock_execute_query.assert_called_once_with( + query="EXECUTE DCM PROJECT IDENTIFIER('my_project') PLAN USING CONFIGURATION some_configuration FROM @test_stage OUTPUT_PATH @output_stage/results" + ) + + +@mock.patch(execute_queries) +@mock.patch("snowflake.cli._plugins.dbt.manager.StageManager.get_recursive") 
+@mock.patch("snowflake.cli._plugins.dbt.manager.StageManager.create") +def test_plan_project_with_output_path__local_path( + mock_create, + mock_get_recursive, + mock_execute_query, + project_directory, + mock_from_resource, +): + mgr = DCMProjectManager() + mgr.plan( + project_identifier=TEST_PROJECT, + from_stage="@test_stage", + configuration="some_configuration", + output_path="output_path/results", + ) + + temp_stage_fqn = mock_from_resource() mock_execute_query.assert_called_once_with( - query=f"EXECUTE DCM PROJECT IDENTIFIER('my_project') DEPLOY AS \"v1\" FROM @test_stage" + query=f"EXECUTE DCM PROJECT IDENTIFIER('my_project') PLAN USING CONFIGURATION some_configuration FROM @test_stage OUTPUT_PATH @{temp_stage_fqn}" + ) + mock_create.assert_called_once_with(temp_stage_fqn, temporary=True) + mock_get_recursive.assert_called_once_with( + stage_path=str(temp_stage_fqn), dest_path=Path("output_path/results") ) @@ -235,7 +442,7 @@ def test_deploy_project_with_alias_special_characters( mock_execute_query, alias, expected_alias ): mgr = DCMProjectManager() - mgr.execute( + mgr.deploy( project_identifier=TEST_PROJECT, from_stage="@test_stage", alias=alias, diff --git a/tests_integration/test_dcm_project.py b/tests_integration/test_dcm_project.py index 3aed0d849a..696a12636f 100644 --- a/tests_integration/test_dcm_project.py +++ b/tests_integration/test_dcm_project.py @@ -555,3 +555,311 @@ def test_dcm_plan_and_deploy_from_another_directory( # Clean up result = runner.invoke_with_connection(["dcm", "drop", project_name]) assert result.exit_code == 0, result.output + + +@pytest.mark.qa_only +@pytest.mark.integration +def test_dcm_test_command( + runner, + test_database, + project_directory, + object_name_provider, + sql_test_helper, +): + project_name = object_name_provider.create_and_get_next_object_name() + table_name = f"{test_database}.PUBLIC.TestedTable" + dmf_name = "test_dmf" + + with project_directory("dcm_project") as project_root: + result = 
runner.invoke_with_connection(["dcm", "create", project_name]) + assert result.exit_code == 0, result.output + + # 1) Without any data metric functions, run test command to assert that exitcode is 0 and message is returned. + result = runner.invoke_with_connection(["dcm", "test", project_name]) + assert result.exit_code == 0, result.output + assert "No expectations defined in the project." in result.output + + # Define table and deploy + table_definition = f""" +define table identifier('{table_name}') ( + id int, name varchar, email varchar, level int +) data_metric_schedule = '5 minute'; +""" + file_a_path = project_root / "file_a.sql" + original_content = file_a_path.read_text() + file_a_path.write_text(original_content + table_definition) + + result = runner.invoke_with_connection_json( + [ + "dcm", + "deploy", + project_name, + "-D", + f"table_name='{test_database}.PUBLIC.OutputTestTable'", + ] + ) + assert result.exit_code == 0, result.output + + # Add some data + insert_data_sql = f""" +INSERT INTO {table_name} (id, name, email, level) VALUES + (1, 'Alice Johnson', 'alice.j@example.com', 5), + (2, 'Bob Williams', 'bob.w@example.com', 3), + (3, 'Charlie Brown', 'charlie.b@example.com', 3), + (4, 'Diana Miller', 'diana.m@example.com', 4), + (5, 'Evan Davis', 'evan.d@example.com', 2); +""" + sql_test_helper.execute_single_sql(insert_data_sql) + + # 2) Set a DMF that'll fail and run test command - should return exit code 1 with error message + dmf_sql = f""" +create or alter data metric function {dmf_name}( + arg_t table(arg_c int) +) +returns int +as $$ +select count(*) +from arg_t +where arg_c < 5 +$$; + +alter table {table_name} add data metric function {dmf_name} on (level) +expectation levels_must_be_higher_than_zero (value = 0); +""" + sql_test_helper.execute_single_sql(dmf_sql) + + result = runner.invoke_with_connection(["dcm", "test", project_name]) + assert result.exit_code == 1, result.output + assert "Failed expectations:" in result.output + assert 
"levels_must_be_higher_than_zero" in result.output.lower() + + # 3) Fix the data and run test command again + fix_data_sql = f""" +UPDATE {table_name} SET level = 5 WHERE level < 5; +""" + sql_test_helper.execute_single_sql(fix_data_sql) + + result = runner.invoke_with_connection(["dcm", "test", project_name]) + assert result.exit_code == 0, result.output + assert "expectation(s) passed successfully" in result.output + + +@pytest.mark.qa_only +@pytest.mark.integration +def test_dcm_refresh_command( + runner, + test_database, + project_directory, + object_name_provider, + sql_test_helper, +): + project_name = object_name_provider.create_and_get_next_object_name() + base_table_name = f"{test_database}.PUBLIC.RefreshBaseTable" + dynamic_table_name = f"{test_database}.PUBLIC.RefreshDynamicTable" + + with project_directory("dcm_project") as project_root: + result = runner.invoke_with_connection(["dcm", "create", project_name]) + assert result.exit_code == 0, result.output + + # Deploy the project. + result = runner.invoke_with_connection_json( + [ + "dcm", + "deploy", + project_name, + "-D", + f"table_name='{test_database}.PUBLIC.OutputTestTable'", + ] + ) + assert result.exit_code == 0, result.output + + # 1) Without any dynamic tables, run refresh command - should report no dynamic tables. + result = runner.invoke_with_connection(["dcm", "refresh", project_name]) + assert result.exit_code == 0, result.output + assert "No dynamic tables found in the project." in result.output + + # 2) Define base table and dynamic table with long refresh time. 
+ table_definitions = f""" +define table identifier('{base_table_name}') ( + id int, name varchar, email varchar +); + +define dynamic table identifier('{dynamic_table_name}') +target_lag = '1000 minutes' +WAREHOUSE = xs +as select * from {base_table_name}; +""" + file_a_path = project_root / "file_a.sql" + original_content = file_a_path.read_text() + file_a_path.write_text(original_content + table_definitions) + + # Deploy the project. + result = runner.invoke_with_connection_json( + [ + "dcm", + "deploy", + project_name, + "-D", + f"table_name='{test_database}.PUBLIC.OutputTestTable'", + ] + ) + assert result.exit_code == 0, result.output + + # 3) Insert data into the base table. + insert_data_sql = f""" +INSERT INTO {base_table_name} (id, name, email) VALUES + (1, 'Alice Johnson', 'alice.j@example.com'), + (2, 'Bob Williams', 'bob.w@example.com'), + (3, 'Charlie Brown', 'charlie.b@example.com'); +""" + sql_test_helper.execute_single_sql(insert_data_sql) + + # 4) Verify that data is NOT yet in the dynamic table (due to long refresh time). + check_dt_sql = f"SELECT COUNT(*) as cnt FROM {dynamic_table_name}" + result = sql_test_helper.execute_single_sql(check_dt_sql) + count_before = result[0]["CNT"] + assert count_before == 0, "Dynamic table should be empty before refresh." + + # 5) Run dcm refresh command. + result = runner.invoke_with_connection(["dcm", "refresh", project_name]) + assert result.exit_code == 0, result.output + # Should show at least 1 table was refreshed + assert ( + "1 dynamic table(s) refreshed" in result.output + or "dynamic table(s) refreshed" in result.output + ) + + # 6) Verify that data is NOW in the dynamic table. + result = sql_test_helper.execute_single_sql(check_dt_sql) + count_after = result[0]["CNT"] + assert count_after == 3, "Dynamic table should have 3 rows after refresh." 
+
+
+@pytest.mark.qa_only
+@pytest.mark.integration
+def test_dcm_preview_command(
+    runner,
+    test_database,
+    project_directory,
+    object_name_provider,
+    sql_test_helper,
+):
+    project_name = object_name_provider.create_and_get_next_object_name()
+    view_name = f"{test_database}.PUBLIC.PreviewTestView"
+    base_table_name = f"{test_database}.PUBLIC.OutputTestTable"
+
+    with project_directory("dcm_project") as project_root:
+        result = runner.invoke_with_connection(["dcm", "create", project_name])
+        assert result.exit_code == 0, result.output
+
+        result = runner.invoke_with_connection_json(
+            [
+                "dcm",
+                "deploy",
+                project_name,
+                "-D",
+                f"table_name='{base_table_name}'",
+            ]
+        )
+        assert result.exit_code == 0, result.output
+
+        # Define a view that selects from OutputTestTable. Preview can work on views that are not yet deployed
+        view_definition = f"""
+define view identifier('{view_name}') as
+    select UPPER(fooBar) as upperFooBar from {{{{ table_name }}}};
+"""
+        file_a_path = project_root / "file_a.sql"
+        original_content = file_a_path.read_text()
+        file_a_path.write_text(original_content + view_definition)
+
+        # Insert sample data into the base table.
+        insert_data_sql = f"""
+INSERT INTO {base_table_name} (fooBar) VALUES
+    ('foo'),
+    ('bar'),
+    ('baz'),
+    ('foobar');
+"""
+        sql_test_helper.execute_single_sql(insert_data_sql)
+
+        # 1) Preview without limit - should return all rows (or system default).
+        result = runner.invoke_with_connection_json(
+            [
+                "dcm",
+                "preview",
+                project_name,
+                "--object",
+                view_name,
+                "-D",
+                f"table_name='{base_table_name}'",
+            ]
+        )
+        assert result.exit_code == 0, result.output
+        assert isinstance(result.json, list)
+        assert len(result.json) == 4
+
+        # 2) Preview with limit - should return limited rows.
+ result = runner.invoke_with_connection_json( + [ + "dcm", + "preview", + project_name, + "--object", + view_name, + "--limit", + "2", + "-D", + f"table_name='{base_table_name}'", + ] + ) + assert result.exit_code == 0, result.output + assert isinstance(result.json, list) + assert len(result.json) == 2 + + +@pytest.mark.qa_only +@pytest.mark.integration +def test_dcm_analyze_command( + runner, + test_database, + project_directory, + object_name_provider, + sql_test_helper, +): + project_name = object_name_provider.create_and_get_next_object_name() + table_name = f"{test_database}.PUBLIC.AnalyzeTestTable" + view_name = f"{test_database}.PUBLIC.AnalyzeTestView" + + with project_directory("dcm_project") as project_root: + result = runner.invoke_with_connection(["dcm", "create", project_name]) + assert result.exit_code == 0, result.output + + table_definition = f""" +define table identifier('{table_name}') ( + id int, name varchar +); + +define view identifier('{view_name}') as + select * from {table_name}; +""" + file_a_path = project_root / "file_a.sql" + original_content = file_a_path.read_text() + file_a_path.write_text(original_content + table_definition) + + result = runner.invoke_with_connection_json( + [ + "dcm", + "deploy", + project_name, + "-D", + f"table_name='{test_database}.PUBLIC.OutputTestTable'", + ] + ) + assert result.exit_code == 0, result.output + + result = runner.invoke_with_connection_json( + ["dcm", "analyze", project_name, "--type", "dependencies"] + ) + assert result.exit_code == 0, result.output + assert isinstance(result.json, list) + assert len(result.json) > 0