| 1 | +errors: |
| 2 | + validation: |
| 3 | + not_csv_file: "'{file_path}' is not a csv file" |
| 4 | +    invalid_nodes_relations: "'{file_path}' does not contain valid nodes or relations" |
| 5 | +    invalid_truth_value: '"{string}" is not a recognized truth value' |
| 6 | + environment: |
| 7 | + no_env_vars: "No set of environment variables found for a valid Cosmo Tech API connection" |
| 8 | + no_valid_connection: "No valid connection available to the Cosmo Tech API" |
| 9 | + missing_env_var: "Missing the following environment variable: {envvar}" |
| 10 | + file_system: |
| 11 | +    file_not_found: "{source_folder} does not exist" |
| 12 | + file_exists: "File {csv_path} already exists" |
| 13 | + not_directory: "{target_dir} is a file and not a directory" |
| 14 | +    file_not_exists: '"{file_path}" does not exist' |
| 15 | + not_single_file: '"{file_path}" is not a single file' |
| 16 | + data: |
| 17 | + no_table: "No table with name {table_name} exists" |
| 18 | +    parameter_not_exists: "Parameter {parameter_name} does not exist" |
| 19 | + invalid_output_type: "{output_type} is not a valid type of output" |
| 20 | +    no_workspace_files: "No workspace files were found with filter {file_prefix} in workspace {workspace_id}" |
| 21 | + workspace: |
| 22 | + not_found: "Workspace {workspace_id} was not found in Organization {organization_id}" |
| 23 | + |
| 24 | + solution: |
| 25 | + loaded: "Loaded {path}" |
| 26 | +    api_configured: "API configuration set" |
| 27 | + loading_workspace: "Loading Workspace information to get Solution ID" |
| 28 | + errors: |
| 29 | + solution: |
| 30 | + invalid_file: "{file} is not a `.yaml` or `.json` file" |
| 31 | + environment: |
| 32 | + missing_var: "Missing the following environment variable: {envvar}" |
| 33 | + |
| 34 | +web: |
| 35 | + failed_open: "Failed to open: {url}" |
| 36 | +  opened: "Opened {url} in your browser" |
| 37 | + |
| 38 | +logs: |
| 39 | + connection: |
| 40 | + existing_sets: "Existing sets are:" |
| 41 | +    azure_connection: "  Azure Entra Connection: {keys}" |
| 42 | +    api_key_connection: "  Cosmo Tech API Key: {keys}" |
| 43 | +    keycloak_connection: "  Keycloak connection: {keys}" |
| 44 | +    found_keycloak: "Found Keycloak connection info" |
| 45 | + found_cert_authority: "Found Certificate Authority override for IDP connection, using it." |
| 46 | +    found_api_key: "Found API Key connection info" |
| 47 | + found_azure: "Found Azure Entra connection info" |
| 48 | + found_valid: "Found valid connection of type: {type}" |
| 49 | + data_transfer: |
| 50 | + sending_table: "Sending table {table_name} as {output_type}" |
| 51 | + sending_data: " Sending {size} bytes of data" |
| 52 | + table_empty: "Table {table_name} is empty (skipping)" |
| 53 | + rows_inserted: "Inserted {rows} rows in table {table_name}" |
| 54 | + file_sent: "Sending {file_path} as {uploaded_name}" |
| 55 | + ingestion: |
| 56 | + creating_table: "Create table query: {query}" |
| 57 | + table_created: "Table {table} created successfully" |
| 58 | + table_creation_failed: "Issue creating table {table}" |
| 59 | + ingesting: "Ingesting {table}" |
| 60 | + waiting_results: "Waiting for ingestion results, retry in {duration}s ({count}/{limit})" |
| 61 | +    max_retry: "Max number of retries reached, stopping wait" |
| 62 | + status_report: "{table} - {status}" |
| 63 | +    no_wait: "Not waiting for ingestion results" |
| 64 | + progress: |
| 65 | + loading_file: "Loading {file_name} from the API" |
| 66 | + file_loaded: "{file} successfully loaded from the API" |
| 67 | + operation_timing: "{operation} took {time:0.3}s" |
| 68 | + |
| 69 | + runner: |
| 70 | + starting_download: "Starting the Run data download" |
| 71 | +    no_parameters: "No parameters found in the runner" |
| 72 | + loaded_data: "Loaded run data" |
| 73 | + parameter_debug: " - {param_id:<{max_name_size}} {var_type:<{max_type_size}} \"{value}\"{inherited}" |
| 74 | +    not_single_dataset: "{runner_id} is not tied to a single dataset but to {count}" |
| 75 | + dataset_state: "Dataset {dataset_id} is in state {status}" |
| 76 | + downloading_datasets: "Downloading {count} datasets" |
| 77 | + writing_parameters: "Writing parameters to files" |
| 78 | + generating_file: "Generating {file}" |
| 79 | + dataset_debug: " - {folder} ({id})" |
| 80 | +    no_dataset_write: "Dataset write not requested, skipping" |
| 81 | +    no_parameters_write: "Parameters write not requested, skipping" |
| 82 | + |
| 83 | + database: |
| 84 | +    creating_table: "Creating table {table}" |
| 85 | +    updating_metadata: "Adding/updating runner metadata" |
| 86 | + metadata_updated: "Runner metadata table has been updated" |
| 87 | + sending_data: "Sending data to table {table}" |
| 88 | +    no_rows: " - No rows: skipping" |
| 89 | + column_list: " - Column list: {columns}" |
| 90 | + row_count: " - Sending {count} rows" |
| 91 | + query_results: "Query returned {count} rows" |
| 92 | + saved_results: "Results saved as {file}" |
| 93 | + no_results: "No results returned by the query" |
| 94 | + store_empty: "Data store is empty" |
| 95 | + store_tables: "Data store contains the following tables" |
| 96 | + table_entry: " - {table}" |
| 97 | +    store_reset: "Data store in {folder} has been reset" |
| 98 | + rows_fetched: "Rows fetched in {table} table: {count} in {time} seconds" |
| 99 | +    tables_to_fetch: "Tables to fetch: {tables}" |
| 100 | +    full_dataset: "Full dataset fetched and written in {time} seconds" |
| 101 | + |
| 102 | + storage: |
| 103 | + deleting_objects: "Deleting {objects}" |
| 104 | + no_objects: "No objects to delete" |
| 105 | + downloading: "Downloading {path} to {output}" |
| 106 | + sending_file: "Sending {file} as {name}" |
| 107 | + found_file: "Found {file}, storing it" |
| 108 | + clearing_content: "Clearing all dataset content" |
| 109 | + sending_content: "Sending content of '{file}'" |
| 110 | + row_batch: "Found row count of {count}, sending now" |
| 111 | + import_errors: "Found {count} errors while importing: " |
| 112 | + all_data_sent: "Sent all data found" |
| 113 | + writing_lines: "Writing {count} lines in {file}" |
| 114 | +    all_csv_written: "All CSV files have been written" |
| 115 | + |
| 116 | + orchestrator: |
| 117 | + searching_template: "Searching {template} in the solution" |
| 118 | + template_not_found: "Run template {template} was not found." |
| 119 | +    generating_json: "Found {template} in the solution, generating JSON file" |
| 120 | + no_parameters: "No parameters to write for {template}" |
| 121 | + creating_folders: "Creating folders for dataset parameters" |
| 122 | + folder_created: "- {folder}" |
| 123 | + step_found: "- {step} step found" |
| 124 | +    steps_summary: "{count} step{plural} found, writing JSON file" |
| 125 | + loading_solution: "Loading Workspace information to get Solution ID" |
| 126 | + querying_handler: "Querying Handler {handler} for {template}" |
| 127 | + handler_not_found: "Handler {handler} was not found for Run Template {template} in Solution {solution}" |
| 128 | + extracting_handler: "Extracting handler to {path}" |
| 129 | + handler_not_zip: "Handler {handler} is not a zip file" |
| 130 | +    run_issues: "Issues were encountered during the run, please check the previous logs" |
| 131 | + |
| 132 | + postgresql: |
| 133 | + getting_schema: "Getting schema for table {postgres_schema}.{target_table_name}" |
| 134 | + table_not_found: "Table {postgres_schema}.{target_table_name} not found" |
| 135 | + schema_adaptation_start: "Starting schema adaptation for table with {rows} rows" |
| 136 | + original_schema: "Original schema: {schema}" |
| 137 | + target_schema: "Target schema: {schema}" |
| 138 | + casting_column: "Attempting to cast column '{field_name}' from {original_type} to {target_type}" |
| 139 | + cast_failed: "Failed to cast column '{field_name}' from {original_type} to {target_type}. Filling with nulls. Error: {error}" |
| 140 | + adding_missing_column: "Adding missing column '{field_name}' with null values" |
| 141 | + dropping_columns: "Dropping extra columns not in target schema: {columns}" |
| 142 | + adaptation_summary: "Schema adaptation summary:" |
| 143 | + added_columns: "- Added columns (filled with nulls): {columns}" |
| 144 | + dropped_columns: "- Dropped columns: {columns}" |
| 145 | + successful_conversions: "- Successful type conversions: {conversions}" |
| 146 | + failed_conversions: "- Failed conversions (filled with nulls): {conversions}" |
| 147 | + final_schema: "Final adapted table schema: {schema}" |
| 148 | + preparing_send: "Preparing to send data to PostgreSQL table '{postgres_schema}.{target_table_name}'" |
| 149 | + input_rows: "Input table has {rows} rows" |
| 150 | + found_existing_table: "Found existing table with schema: {schema}" |
| 151 | + adapting_data: "Adapting incoming data to match existing schema" |
| 152 | + replace_mode: "Replace mode enabled - skipping schema adaptation" |
| 153 | + no_existing_table: "No existing table found - will create new table" |
| 154 | + connecting: "Connecting to PostgreSQL database" |
| 155 | + ingesting_data: "Ingesting data with mode: {mode}" |
| 156 | + ingestion_success: "Successfully ingested {rows} rows" |
| 157 | + |
| 158 | + adx: |
| 159 | + creating_kusto_client: "Creating Kusto client for cluster: {cluster_url}" |
| 160 | + creating_ingest_client: "Creating ingest client for URL: {ingest_url}" |
| 161 | + using_app_auth: "Using Azure AD application authentication" |
| 162 | + using_cli_auth: "Using Azure CLI authentication" |
| 163 | + generating_urls: "Generating URLs for cluster {cluster_name} in region {cluster_region}" |
| 164 | + running_query: "Running query on database {database}: {query}" |
| 165 | + running_command: "Running command on database {database}: {query}" |
| 166 | + query_complete: "Query complete, returned {rows} rows" |
| 167 | + command_complete: "Command execution complete" |
| 168 | + ingesting_dataframe: "Ingesting dataframe with {rows} rows to table {table_name}" |
| 169 | + ingestion_queued: "Ingestion queued with source ID: {source_id}" |
| 170 | + sending_to_adx: "Sending {items} items to ADX table {table_name}" |
| 171 | + empty_dict_list: "Empty dictionary list provided, nothing to send" |
| 172 | + table_creation_failed: "Error creating table {table_name}" |
| 173 | + checking_status: "Checking ingestion status for {count} operations" |
| 174 | + status_messages: "Found {success} success messages and {failure} failure messages" |
| 175 | + status_found: "Found status for {source_id}: {status}" |
| 176 | + ingestion_timeout: "Ingestion operation {source_id} timed out" |
| 177 | + clear_queues_no_confirmation: "Clear queues operation requires confirmation=True" |
| 178 | + clearing_queues: "DANGER: Clearing all ingestion status queues" |
| 179 | + queues_cleared: "All ingestion status queues have been cleared" |
| 180 | + checking_table: "Checking if table {table_name} exists in database {database}" |
| 181 | + table_exists: "Table {table_name} exists" |
| 182 | + table_not_exists: "Table {table_name} does not exist" |
| 183 | + creating_table: "Creating table {table_name} in database {database}" |
| 184 | + create_query: "Create table query: {query}" |
| 185 | + table_created: "Table {table_name} created successfully" |
| 186 | + table_creation_error: "Error creating table {table_name}: {error}" |
| 187 | + mapping_type: "Mapping type for key {key} with value type {value_type}" |
| 188 | + |
| 189 | + dataset: |
| 190 | + # General |
| 191 | + download_started: "Starting download of {dataset_type} dataset" |
| 192 | + download_completed: "Successfully downloaded {dataset_type} dataset" |
| 193 | + operation_timing: "{operation} took {time} seconds" |
| 194 | + dataset_downloading: "Downloading dataset (organization: {organization_id}, dataset: {dataset_id})" |
| 195 | + dataset_info_retrieved: "Retrieved dataset info: {dataset_name} ({dataset_id})" |
| 196 | + dataset_type_detected: "Detected dataset type: {type}" |
| 197 | + parallel_download: "Downloading {count} datasets in parallel" |
| 198 | + sequential_download: "Downloading {count} datasets sequentially" |
| 199 | + |
| 200 | + # Processing |
| 201 | + processing_graph_data: "Processing graph data with {nodes_count} nodes and {relationships_count} relationships (restore_names={restore_names})" |
| 202 | + entity_count: "Found {count} entities of type {entity_type}" |
| 203 | + extracting_headers: "Extracting headers from {rows} rows" |
| 204 | + headers_extracted: "Extracted {count} fields: {fields}" |
| 205 | + |
| 206 | + # File operations |
| 207 | + converting_to_files: "Converting {dataset_type} dataset '{dataset_name}' to files" |
| 208 | + created_temp_folder: "Created temporary folder: {folder}" |
| 209 | + using_folder: "Using folder: {folder}" |
| 210 | + converting_graph_data: "Converting graph data with {entity_types} entity types to folder: {folder}" |
| 211 | + converting_file_data: "Converting {file_count} files of type {file_type} to folder: {folder}" |
| 212 | + skipping_empty_entity: "Skipping empty entity type: {entity_type}" |
| 213 | + writing_csv: "Writing CSV file with {count} records: {file_name}" |
| 214 | + writing_file: "Writing file: {file_name} (type: {file_type})" |
| 215 | + file_written: "File written: {file_path}" |
| 216 | + files_created: "Created {count} files in folder: {folder}" |
| 217 | + |
| 218 | + # ADT specific |
| 219 | + adt_connecting: "Connecting to ADT instance at {url}" |
| 220 | + adt_no_credentials: "No credentials available for ADT connection" |
| 221 | + adt_querying_twins: "Querying digital twins" |
| 222 | + adt_twins_found: "Found {count} digital twins" |
| 223 | + adt_querying_relations: "Querying relationships" |
| 224 | + adt_relations_found: "Found {count} relationships" |
| 225 | + |
| 226 | + # TwinGraph specific |
| 227 | + twingraph_downloading: "Downloading TwinGraph dataset (organization: {organization_id}, dataset: {dataset_id})" |
| 228 | + twingraph_querying_nodes: "Querying TwinGraph nodes for dataset {dataset_id}" |
| 229 | + twingraph_nodes_found: "Found {count} nodes in TwinGraph" |
| 230 | + twingraph_querying_edges: "Querying TwinGraph edges for dataset {dataset_id}" |
| 231 | + twingraph_edges_found: "Found {count} edges in TwinGraph" |
| 232 | + |
| 233 | + # Legacy TwinGraph specific |
| 234 | + legacy_twingraph_downloading: "Downloading legacy TwinGraph dataset (organization: {organization_id}, cache: {cache_name})" |
| 235 | + legacy_twingraph_querying_nodes: "Querying legacy TwinGraph nodes for cache {cache_name}" |
| 236 | + legacy_twingraph_nodes_found: "Found {count} nodes in legacy TwinGraph" |
| 237 | + legacy_twingraph_querying_relations: "Querying legacy TwinGraph relationships for cache {cache_name}" |
| 238 | + legacy_twingraph_relations_found: "Found {count} relationships in legacy TwinGraph" |
| 239 | + |
| 240 | + # File specific |
| 241 | + file_downloading: "Downloading file dataset (organization: {organization_id}, workspace: {workspace_id}, file: {file_name})" |
| 242 | + listing_workspace_files: "Listing workspace files" |
| 243 | + workspace_files_found: "Found {count} workspace files" |
| 244 | + no_files_found: "No files found matching: {file_name}" |
| 245 | + downloading_file: "Downloading file: {file_name}" |
| 246 | + file_downloaded: "Downloaded file: {file_name} to {path}" |
| 247 | + |
| 248 | + # File processing |
| 249 | + processing_excel: "Processing Excel file: {file_name}" |
| 250 | + sheet_processed: "Processed sheet {sheet_name} with {rows} rows" |
| 251 | + processing_csv: "Processing CSV file: {file_name}" |
| 252 | + csv_processed: "Processed CSV file {file_name} with {rows} rows" |
| 253 | + processing_json: "Processing JSON file: {file_name}" |
| 254 | + json_processed: "Processed JSON file {file_name} with {items} items" |
| 255 | + processing_text: "Processing text file: {file_name}" |
| 256 | + text_processed: "Processed text file {file_name} with {lines} lines" |
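
For reference, the entries above use Python `str.format`-style placeholders, including format specs such as `{time:0.3}` and alignment fields like `{param_id:<{max_name_size}}`. Below is a minimal sketch of how such a catalog could be loaded and one entry formatted; the file path, the loading mechanism, and the exact key nesting are illustrative assumptions, not taken from the actual codebase.

```python
# Minimal sketch (assumed usage): load a message catalog like the one above
# with PyYAML and format a single entry. The path "translations/en.yml" and
# the key nesting are illustrative assumptions.
import yaml

with open("translations/en.yml") as f:
    messages = yaml.safe_load(f)

# Walk the nested keys to reach one template, e.g. logs -> progress -> operation_timing.
template = messages["logs"]["progress"]["operation_timing"]

# Placeholders are standard str.format fields, so format specs like {time:0.3}
# (three significant digits) behave as usual.
print(template.format(operation="Download", time=1.23456))
# e.g. "Download took 1.23s"
```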