+from __future__ import annotations
+
 import asyncio
 import logging
 from datetime import UTC, datetime
-from pathlib import Path
-from typing import Any
+from typing import TYPE_CHECKING, Any
 
 from alembic.autogenerate import compare_metadata
-from alembic.config import Config
 from alembic.runtime.environment import EnvironmentContext
 from alembic.runtime.migration import MigrationContext
 from alembic.script import ScriptDirectory
-from httpx import AsyncClient
-from onetl.connection import FileConnection
-from onetl.file import FileDownloader, FileUploader
-from pyspark.sql import DataFrame
-from pyspark.sql.functions import (
-    col,
-    date_format,
-    date_trunc,
-    from_unixtime,
-    to_timestamp,
-)
 from sqlalchemy import Connection as AlchConnection
 from sqlalchemy import MetaData, pool, text
 from sqlalchemy.ext.asyncio import (
 
 from syncmaster.db.models import Status
 from syncmaster.exceptions.base import EntityNotFoundError
-from syncmaster.server.settings import ServerAppSettings as Settings
-from tests.mocks import MockUser
+
+if TYPE_CHECKING:
+    from pathlib import Path
+
+    from alembic.config import Config
+    from httpx import AsyncClient
+    from onetl.connection import FileConnection
+    from pyspark.sql import DataFrame
+
+    from syncmaster.server.settings import ServerAppSettings as Settings
+    from tests.mocks import MockUser
 
 logger = logging.getLogger(__name__)
 
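The hunk above leans on PEP 563: with `from __future__ import annotations` at the top of the module, annotations are stored as strings and never evaluated at runtime, so names used only in type hints can move under `if TYPE_CHECKING:` and heavyweight packages such as pyspark and onetl are no longer imported when this test-helper module loads. A minimal sketch of the pattern, with illustrative names not taken from this repository:

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Evaluated by type checkers only; skipped entirely at runtime.
    from pyspark.sql import DataFrame


def column_count(df: DataFrame) -> int:  # annotation stays an unevaluated string
    return len(df.columns)
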
@@ -198,6 +196,13 @@ def truncate_datetime_to_seconds(
     init_df: DataFrame,
     transfer_direction: str | None = None,
 ) -> tuple[DataFrame, DataFrame]:
+    from pyspark.sql.functions import (
+        col,
+        date_format,
+        date_trunc,
+        to_timestamp,
+    )
+
     # Excel does not support datetime values with precision greater than milliseconds
     # Spark rounds datetime to nearest 3.33 milliseconds when writing to MSSQL: https://onetl.readthedocs.io/en/latest/connection/db_connection/mssql/types.html#id5
     if transfer_direction == "file_to_db" or transfer_direction is None:
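
The truncation body is cut off by the hunk, but the imports it defers (col, date_format, date_trunc, to_timestamp) suggest normalizing both frames to whole seconds before comparison. A hedged sketch of that step; the REGISTERED_AT column name is borrowed from the next hunk and may not match the elided code:

from pyspark.sql.functions import col, date_trunc

# Zero out sub-second precision on both frames before asserting equality.
df = df.withColumn("REGISTERED_AT", date_trunc("second", col("REGISTERED_AT")))
init_df = init_df.withColumn("REGISTERED_AT", date_trunc("second", col("REGISTERED_AT")))
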
@@ -212,6 +217,11 @@ def truncate_datetime_to_seconds(
 
 
 def round_datetime_to_seconds(df: DataFrame, init_df: DataFrame) -> tuple[DataFrame, DataFrame]:
+    from pyspark.sql.functions import (
+        col,
+        from_unixtime,
+    )
+
     # Spark rounds milliseconds to seconds while writing to MySQL: https://onetl.readthedocs.io/en/latest/connection/db_connection/mysql/types.html#id5
     df = df.withColumn(
         "REGISTERED_AT",
@@ -225,6 +235,8 @@ def round_datetime_to_seconds(df: DataFrame, init_df: DataFrame) -> tuple[DataFr
 
 
 def add_increment_to_files_and_upload(file_connection: FileConnection, remote_path: str, tmp_path: Path) -> None:
+    from onetl.file import FileDownloader, FileUploader
+
     downloader = FileDownloader(
         connection=file_connection,
         source_path=remote_path,
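
The function body continues past the end of the diff; a hedged sketch of the download-modify-upload round-trip its name and visible code imply, using only documented onetl calls (the modification step itself is elided here, as in the diff):

from onetl.file import FileDownloader, FileUploader

downloader = FileDownloader(
    connection=file_connection,
    source_path=remote_path,
    local_path=tmp_path,
)
downloaded = downloader.run()  # DownloadResult listing the fetched files

for file in downloaded.successful:
    ...  # append the increment to each local copy

uploader = FileUploader(
    connection=file_connection,
    target_path=remote_path,
    local_path=tmp_path,
)
uploader.run()  # upload the modified files back to the remote path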