
Commit b58b147

Commit message: update
1 parent 5c40bca commit b58b147

11 files changed: +246 −31 lines changed

experimental/python/databricks/bundles/jobs/_models/environment.py

Lines changed: 2 additions & 2 deletions

@@ -33,7 +33,7 @@ class Environment:
     The version is a string, consisting of an integer.
     """
 
-    jar_dependencies: VariableOrList[str] = field(default_factory=list)
+    java_dependencies: VariableOrList[str] = field(default_factory=list)
     """
     :meta private: [EXPERIMENTAL]
 
@@ -68,7 +68,7 @@ class EnvironmentDict(TypedDict, total=False):
     The version is a string, consisting of an integer.
     """
 
-    jar_dependencies: VariableOrList[str]
+    java_dependencies: VariableOrList[str]
     """
     :meta private: [EXPERIMENTAL]
 
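The rename from jar_dependencies to java_dependencies is a breaking change for any bundle code that sets the old field. A minimal sketch of the updated usage, assuming Environment is re-exported from databricks.bundles.jobs like the other model classes; the jar path is hypothetical:

from databricks.bundles.jobs import Environment

# After this commit the experimental field is named `java_dependencies`;
# `jar_dependencies` no longer exists on Environment or EnvironmentDict.
env = Environment(
    java_dependencies=["/Volumes/main/default/libs/my-library.jar"],  # hypothetical path
)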

experimental/python/databricks/bundles/jobs/_models/spark_submit_task.py

Lines changed: 3 additions & 1 deletion

@@ -11,7 +11,9 @@
 
 @dataclass(kw_only=True)
 class SparkSubmitTask:
-    """"""
+    """
+    [DEPRECATED]
+    """
 
     parameters: VariableOrList[str] = field(default_factory=list)
     """

experimental/python/databricks/bundles/jobs/_models/task.py

Lines changed: 18 additions & 20 deletions

@@ -104,7 +104,7 @@ class Task:
 
     clean_rooms_notebook_task: VariableOrOptional[CleanRoomsNotebookTask] = None
     """
-    The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook
+    The task runs a [clean rooms](https://docs.databricks.com/clean-rooms/index.html) notebook
     when the `clean_rooms_notebook_task` field is present.
     """
 
@@ -145,6 +145,13 @@ class Task:
     An option to disable auto optimization in serverless
     """
 
+    disabled: VariableOrOptional[bool] = None
+    """
+    :meta private: [EXPERIMENTAL]
+
+    An optional flag to disable the task. If set to true, the task will not run even if it is part of a job.
+    """
+
     email_notifications: VariableOrOptional[TaskEmailNotifications] = None
     """
     An optional set of email addresses that is notified when runs of this task begin or complete as well as when this task is deleted. The default behavior is to not send any emails.
@@ -261,15 +268,7 @@ class Task:
 
     spark_submit_task: VariableOrOptional[SparkSubmitTask] = None
     """
-    (Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute.
-
-    In the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.
-
-    `master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.
-
-    By default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.
-
-    The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.
+    [DEPRECATED] (Legacy) The task runs the spark-submit script when the spark_submit_task field is present. Databricks recommends using the spark_jar_task instead; see [Spark Submit task for jobs](/jobs/spark-submit).
     """
 
     sql_task: VariableOrOptional[SqlTask] = None
@@ -307,7 +306,7 @@ class TaskDict(TypedDict, total=False):
 
     clean_rooms_notebook_task: VariableOrOptional[CleanRoomsNotebookTaskParam]
     """
-    The task runs a [clean rooms](https://docs.databricks.com/en/clean-rooms/index.html) notebook
+    The task runs a [clean rooms](https://docs.databricks.com/clean-rooms/index.html) notebook
     when the `clean_rooms_notebook_task` field is present.
     """
 
@@ -348,6 +347,13 @@ class TaskDict(TypedDict, total=False):
     An option to disable auto optimization in serverless
     """
 
+    disabled: VariableOrOptional[bool]
+    """
+    :meta private: [EXPERIMENTAL]
+
+    An optional flag to disable the task. If set to true, the task will not run even if it is part of a job.
+    """
+
     email_notifications: VariableOrOptional[TaskEmailNotificationsParam]
     """
     An optional set of email addresses that is notified when runs of this task begin or complete as well as when this task is deleted. The default behavior is to not send any emails.
@@ -464,15 +470,7 @@ class TaskDict(TypedDict, total=False):
 
     spark_submit_task: VariableOrOptional[SparkSubmitTaskParam]
     """
-    (Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute.
-
-    In the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.
-
-    `master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.
-
-    By default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.
-
-    The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.
+    [DEPRECATED] (Legacy) The task runs the spark-submit script when the spark_submit_task field is present. Databricks recommends using the spark_jar_task instead; see [Spark Submit task for jobs](/jobs/spark-submit).
     """
 
     sql_task: VariableOrOptional[SqlTaskParam]
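Beyond the documentation fixes, the substantive change here is the new experimental disabled flag. A sketch of setting it on a task, assuming Task and NotebookTask are re-exported from databricks.bundles.jobs; the task key and notebook path are hypothetical:

from databricks.bundles.jobs import NotebookTask, Task

task = Task(
    task_key="nightly_report",  # hypothetical key
    notebook_task=NotebookTask(notebook_path="/Workspace/reports/nightly"),  # hypothetical path
    disabled=True,  # [EXPERIMENTAL] task stays defined in the job but will not run
)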

experimental/python/databricks/bundles/pipelines/__init__.py

Lines changed: 16 additions & 0 deletions

@@ -51,6 +51,12 @@
     "IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig",
     "IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfigDict",
     "IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfigParam",
+    "IngestionPipelineDefinitionWorkdayReportParameters",
+    "IngestionPipelineDefinitionWorkdayReportParametersDict",
+    "IngestionPipelineDefinitionWorkdayReportParametersParam",
+    "IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue",
+    "IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValueDict",
+    "IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValueParam",
     "IngestionSourceType",
     "IngestionSourceTypeParam",
     "InitScriptInfo",
@@ -230,6 +236,16 @@
     IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfigDict,
     IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfigParam,
 )
+from databricks.bundles.pipelines._models.ingestion_pipeline_definition_workday_report_parameters import (
+    IngestionPipelineDefinitionWorkdayReportParameters,
+    IngestionPipelineDefinitionWorkdayReportParametersDict,
+    IngestionPipelineDefinitionWorkdayReportParametersParam,
+)
+from databricks.bundles.pipelines._models.ingestion_pipeline_definition_workday_report_parameters_query_key_value import (
+    IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue,
+    IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValueDict,
+    IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValueParam,
+)
 from databricks.bundles.pipelines._models.ingestion_source_type import (
     IngestionSourceType,
     IngestionSourceTypeParam,
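These re-exports make the new Workday report parameter types importable from the public package rather than the private _models modules:

from databricks.bundles.pipelines import (
    IngestionPipelineDefinitionWorkdayReportParameters,
    IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue,
)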

experimental/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition.py

Lines changed: 18 additions & 0 deletions

@@ -39,6 +39,15 @@ class IngestionPipelineDefinition:
     Immutable. Identifier for the gateway that is used by this ingestion pipeline to communicate with the source database. This is used with connectors to databases like SQL Server.
     """
 
+    netsuite_jar_path: VariableOrOptional[str] = None
+    """
+    :meta private: [EXPERIMENTAL]
+
+    Netsuite only configuration. When the field is set for a netsuite connector,
+    the jar stored in the field will be validated and added to the classpath of
+    pipeline's cluster.
+    """
+
     objects: VariableOrList[IngestionConfig] = field(default_factory=list)
     """
     Required. Settings specifying tables to replicate and the destination for the replicated tables.
@@ -84,6 +93,15 @@ class IngestionPipelineDefinitionDict(TypedDict, total=False):
     Immutable. Identifier for the gateway that is used by this ingestion pipeline to communicate with the source database. This is used with connectors to databases like SQL Server.
     """
 
+    netsuite_jar_path: VariableOrOptional[str]
+    """
+    :meta private: [EXPERIMENTAL]
+
+    Netsuite only configuration. When the field is set for a netsuite connector,
+    the jar stored in the field will be validated and added to the classpath of
+    pipeline's cluster.
+    """
+
     objects: VariableOrList[IngestionConfigParam]
     """
     Required. Settings specifying tables to replicate and the destination for the replicated tables.
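A sketch of how the new experimental field might be set; the volume path is hypothetical, and the connector's other settings (connection, objects to replicate) are omitted for brevity:

from databricks.bundles.pipelines import IngestionPipelineDefinition

definition = IngestionPipelineDefinition(
    # [EXPERIMENTAL] jar is validated and added to the pipeline cluster's classpath
    netsuite_jar_path="/Volumes/main/default/libs/netsuite-connector.jar",
)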
Lines changed: 95 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,95 @@
1+
from dataclasses import dataclass, field
2+
from typing import TYPE_CHECKING, TypedDict
3+
4+
from databricks.bundles.core._transform import _transform
5+
from databricks.bundles.core._transform_to_json import _transform_to_json_value
6+
from databricks.bundles.core._variable import (
7+
VariableOrDict,
8+
VariableOrList,
9+
VariableOrOptional,
10+
)
11+
from databricks.bundles.pipelines._models.ingestion_pipeline_definition_workday_report_parameters_query_key_value import (
12+
IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue,
13+
IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValueParam,
14+
)
15+
16+
if TYPE_CHECKING:
17+
from typing_extensions import Self
18+
19+
20+
@dataclass(kw_only=True)
21+
class IngestionPipelineDefinitionWorkdayReportParameters:
22+
"""
23+
:meta private: [EXPERIMENTAL]
24+
"""
25+
26+
incremental: VariableOrOptional[bool] = None
27+
"""
28+
[DEPRECATED] (Optional) Marks the report as incremental.
29+
This field is deprecated and should not be used. Use `parameters` instead. The incremental behavior is now
30+
controlled by the `parameters` field.
31+
"""
32+
33+
parameters: VariableOrDict[str] = field(default_factory=dict)
34+
"""
35+
Parameters for the Workday report. Each key represents the parameter name (e.g., "start_date", "end_date"),
36+
and the corresponding value is a SQL-like expression used to compute the parameter value at runtime.
37+
Example:
38+
{
39+
"start_date": "{ coalesce(current_offset(), date(\"2025-02-01\")) }",
40+
"end_date": "{ current_date() - INTERVAL 1 DAY }"
41+
}
42+
"""
43+
44+
report_parameters: VariableOrList[
45+
IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue
46+
] = field(default_factory=list)
47+
"""
48+
[DEPRECATED] (Optional) Additional custom parameters for Workday Report
49+
This field is deprecated and should not be used. Use `parameters` instead.
50+
"""
51+
52+
@classmethod
53+
def from_dict(
54+
cls, value: "IngestionPipelineDefinitionWorkdayReportParametersDict"
55+
) -> "Self":
56+
return _transform(cls, value)
57+
58+
def as_dict(self) -> "IngestionPipelineDefinitionWorkdayReportParametersDict":
59+
return _transform_to_json_value(self) # type:ignore
60+
61+
62+
class IngestionPipelineDefinitionWorkdayReportParametersDict(TypedDict, total=False):
63+
""""""
64+
65+
incremental: VariableOrOptional[bool]
66+
"""
67+
[DEPRECATED] (Optional) Marks the report as incremental.
68+
This field is deprecated and should not be used. Use `parameters` instead. The incremental behavior is now
69+
controlled by the `parameters` field.
70+
"""
71+
72+
parameters: VariableOrDict[str]
73+
"""
74+
Parameters for the Workday report. Each key represents the parameter name (e.g., "start_date", "end_date"),
75+
and the corresponding value is a SQL-like expression used to compute the parameter value at runtime.
76+
Example:
77+
{
78+
"start_date": "{ coalesce(current_offset(), date(\"2025-02-01\")) }",
79+
"end_date": "{ current_date() - INTERVAL 1 DAY }"
80+
}
81+
"""
82+
83+
report_parameters: VariableOrList[
84+
IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValueParam
85+
]
86+
"""
87+
[DEPRECATED] (Optional) Additional custom parameters for Workday Report
88+
This field is deprecated and should not be used. Use `parameters` instead.
89+
"""
90+
91+
92+
IngestionPipelineDefinitionWorkdayReportParametersParam = (
93+
IngestionPipelineDefinitionWorkdayReportParametersDict
94+
| IngestionPipelineDefinitionWorkdayReportParameters
95+
)
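A sketch of the recommended parameters usage, mirroring the expressions from the field's own docstring example:

from databricks.bundles.pipelines import (
    IngestionPipelineDefinitionWorkdayReportParameters,
)

report_params = IngestionPipelineDefinitionWorkdayReportParameters(
    parameters={
        # SQL-like expressions evaluated at runtime, per the docstring example
        "start_date": '{ coalesce(current_offset(), date("2025-02-01")) }',
        "end_date": "{ current_date() - INTERVAL 1 DAY }",
    },
)

# The generated Dict form converts via the from_dict/as_dict helpers shown above:
as_dict = report_params.as_dict()
same = IngestionPipelineDefinitionWorkdayReportParameters.from_dict(as_dict)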
experimental/python/databricks/bundles/pipelines/_models/ingestion_pipeline_definition_workday_report_parameters_query_key_value.py

Lines changed: 70 additions & 0 deletions

@@ -0,0 +1,70 @@
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, TypedDict
+
+from databricks.bundles.core._transform import _transform
+from databricks.bundles.core._transform_to_json import _transform_to_json_value
+from databricks.bundles.core._variable import VariableOrOptional
+
+if TYPE_CHECKING:
+    from typing_extensions import Self
+
+
+@dataclass(kw_only=True)
+class IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue:
+    """
+    :meta private: [EXPERIMENTAL]
+
+    [DEPRECATED]
+    """
+
+    key: VariableOrOptional[str] = None
+    """
+    Key for the report parameter, can be a column name or other metadata
+    """
+
+    value: VariableOrOptional[str] = None
+    """
+    Value for the report parameter.
+    Possible values it can take are these sql functions:
+    1. coalesce(current_offset(), date("YYYY-MM-DD")) -> if current_offset() is null, then the passed date, else current_offset()
+    2. current_date()
+    3. date_sub(current_date(), x) -> subtract x (some non-negative integer) days from current date
+    """
+
+    @classmethod
+    def from_dict(
+        cls,
+        value: "IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValueDict",
+    ) -> "Self":
+        return _transform(cls, value)
+
+    def as_dict(
+        self,
+    ) -> "IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValueDict":
+        return _transform_to_json_value(self)  # type:ignore
+
+
+class IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValueDict(
+    TypedDict, total=False
+):
+    """"""
+
+    key: VariableOrOptional[str]
+    """
+    Key for the report parameter, can be a column name or other metadata
+    """
+
+    value: VariableOrOptional[str]
+    """
+    Value for the report parameter.
+    Possible values it can take are these sql functions:
+    1. coalesce(current_offset(), date("YYYY-MM-DD")) -> if current_offset() is null, then the passed date, else current_offset()
+    2. current_date()
+    3. date_sub(current_date(), x) -> subtract x (some non-negative integer) days from current date
+    """
+
+
+IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValueParam = (
+    IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValueDict
+    | IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue
+)
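This class models the deprecated list-of-pairs shape, kept for backward compatibility; prefer the parameters dict above. A short sketch with a hypothetical key, using one of the documented value functions:

from databricks.bundles.pipelines import (
    IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue,
)

legacy_param = IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue(
    key="start_date",  # hypothetical parameter key
    value='coalesce(current_offset(), date("2025-02-01"))',  # documented function form
)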

experimental/python/databricks/bundles/pipelines/_models/ingestion_source_type.py

Lines changed: 2 additions & 0 deletions

@@ -21,6 +21,7 @@ class IngestionSourceType(Enum):
     DYNAMICS365 = "DYNAMICS365"
     CONFLUENCE = "CONFLUENCE"
     META_MARKETING = "META_MARKETING"
+    FOREIGN_CATALOG = "FOREIGN_CATALOG"
 
 
 IngestionSourceTypeParam = (
@@ -43,6 +44,7 @@ class IngestionSourceType(Enum):
         "DYNAMICS365",
         "CONFLUENCE",
         "META_MARKETING",
+        "FOREIGN_CATALOG",
     ]
     | IngestionSourceType
 )
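Because IngestionSourceTypeParam is a union of the enum and its literal strings, both spellings of the new member are accepted wherever the Param type is used:

from databricks.bundles.pipelines import IngestionSourceType

as_enum = IngestionSourceType.FOREIGN_CATALOG
as_literal = "FOREIGN_CATALOG"  # also satisfies IngestionSourceTypeParam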

experimental/python/databricks/bundles/pipelines/_models/pipeline.py

Lines changed: 0 additions & 6 deletions

@@ -184,9 +184,6 @@ class Pipeline(Resource):
     """
 
     run_as: VariableOrOptional[RunAs] = None
-    """
-    :meta private: [EXPERIMENTAL]
-    """
 
     schema: VariableOrOptional[str] = None
     """
@@ -347,9 +344,6 @@ class PipelineDict(TypedDict, total=False):
     """
 
     run_as: VariableOrOptional[RunAsParam]
-    """
-    :meta private: [EXPERIMENTAL]
-    """
 
     schema: VariableOrOptional[str]
     """

experimental/python/databricks/bundles/pipelines/_models/run_as.py

Lines changed: 0 additions & 2 deletions

@@ -12,8 +12,6 @@
 @dataclass(kw_only=True)
 class RunAs:
     """
-    :meta private: [EXPERIMENTAL]
-
     Write-only setting, available only in Create/Update calls. Specifies the user or service principal that the pipeline runs as. If not specified, the pipeline runs as the user who created the pipeline.
 
     Only `user_name` or `service_principal_name` can be specified. If both are specified, an error is thrown.
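The docstring's either/or constraint in a short sketch (principals hypothetical):

from databricks.bundles.pipelines import RunAs

ok = RunAs(service_principal_name="sp-pipelines")  # exactly one principal: valid
also_ok = RunAs(user_name="jane.doe@example.com")  # valid
# RunAs(user_name=..., service_principal_name=...)  # both set: an error is thrown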
