
Commit 90496be

fix datafactory quickstart for python

Because the Python SDK was updated (https://pypi.org/project/azure-mgmt-datafactory/), there are many errors in the Data Factory quickstart Python code. This PR updates the code for azure-mgmt-datafactory 0.8.0.

1 parent 1c9858e commit 90496be
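
For context, a minimal end-to-end sketch of the authoring calls in their updated 0.8.0 form. The client setup and the placeholder values (credentials, `rg_name`, `df_name`) follow the quickstart and are assumptions here, not part of this diff:

```python
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.datafactory import DataFactoryManagementClient
from azure.mgmt.datafactory.models import (
    SecureString, AzureStorageLinkedService, LinkedServiceReference,
    AzureBlobDataset)

# Placeholder values as in the quickstart; substitute your own.
credentials = ServicePrincipalCredentials(
    client_id='<client id>', secret='<client secret>', tenant='<tenant id>')
adf_client = DataFactoryManagementClient(credentials, '<subscription id>')
rg_name = 'ADFTutorialResourceGroup'
df_name = '<factory name>'

# 0.8.0 model constructors are keyword-only, so 'value=' must be spelled out.
storage_string = SecureString(
    value='DefaultEndpointsProtocol=https;AccountName=<storageaccountname>;AccountKey=<storageaccountkey>')
ls_azure_storage = AzureStorageLinkedService(connection_string=storage_string)
ls = adf_client.linked_services.create_or_update(
    rg_name, df_name, 'storageLinkedService', ls_azure_storage)

# Datasets likewise take 'linked_service_name=' as a keyword argument.
ds_ls = LinkedServiceReference(reference_name='storageLinkedService')
ds_azure_blob = AzureBlobDataset(
    linked_service_name=ds_ls, folder_path='adfv2tutorial/input',
    file_name='input.txt')
ds = adf_client.datasets.create_or_update(
    rg_name, df_name, 'ds_in', ds_azure_blob)
```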

File tree

1 file changed

articles/data-factory/quickstart-create-data-factory-python.md

Lines changed: 17 additions & 17 deletions
@@ -153,7 +153,7 @@ You create linked services in a data factory to link your data stores and comput
 ls_name = 'storageLinkedService'

 # IMPORTANT: specify the name and key of your Azure Storage account.
-storage_string = SecureString('DefaultEndpointsProtocol=https;AccountName=<storageaccountname>;AccountKey=<storageaccountkey>')
+storage_string = SecureString(value='DefaultEndpointsProtocol=https;AccountName=<storageaccountname>;AccountKey=<storageaccountkey>')

 ls_azure_storage = AzureStorageLinkedService(connection_string=storage_string)
 ls = adf_client.linked_services.create_or_update(rg_name, df_name, ls_name, ls_azure_storage)
@@ -222,10 +222,7 @@ Add the following code to the **Main** method that **triggers a pipeline run**.

 ```python
 #Create a pipeline run.
-run_response = adf_client.pipelines.create_run(rg_name, df_name, p_name,
-    {
-    }
-)
+run_response = adf_client.pipelines.create_run(rg_name, df_name, p_name, parameters={})
 ```

 ## Monitor a pipeline run
@@ -237,8 +234,12 @@ To monitor the pipeline run, add the following code to the **Main** method:
 time.sleep(30)
 pipeline_run = adf_client.pipeline_runs.get(rg_name, df_name, run_response.run_id)
 print("\n\tPipeline run status: {}".format(pipeline_run.status))
-activity_runs_paged = list(adf_client.activity_runs.list_by_pipeline_run(rg_name, df_name, pipeline_run.run_id, datetime.now() - timedelta(1), datetime.now() + timedelta(1)))
-print_activity_run_details(activity_runs_paged[0])
+filter_params = RunFilterParameters(
+    last_updated_after=datetime.now() - timedelta(1), last_updated_before=datetime.now() + timedelta(1))
+query_response = adf_client.activity_runs.query_by_pipeline_run(
+    rg_name, df_name, pipeline_run.run_id, filter_params)
+print_activity_run_details(query_response.value[0])
+
 ```

 Now, add the following statement to invoke the **main** method when the program is run:
@@ -334,7 +335,7 @@ def main():

     # Specify the name and key of your Azure Storage account
     storage_string = SecureString(
-        'DefaultEndpointsProtocol=https;AccountName=<storage account name>;AccountKey=<storage account key>')
+        value='DefaultEndpointsProtocol=https;AccountName=<storage account name>;AccountKey=<storage account key>')

     ls_azure_storage = AzureStorageLinkedService(
         connection_string=storage_string)
@@ -348,15 +349,15 @@ def main():
     blob_path = 'adfv2tutorial/input'
     blob_filename = 'input.txt'
     ds_azure_blob = AzureBlobDataset(
-        ds_ls, folder_path=blob_path, file_name=blob_filename)
+        linked_service_name=ds_ls, folder_path=blob_path, file_name=blob_filename)
     ds = adf_client.datasets.create_or_update(
         rg_name, df_name, ds_name, ds_azure_blob)
     print_item(ds)

     # Create an Azure blob dataset (output)
     dsOut_name = 'ds_out'
     output_blobpath = 'adfv2tutorial/output'
-    dsOut_azure_blob = AzureBlobDataset(ds_ls, folder_path=output_blobpath)
+    dsOut_azure_blob = AzureBlobDataset(linked_service_name=ds_ls, folder_path=output_blobpath)
     dsOut = adf_client.datasets.create_or_update(
         rg_name, df_name, dsOut_name, dsOut_azure_blob)
     print_item(dsOut)
@@ -379,19 +380,18 @@ def main():
     print_item(p)

     # Create a pipeline run
-    run_response = adf_client.pipelines.create_run(rg_name, df_name, p_name,
-        {
-        }
-    )
+    run_response = adf_client.pipelines.create_run(rg_name, df_name, p_name, parameters={})

     # Monitor the pipeline run
     time.sleep(30)
     pipeline_run = adf_client.pipeline_runs.get(
         rg_name, df_name, run_response.run_id)
     print("\n\tPipeline run status: {}".format(pipeline_run.status))
-    activity_runs_paged = list(adf_client.activity_runs.list_by_pipeline_run(
-        rg_name, df_name, pipeline_run.run_id, datetime.now() - timedelta(1), datetime.now() + timedelta(1)))
-    print_activity_run_details(activity_runs_paged[0])
+    filter_params = RunFilterParameters(
+        last_updated_after=datetime.now() - timedelta(1), last_updated_before=datetime.now() + timedelta(1))
+    query_response = adf_client.activity_runs.query_by_pipeline_run(
+        rg_name, df_name, pipeline_run.run_id, filter_params)
+    print_activity_run_details(query_response.value[0])


     # Start the main method
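
Likewise, a sketch of the updated run-and-monitor block. Here `p_name` and the `print_activity_run_details` helper come from the quickstart, and `adf_client`, `rg_name`, and `df_name` are assumed from the sketch above; `RunFilterParameters` lives in `azure.mgmt.datafactory.models`, so the quickstart's `from azure.mgmt.datafactory.models import *` should already cover it:

```python
import time
from datetime import datetime, timedelta
from azure.mgmt.datafactory.models import RunFilterParameters

# 0.8.0: run parameters go in the 'parameters' keyword argument.
run_response = adf_client.pipelines.create_run(
    rg_name, df_name, p_name, parameters={})

# Give the run a moment to start, then poll its status once.
time.sleep(30)
pipeline_run = adf_client.pipeline_runs.get(
    rg_name, df_name, run_response.run_id)
print("\n\tPipeline run status: {}".format(pipeline_run.status))

# 0.8.0 replaces activity_runs.list_by_pipeline_run with query_by_pipeline_run,
# which takes the time window as a RunFilterParameters object.
filter_params = RunFilterParameters(
    last_updated_after=datetime.now() - timedelta(1),
    last_updated_before=datetime.now() + timedelta(1))
query_response = adf_client.activity_runs.query_by_pipeline_run(
    rg_name, df_name, pipeline_run.run_id, filter_params)
print_activity_run_details(query_response.value[0])  # quickstart helper
```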
