@@ -81,11 +81,11 @@ class SharedDictLstm(GretelModel):
     }
 
 
-def test_run_with_gretel_dataset(working_dir, project, evaluate_report_path, iris):
+def test_run_with_gretel_dataset(working_dir, project, evaluate_report_handle, iris):
     evaluate_model = Mock(
         status=Status.COMPLETED,
     )
-    evaluate_model.get_artifact_link.return_value = evaluate_report_path
+    evaluate_model.get_artifact_handle.return_value = evaluate_report_handle
     project.create_model_obj.side_effect = [evaluate_model]
 
     session = compare(
@@ -107,11 +107,11 @@ def test_run_with_gretel_dataset(working_dir, project, evaluate_report_path, iris):
     assert result["SQS"] == 95
 
 
-def test_run_with_custom_csv_dataset(working_dir, project, evaluate_report_path, df):
+def test_run_with_custom_csv_dataset(working_dir, project, evaluate_report_handle, df):
     evaluate_model = Mock(
         status=Status.COMPLETED,
     )
-    evaluate_model.get_artifact_link.return_value = evaluate_report_path
+    evaluate_model.get_artifact_handle.return_value = evaluate_report_handle
     project.create_model_obj.side_effect = [evaluate_model]
 
     with tempfile.NamedTemporaryFile() as f:
@@ -137,11 +137,11 @@ def test_run_with_custom_csv_dataset(working_dir, project, evaluate_report_path, df):
     assert result["SQS"] == 95
 
 
-def test_run_with_custom_psv_dataset(working_dir, project, evaluate_report_path, df):
+def test_run_with_custom_psv_dataset(working_dir, project, evaluate_report_handle, df):
     evaluate_model = Mock(
         status=Status.COMPLETED,
     )
-    evaluate_model.get_artifact_link.return_value = evaluate_report_path
+    evaluate_model.get_artifact_handle.return_value = evaluate_report_handle
     project.create_model_obj.side_effect = [evaluate_model]
 
     with tempfile.NamedTemporaryFile() as f:
@@ -168,12 +168,12 @@ def test_run_with_custom_psv_dataset(working_dir, project, evaluate_report_path, df):
 
 
 def test_run_with_custom_dataframe_dataset(
-    working_dir, project, evaluate_report_path, df
+    working_dir, project, evaluate_report_handle, df
 ):
     evaluate_model = Mock(
         status=Status.COMPLETED,
     )
-    evaluate_model.get_artifact_link.return_value = evaluate_report_path
+    evaluate_model.get_artifact_handle.return_value = evaluate_report_handle
     project.create_model_obj.side_effect = [evaluate_model]
 
     dataset = create_dataset(df, datatype="tabular", name="pets")
@@ -205,7 +205,7 @@ def test_run_with_custom_dataframe_dataset(
 
 @pytest.mark.parametrize("benchmark_model", [GretelLSTM, TailoredActgan])
 def test_run_happy_path_gretel_sdk(
-    benchmark_model, working_dir, iris, project, evaluate_report_path
+    benchmark_model, working_dir, iris, project, evaluate_report_handle
 ):
     record_handler = Mock(
         status=Status.COMPLETED,
@@ -221,7 +221,7 @@ def test_run_happy_path_gretel_sdk(
     evaluate_model = Mock(
         status=Status.COMPLETED,
     )
-    evaluate_model.get_artifact_link.return_value = evaluate_report_path
+    evaluate_model.get_artifact_handle.return_value = evaluate_report_handle
 
     project.create_model_obj.side_effect = [model, evaluate_model]
 
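Note: a minimal sketch of what the renamed evaluate_report_handle fixture could provide, assuming the Benchmark session now reads the evaluate report through a context-managed, file-like handle rather than downloading it from an artifact link. The fixture name and the mocked get_artifact_handle call come from the diff; the report shape and the _open_report helper below are illustrative assumptions, not part of the commit.

import io
import json
from contextlib import contextmanager

import pytest


@pytest.fixture()
def evaluate_report_handle():
    # Assumed report payload; the tests above only assert that SQS == 95.
    report = {"synthetic_data_quality_score": {"score": 95}}

    @contextmanager
    def _open_report():
        # Yield an in-memory, file-like object standing in for a remote artifact handle.
        yield io.BytesIO(json.dumps(report).encode("utf-8"))

    return _open_report()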