Commit 5d180e5

Fix typo in function name
1 parent ec565ac

8 files changed: +44 −44 lines changed

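All 44 changed lines rename `logging_utils.log_reponse_error` to `logging_utils.log_response_error`; the two changed files not shown below presumably include `dbclient/logging_utils.py`, where the definition itself is renamed, so the commit is behavior-preserving. The helper's body is not part of this diff; the following is a minimal sketch of what it plausibly looks like, with the signature and boolean contract inferred from the call sites (everything inside the function is an assumption):

import json

# Hypothetical reconstruction -- the real implementation lives in
# dbclient/logging_utils.py and is not shown in this commit.
def log_response_error(error_logger, response, ignore_error_list=None):
    """Return True (and log the payload) if `response` carries an error.

    Returns False when the response is clean or its error code is in
    `ignore_error_list`, so callers can gate checkpoint writes with
    `if not log_response_error(...)`.
    """
    ignore_error_list = ignore_error_list or []
    error_code = response.get('error_code')  # assumed error field name
    if error_code and error_code not in ignore_error_list:
        error_logger.error(json.dumps(response))
        return True
    return False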
dbclient/ClustersClient.py

Lines changed: 6 additions & 6 deletions
@@ -276,7 +276,7 @@ def import_cluster_configs(self, log_file='clusters.log', acl_log_file='acl_clus
                 if 'cluster_id' in cluster_conf:
                     checkpoint_cluster_configs_set.write(cluster_conf['cluster_id'])
             else:
-                logging_utils.log_reponse_error(error_logger, cluster_resp)
+                logging_utils.log_response_error(error_logger, cluster_resp)
                 print(cluster_resp)
 
         # TODO: May be put it into a separate step to make it more rerunnable.
@@ -299,7 +299,7 @@ def import_cluster_configs(self, log_file='clusters.log', acl_log_file='acl_clus
                 raise ValueError(error_message)
             api = f'/preview/permissions/clusters/{cid}'
             resp = self.put(api, acl_args)
-            if not logging_utils.log_reponse_error(error_logger, resp):
+            if not logging_utils.log_response_error(error_logger, resp):
                 if 'object_id' in data:
                     checkpoint_cluster_configs_set.write(data['object_id'])
             print(resp)
@@ -397,7 +397,7 @@ def import_cluster_policies(self, log_file='cluster_policies.log', acl_log_file=
                                'definition': policy_conf['definition']}
                 resp = self.post('/policies/clusters/create', create_args)
                 ignore_error_list = ['INVALID_PARAMETER_VALUE']
-                if not logging_utils.log_reponse_error(error_logger, resp, ignore_error_list=ignore_error_list):
+                if not logging_utils.log_response_error(error_logger, resp, ignore_error_list=ignore_error_list):
                     if 'policy_id' in policy_conf:
                         checkpoint_cluster_policies_set.write(policy_conf['policy_id'])
 
@@ -412,7 +412,7 @@ def import_cluster_policies(self, log_file='cluster_policies.log', acl_log_file=
                     policy_id = id_map[p_acl['name']]
                     api = f'/permissions/cluster-policies/{policy_id}'
                     resp = self.put(api, acl_create_args)
-                    if not logging_utils.log_reponse_error(error_logger, resp):
+                    if not logging_utils.log_response_error(error_logger, resp):
                         if 'object_id' in p_acl:
                             checkpoint_cluster_policies_set.write(p_acl['object_id'])
                 else:
@@ -430,7 +430,7 @@ def import_instance_pools(self, log_file='instance_pools.log'):
                 pool_conf = json.loads(line)
                 pool_resp = self.post('/instance-pools/create', pool_conf)
                 ignore_error_list = ['INVALID_PARAMETER_VALUE']
-                logging_utils.log_reponse_error(error_logger, pool_resp, ignore_error_list=ignore_error_list)
+                logging_utils.log_response_error(error_logger, pool_resp, ignore_error_list=ignore_error_list)
 
     def import_instance_profiles(self, log_file='instance_profiles.log'):
         # currently an AWS only operation
@@ -453,7 +453,7 @@ def import_instance_profiles(self, log_file='instance_profiles.log'):
             if ip_arn not in list_of_profiles:
                 print("Importing arn: {0}".format(ip_arn))
                 resp = self.post('/instance-profiles/add', {'instance_profile_arn': ip_arn})
-                if not logging_utils.log_reponse_error(error_logger, resp):
+                if not logging_utils.log_response_error(error_logger, resp):
                     import_profiles_count += 1
             else:
                 logging.info("Skipping since profile already exists: {0}".format(ip_arn))

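Every ClustersClient call site follows the same checkpoint-on-success idiom: issue the REST call, run the response through the helper, and record the object in the checkpoint set only when no error was logged, optionally tolerating specific error codes so re-runs can skip work that already succeeded. Condensed, assuming the sketch of the helper above:

resp = self.post('/policies/clusters/create', create_args)
# tolerate INVALID_PARAMETER_VALUE (e.g. the policy already exists),
# presumably so a re-run of the import is effectively idempotent
if not logging_utils.log_response_error(error_logger, resp,
                                        ignore_error_list=['INVALID_PARAMETER_VALUE']):
    if 'policy_id' in policy_conf:
        checkpoint_cluster_policies_set.write(policy_conf['policy_id'])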
dbclient/HiveClient.py

Lines changed: 6 additions & 6 deletions
@@ -377,7 +377,7 @@ def import_hive_metastore(self, cluster_name=None, metastore_dir='metastore/', v
                     logging.info(all_db_details_json)
                     raise ValueError('Missing Database Attributes Log. Re-run metastore export')
                 create_db_resp = self.create_database_db(db_name, ec_id, cid, database_attributes)
-                if logging_utils.log_reponse_error(error_logger, create_db_resp):
+                if logging_utils.log_response_error(error_logger, create_db_resp):
                     logging.error(f"Failed to create database {db_name} during metastore import. Exiting Import.")
                     return
                 db_path = database_attributes.get('Location')
@@ -394,7 +394,7 @@ def import_hive_metastore(self, cluster_name=None, metastore_dir='metastore/', v
                     if not self.move_table_view(db_name, tbl_name, local_table_ddl):
                         # we hit a table ddl here, so we apply the ddl
                         resp = self.apply_table_ddl(local_table_ddl, ec_id, cid, db_path, has_unicode)
-                        if not logging_utils.log_reponse_error(error_logger, resp):
+                        if not logging_utils.log_response_error(error_logger, resp):
                             checkpoint_metastore_set.write(full_table_name)
                     else:
                         logging.info(f'Moving view ddl to re-apply later: {db_name}.{tbl_name}')
@@ -414,7 +414,7 @@ def import_hive_metastore(self, cluster_name=None, metastore_dir='metastore/', v
                     logging.info(f"Importing view {full_view_name}")
                     local_view_ddl = metastore_view_dir + db_name + '/' + view_name
                     resp = self.apply_table_ddl(local_view_ddl, ec_id, cid, db_path, has_unicode)
-                    if logging_utils.log_reponse_error(error_logger, resp):
+                    if logging_utils.log_response_error(error_logger, resp):
                         checkpoint_metastore_set.write(full_view_name)
                     logging.info(resp)
 
@@ -428,7 +428,7 @@ def get_all_databases(self, error_logger, cid, ec_id):
         # DBR 7.0 changes databaseName to namespace for the return value of show databases
         all_dbs_cmd = 'all_dbs = [x.databaseName for x in spark.sql("show databases").collect()]; print(len(all_dbs))'
         results = self.submit_command(cid, ec_id, all_dbs_cmd)
-        if logging_utils.log_reponse_error(error_logger, results):
+        if logging_utils.log_response_error(error_logger, results):
             raise ValueError("Cannot identify number of databases due to the above error")
         num_of_dbs = ast.literal_eval(results['data'])
         batch_size = 100  # batch size to iterate over databases
@@ -510,12 +510,12 @@ def log_table_ddl(self, cid, ec_id, db_name, table_name, metastore_dir, error_lo
         if ddl_len > 2048 or has_unicode:
             # create the dbfs tmp path for exports / imports. no-op if exists
             resp = self.post('/dbfs/mkdirs', {'path': '/tmp/migration/'})
-            if logging_utils.log_reponse_error(error_logger, resp):
+            if logging_utils.log_response_error(error_logger, resp):
                 return False
             # save the ddl to the tmp path on dbfs
             save_ddl_cmd = "with open('/dbfs/tmp/migration/tmp_export_ddl.txt', 'w') as fp: fp.write(ddl_str)"
             save_resp = self.submit_command(cid, ec_id, save_ddl_cmd)
-            if logging_utils.log_reponse_error(error_logger, save_resp):
+            if logging_utils.log_response_error(error_logger, save_resp):
                 return False
             # read that data using the dbfs rest endpoint which can handle 2MB of text easily
             read_args = {'path': '/tmp/migration/tmp_export_ddl.txt'}

dbclient/JobsClient.py

Lines changed: 4 additions & 4 deletions
@@ -73,7 +73,7 @@ def update_imported_job_names(self, error_logger, checkpoint_job_configs_set):
             update_args = {'job_id': job_id, 'new_settings': new_settings}
             logging.info(f'Updating job name: {update_args}')
             resp = self.post('/jobs/update', update_args)
-            if not logging_utils.log_reponse_error(error_logger, resp):
+            if not logging_utils.log_response_error(error_logger, resp):
                 checkpoint_job_configs_set.write(job_name)
             else:
                 raise RuntimeError("Import job has failed. Refer to the previous log messages to investigate.")
@@ -118,7 +118,7 @@ def log_job_configs(self, users_list=None, groups_list = None, log_file='jobs.lo
                 x['settings'] = job_settings
                 log_fp.write(json.dumps(x) + '\n')
                 job_perms = self.get(f'/preview/permissions/jobs/{job_id}')
-                if not logging_utils.log_reponse_error(error_logger, job_perms):
+                if not logging_utils.log_response_error(error_logger, job_perms):
                     job_perms['job_name'] = new_job_name
                     acl_fp.write(json.dumps(job_perms) + '\n')
 
@@ -188,7 +188,7 @@ def adjust_ids_for_cluster(settings): #job_settings or task_settings
                     logging.info("Resetting job to use default cluster configs due to expired configurations.")
                     job_settings['new_cluster'] = self.get_jobs_default_cluster_conf()
                     create_resp_retry = self.post('/jobs/create', job_settings)
-                    if not logging_utils.log_reponse_error(error_logger, create_resp_retry):
+                    if not logging_utils.log_response_error(error_logger, create_resp_retry):
                         if 'job_id' in job_conf:
                             checkpoint_job_configs_set.write(job_conf["job_id"])
                 else:
@@ -215,7 +215,7 @@ def adjust_ids_for_cluster(settings): #job_settings or task_settings
                 acl_perms = self.build_acl_args(acl_conf['access_control_list'], True)
                 acl_create_args = {'access_control_list': acl_perms}
                 acl_resp = self.patch(api, acl_create_args)
-                if not logging_utils.log_reponse_error(error_logger, acl_resp) and 'object_id' in acl_conf:
+                if not logging_utils.log_response_error(error_logger, acl_resp) and 'object_id' in acl_conf:
                     checkpoint_job_configs_set.write(acl_conf['object_id'])
                 else:
                     raise RuntimeError("Import job has failed. Refer to the previous log messages to investigate.")

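In the last JobsClient hunk the helper is combined with a second condition, and `and` short-circuits: a response that logs an error never reaches the membership test, and either failure mode falls through to the same `else`. Annotated, under the same assumed helper:

if not logging_utils.log_response_error(error_logger, acl_resp) and 'object_id' in acl_conf:
    # reached only when the response was clean AND carries an object_id
    checkpoint_job_configs_set.write(acl_conf['object_id'])
else:
    # reached when the response contained an error OR 'object_id' was absent
    raise RuntimeError("Import job has failed. Refer to the previous log messages to investigate.")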
dbclient/MLFlowClient.py

Lines changed: 2 additions & 2 deletions
@@ -82,7 +82,7 @@ def _put_mlflow_experiment_acl(self, acl_str, experiment_id_map, checkpoint_key_
             new_experiment_id = experiment_id_map[experiment_id]
             acl_create_args = {'access_control_list': self.build_acl_args(acl_obj['access_control_list'], True)}
             resp = self.put(f'/permissions/experiments/{new_experiment_id}', acl_create_args)
-            if not logging_utils.log_reponse_error(error_logger, resp):
+            if not logging_utils.log_response_error(error_logger, resp):
                 checkpoint_key_set.write(experiment_id)
 
     def _get_mlflow_experiment_acls(self, acl_log_file_writer, experiment_str, checkpoint_key_set, error_logger):
@@ -97,7 +97,7 @@ def _get_mlflow_experiment_acls(self, acl_log_file_writer, experiment_str, check
             return
         logging.info(f"Exporting ACLs for experiment_id: {experiment_id}.")
         perms = self.get(f"/permissions/experiments/{experiment_id}", do_not_throw=True)
-        if not logging_utils.log_reponse_error(error_logger, perms):
+        if not logging_utils.log_response_error(error_logger, perms):
            acl_log_file_writer.write(json.dumps(perms) + '\n')
            checkpoint_key_set.write(experiment_id)
            logging.info(f"Successfully exported ACLs for experiment_id: {experiment_id}.")

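The `do_not_throw=True` flag in the second MLFlowClient hunk appears to pair with the helper: instead of raising on an HTTP failure, `self.get` presumably returns the error payload as a dict, which `log_response_error` then records while the export loop continues. A sketch of that division of labor (the flag's semantics are an assumption):

perms = self.get(f"/permissions/experiments/{experiment_id}", do_not_throw=True)
# any error payload is logged here rather than raised by the GET above
if not logging_utils.log_response_error(error_logger, perms):
    acl_log_file_writer.write(json.dumps(perms) + '\n')
    checkpoint_key_set.write(experiment_id)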
dbclient/ScimClient.py

Lines changed: 8 additions & 8 deletions
@@ -229,7 +229,7 @@ def assign_group_entitlements(self, group_dir, error_logger):
                 g_id = group_ids[group_name]
                 update_entitlements = self.assign_entitlements_args(entitlements)
                 up_resp = self.patch(f'/preview/scim/v2/Groups/{g_id}', update_entitlements)
-                logging_utils.log_reponse_error(error_logger, up_resp)
+                logging_utils.log_response_error(error_logger, up_resp)
 
     def assign_group_roles(self, group_dir, error_logger):
         # assign group role ACLs, which are only available via SCIM apis
@@ -246,13 +246,13 @@ def assign_group_roles(self, group_dir, error_logger):
                 g_id = group_ids[group_name]
                 update_roles = self.assign_roles_args(roles)
                 up_resp = self.patch(f'/preview/scim/v2/Groups/{g_id}', update_roles)
-                logging_utils.log_reponse_error(error_logger, up_resp)
+                logging_utils.log_response_error(error_logger, up_resp)
             entitlements = group_data.get('entitlements', None)
             if entitlements:
                 g_id = group_ids[group_name]
                 update_entitlements = self.assign_entitlements_args(entitlements)
                 up_resp = self.patch(f'/preview/scim/v2/Groups/{g_id}', update_entitlements)
-                logging_utils.log_reponse_error(error_logger, up_resp)
+                logging_utils.log_response_error(error_logger, up_resp)
 
     def get_current_group_ids(self):
         # return a dict of group displayName and id mappings
@@ -303,7 +303,7 @@ def assign_user_entitlements(self, current_user_ids, error_logger, user_log_file
             if user_entitlements:
                 entitlements_args = self.assign_entitlements_args(user_entitlements)
                 update_resp = self.patch(f'/preview/scim/v2/Users/{user_id}', entitlements_args)
-                logging_utils.log_reponse_error(error_logger, update_resp)
+                logging_utils.log_response_error(error_logger, update_resp)
 
     def assign_user_roles(self, current_user_ids, error_logger, user_log_file='users.log'):
         """
@@ -351,7 +351,7 @@ def assign_user_roles(self, current_user_ids, error_logger, user_log_file='users
                 # get the json to add the roles to the user profile
                 patch_roles = self.add_roles_arg(roles_needed)
                 update_resp = self.patch(f'/preview/scim/v2/Users/{user_id}', patch_roles)
-                logging_utils.log_reponse_error(error_logger, update_resp)
+                logging_utils.log_response_error(error_logger, update_resp)
 
     @staticmethod
     def get_member_args(member_id_list):
@@ -409,7 +409,7 @@ def import_groups(self, group_dir, current_user_ids, error_logger):
                     "displayName": x
                 }
                 group_resp = self.post('/preview/scim/v2/Groups', create_args)
-                if not logging_utils.log_reponse_error(error_logger, group_resp):
+                if not logging_utils.log_response_error(error_logger, group_resp):
                     checkpoint_groups_set.write(x)
 
         # dict of { group_name : group_id }
@@ -444,7 +444,7 @@ def import_groups(self, group_dir, current_user_ids, error_logger):
                 add_members_json = self.get_member_args(member_id_list)
                 group_id = current_group_ids[group_name]
                 add_resp = self.patch('/preview/scim/v2/Groups/{0}'.format(group_id), add_members_json)
-                logging_utils.log_reponse_error(error_logger, add_resp)
+                logging_utils.log_response_error(error_logger, add_resp)
 
     def import_users(self, user_log, error_logger, checkpoint_set, num_parallel):
         # first create the user identities with the required fields
@@ -468,7 +468,7 @@ def _import_users_helper(self, user_data, create_keys, checkpoint_set, error_log
             logging.info("Creating user: {0}".format(user_name))
             user_create = {k: user[k] for k in create_keys if k in user}
             create_resp = self.post('/preview/scim/v2/Users', user_create)
-            if not logging_utils.log_reponse_error(error_logger, create_resp):
+            if not logging_utils.log_response_error(error_logger, create_resp):
                 checkpoint_set.write(user_name)

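ScimClient mixes two usages of the helper: most PATCH call sites ignore its return value, logging the failure and moving on to the next group or user, while the create calls gate their checkpoint writes on it. Side by side, assuming the sketch above:

# fire-and-forget: record the failure but keep processing the remaining groups
logging_utils.log_response_error(error_logger, up_resp)

# gated: checkpoint the user only if the create call came back clean
if not logging_utils.log_response_error(error_logger, create_resp):
    checkpoint_set.write(user_name)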
dbclient/SecretsClient.py

Lines changed: 8 additions & 8 deletions
@@ -24,9 +24,9 @@ def get_secret_value(self, scope_name, secret_key, cid, ec_id, error_logger):
         results_set = self.submit_command(cid, ec_id, cmd_set_value)
         results_convert = self.submit_command(cid, ec_id, cmd_convert_b64)
         results_get = self.submit_command(cid, ec_id, cmd_get_b64)
-        if logging_utils.log_reponse_error(error_logger, results_set) \
-                or logging_utils.log_reponse_error(error_logger, results_convert) \
-                or logging_utils.log_reponse_error(error_logger, results_get):
+        if logging_utils.log_response_error(error_logger, results_set) \
+                or logging_utils.log_response_error(error_logger, results_convert) \
+                or logging_utils.log_response_error(error_logger, results_get):
             return None
         else:
             return results_get.get('data')
@@ -44,7 +44,7 @@ def log_all_secrets(self, cluster_name=None, log_dir='secret_scopes/'):
         for scope_json in scopes_list:
             scope_name = scope_json.get('name')
             secrets_list = self.get_secrets(scope_name)
-            if logging_utils.log_reponse_error(error_logger, secrets_list):
+            if logging_utils.log_response_error(error_logger, secrets_list):
                 continue
             scopes_logfile = scopes_dir + scope_name
             try:
@@ -72,7 +72,7 @@ def log_all_secrets_acls(self, log_name='secret_scopes_acls.log'):
         for scope_json in scopes_list:
             scope_name = scope_json.get('name', None)
             resp = self.get('/secrets/acls/list', {'scope': scope_name})
-            if logging_utils.log_reponse_error(error_logger, resp):
+            if logging_utils.log_response_error(error_logger, resp):
                 return
             else:
                 resp['scope_name'] = scope_name
@@ -150,7 +150,7 @@ def import_all_secrets(self, log_dir='secret_scopes/'):
             create_scope_args['initial_manage_principal'] = 'users'
             other_permissions = self.get_all_other_permissions(scope_name, scopes_acl_dict)
             create_resp = self.post('/secrets/scopes/create', create_scope_args)
-            logging_utils.log_reponse_error(
+            logging_utils.log_response_error(
                 error_logger, create_resp, ignore_error_list=['RESOURCE_ALREADY_EXISTS'])
             if other_permissions:
                 # use this dict minus the `users:MANAGE` permissions and apply the other permissions to the scope
@@ -161,7 +161,7 @@ def import_all_secrets(self, log_dir='secret_scopes/'):
                     put_acl_args["principal"] = x
                     logging.info(put_acl_args)
                     put_resp = self.post('/secrets/acls/put', put_acl_args)
-                    logging_utils.log_reponse_error(error_logger, put_resp)
+                    logging_utils.log_response_error(error_logger, put_resp)
             # loop through the scope and create the k/v pairs
             with open(file_path, 'r') as fp:
                 for s in fp:
@@ -176,7 +176,7 @@ def import_all_secrets(self, log_dir='secret_scopes/'):
                                            'key': k,
                                            'string_value': base64.b64decode(v.encode('ascii')).decode('ascii')}
                         put_resp = self.post('/secrets/put', put_secret_args)
-                        logging_utils.log_reponse_error(error_logger, put_resp)
+                        logging_utils.log_response_error(error_logger, put_resp)
                     except Exception as error:
                         if "Invalid base64-encoded string" in str(error) or 'decode' in str(error) or "padding" in str(error):
                             error_msg = f"secret_scope: {scope_name} has invalid invalid data characters: {str(error)} skipping.. and logging to error file."

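The first SecretsClient hunk chains three helper calls with `or`, which short-circuits at the first error just as `any()` over a generator would; an equivalent formulation, assuming the helper above:

responses = (results_set, results_convert, results_get)
if any(logging_utils.log_response_error(error_logger, r) for r in responses):
    return None  # later responses are not checked or logged, as with chained `or`
return results_get.get('data')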