diff --git a/schema/check_generated_data.py b/schema/check_generated_data.py index df433de8..c33f5173 100644 --- a/schema/check_generated_data.py +++ b/schema/check_generated_data.py @@ -15,20 +15,20 @@ import schema_validator from schema_files import ALL_TEST_TYPES +logger = logging.Logger("Checking Test Data vs. Schemas LOGGER") +logger.setLevel(logging.WARNING) + def main(args): logging.config.fileConfig("../logging.conf") if len(args) <= 1: - logging.error('Please specify the path to test data directory') + logger.error('Please specify the path to test data directory') return else: test_data_path = args[1] - logging.debug('TEST DATA PATH = %s', test_data_path) - logger = logging.Logger("Checking Test Data vs. Schemas LOGGER") - logger.setLevel(logging.INFO) logger.info('+++ Test Generated test data vs. schemas files') # TODO: get ICU versions @@ -39,8 +39,8 @@ def main(args): for dir_name in icu_dirs: icu_versions.append(os.path.basename(dir_name)) - logging.debug('ICU directories = %s', icu_versions) - logging.debug('test types = %s', ALL_TEST_TYPES) + logger.debug('ICU directories = %s', icu_versions) + logger.debug('test types = %s', ALL_TEST_TYPES) validator = schema_validator.ConformanceSchemaValidator() @@ -52,7 +52,7 @@ def main(args): validator.debug = 1 all_results = validator.validate_test_data_with_schema() - logging.info(' %d results for generated test data', len(all_results)) + logger.debug(' %d results for generated test data', len(all_results)) schema_errors = [] failed_validations = [] @@ -78,7 +78,7 @@ def main(args): try: summary_data = json.dumps(summary_json) except BaseException as error: - logging.error('json.dumps Summary data problem: %s at %s', error, error) + logger.error('json.dumps Summary data problem: %s at %s', error, error) sys.exit(1) output_filename = os.path.join(test_data_path, 'test_data_validation_summary.json') @@ -88,19 +88,18 @@ def main(args): file_out.close() except BaseException as error: schema_errors.append(output_filename) - logging.fatal('Error: %s. Cannot save validation summary in file %s', error, output_filename) + logger.fatal('Error: %s. Cannot save validation summary in file %s', error, output_filename) sys.exit(1) if schema_errors: - logging.critical('Test data file files: %d fail out of %d:', + logger.critical('Test data file files: %d fail out of %d:', len(schema_errors), schema_count) for failure in schema_errors: - logging.critical(' %s', failure) + logger.critical(' %s', failure) sys.exit(1) else: logging.info("All %d generated test data files match with schema", schema_count) - if __name__ == "__main__": main(sys.argv) diff --git a/schema/check_schemas.py b/schema/check_schemas.py index c16865cc..f851a55a 100644 --- a/schema/check_schemas.py +++ b/schema/check_schemas.py @@ -76,7 +76,7 @@ def parallel_validate_schema(validator, file_names): def main(args): logger = logging.Logger("TEST SCHEMAS LOGGER") - logger.setLevel(logging.INFO) + logger.setLevel(logging.WARNING) logger.info('+++ Test JSON Schema files') validator = schema_validator.ConformanceSchemaValidator() diff --git a/schema/check_test_output.py b/schema/check_test_output.py index 5d45d676..6fa1c4ab 100644 --- a/schema/check_test_output.py +++ b/schema/check_test_output.py @@ -24,10 +24,12 @@ def main(args): else: test_output_path = args[1] - logging.debug('TEST OUTPUT PATH = %s', test_output_path) logger = logging.Logger("Checking Test Data vs. 
Schemas LOGGER") - logger.setLevel(logging.INFO) + logger.setLevel(logging.WARNING) + + logger.debug('TEST OUTPUT PATH = %s', test_output_path) + logger.info('+++ Test Generated test data vs. schemas files') # TODO: get ICU versions @@ -54,14 +56,14 @@ def main(args): test_type = schema_files.TEST_FILE_TO_TEST_TYPE_MAP[test_file_prefix] test_type_set.add(test_type) except BaseException as err: - logging.debug('No file (%s) during schema check output: %s', file, err + logger.debug('No file (%s) during schema check output: %s', file, err ) for dir_nane in icu_dirs: icu_version_set.add(os.path.basename(dir_nane)) icu_versions = sorted(list(icu_version_set)) - logging.debug('ICU directories = %s', icu_versions) - logging.debug('test types = %s', ALL_TEST_TYPES) + logger.debug('ICU directories = %s', icu_versions) + logger.debug('test types = %s', ALL_TEST_TYPES) validator = schema_validator.ConformanceSchemaValidator() # Todo: use setters to initialize validator @@ -74,7 +76,7 @@ def main(args): validator.debug = 1 all_results, test_validation_plans = validator.validate_test_output_with_schema() - logging.info(' %d results for test output', len(all_results)) + logger.info(' %d results for test output', len(all_results)) # Check if any files in the expected list were not validated. test_paths = set() @@ -83,7 +85,7 @@ def main(args): for json_file in json_files: if json_file not in test_paths: - logging.fatal('JSON file %s was not verified against a schema', json_file) + logger.fatal('JSON file %s was not verified against a schema', json_file) # Bail out right away! sys.exit(1) @@ -109,7 +111,6 @@ def main(args): } except BaseException as error: logging.fatal('Cannot create summary_json %s', error) - sys.exit(1) # Create outputs from these results. try: @@ -124,11 +125,11 @@ def main(args): file_out.write(summary_data) file_out.close() except BaseException as error: - logging.fatal('Error: %s. Cannot save validation summary in file %s', error, output_filename) + logger.fatal('Error: %s. Cannot save validation summary in file %s', error, output_filename) # Don't continue after this problem. sys.exit(1) - logging.info("All %d test output files match with schema", schema_count) + logger.info("All %d test output files match with schema", schema_count) return diff --git a/schema/schema_validator.py b/schema/schema_validator.py index 7c0e1fa6..ae2592ca 100644 --- a/schema/schema_validator.py +++ b/schema/schema_validator.py @@ -19,7 +19,7 @@ # ?? Move to the initialization ch = logging.StreamHandler() -ch.setLevel(logging.INFO) +ch.setLevel(logging.WARNING) # Given a directory, validate JSON files against expected schema @@ -45,7 +45,6 @@ def __init__(self): self.test_types = schema_files.ALL_TEST_TYPES self.executors = [] self.icu_versions = [] - self.debug_leve = 0 logging.config.fileConfig("../logging.conf") @@ -145,7 +144,6 @@ def validate_test_data_with_schema(self): schema_test_info.append(file_path_pair) else: test_data_files_not_found.append([icu_version, test_type]) - logging.debug('No data test file %s for %s, %s', file_path_pair, test_type, icu_version) pass if test_data_files_not_found: @@ -161,7 +159,8 @@ def validate_test_data_with_schema(self): logging.warning('FAIL: Test data %s, %s. 
MSG=%s', result_data['test_type'], result_data['icu_version'], result_data['err_info']) else: - logging.debug('Test data validated: %s %s', result_data['test_type'], result_data['icu_version']) + pass + all_results.append(result_data) return all_results @@ -217,7 +216,7 @@ def check_test_data_against_schema(self, schema_info): def check_test_data_schema(self, icu_version, test_type): # Check the generated test data for structure against the schema - logging.debug('Validating %s with %s', test_type, icu_version) + logging.info('Validating %s with %s', test_type, icu_version) # Check test output vs. the test data schema schema_verify_file = os.path.join(self.schema_base, test_type, 'test_schema.json') @@ -247,7 +246,7 @@ def check_test_data_schema(self, icu_version, test_type): results['result'] = result if result: - logging.debug('Test data %s validated successfully, with ICU %s', test_type, icu_version) + logging.info('Test data %s validated with ICU %s', test_type, icu_version) else: logging.error('Test data %s FAILED with ICU %s: %s', test_type, icu_version, err_info) @@ -278,7 +277,7 @@ def get_test_output_schema_plan(self, icu_version, test_type, executor): def check_test_output_schema(self, icu_version, test_type, executor): # Check the output of the tests for structure against the schema - logging.debug('Validating test output: %s %s %s', executor, test_type, icu_version) + logging.info('Validating test output: %s %s %s', executor, test_type, icu_version) # Check test output vs. the schema schema_file_name = SCHEMA_FILE_MAP[test_type]['result_data']['schema_file'] @@ -333,7 +332,6 @@ def validate_schema_file(self, schema_file_path): logging.fatal('%s for %s. Cannot get test_type value', err, schema_file_path, test_type) return [False, err, schema_file_path, test_type] - logging.info('Checking schema %s', schema_file_path) try: # With just a schema, it validates the schema. # However Validator.check_schema doesn't fail as expected. @@ -449,7 +447,7 @@ def main(args): base_folders, test_types, result_folders = process_args(args) logger = logging.Logger("TEST_GENERATE LOGGER") - logger.setLevel(logging.INFO) + logger.setLevel(logging.WARNING) logger.info('+++ Running JSON Schema tests') schema_validator = ConformanceSchemaValidator() @@ -461,20 +459,20 @@ def main(args): 'icu76'] schema_validator.executors = ['node', 'rust', 'dart_web', 'dart_native', 'icu4j'] - logging.info('Checking test outputs') + logger.info('Checking test outputs') all_test_out_results = schema_validator.validate_test_output_with_schema() # Check all schema files for correctness. 
schema_errors = schema_validator.check_schema_files() if schema_errors: - logging.error('INVALID SCHEMA: %s', schema_errors) + logger.error('INVALID SCHEMA: %s', schema_errors) else: - logging.info('All schemas are valid: %s', schema_errors) + logger.info('All schemas are valid: %s', schema_errors) - logging.info('Checking generated data') + logger.info('Checking generated data') all_test_data_results = schema_validator.validate_test_data_with_schema() - logging.info('Checking test outputs') + logger.info('Checking test outputs') all_test_out_results = schema_validator.validate_test_output_with_schema() return diff --git a/testdriver/datasets.py b/testdriver/datasets.py index 493e3483..c527b0d8 100644 --- a/testdriver/datasets.py +++ b/testdriver/datasets.py @@ -502,6 +502,7 @@ def printCldrIcuMap(): except KeyError: logging.debug(' %s not in map', name) + def main(args): logging.config.fileConfig("../logging.conf") diff --git a/testdriver/ddtargs.py b/testdriver/ddtargs.py index 0a91a5b0..81b908cf 100644 --- a/testdriver/ddtargs.py +++ b/testdriver/ddtargs.py @@ -53,6 +53,10 @@ def __init__(self, args): setCommonArgs(self.parser) + self.parser.add_argument('--log', + default='WARNING', + help='logging level') + self.parser.add_argument( '--start_test', default=0, help='number of tests to skip at start of the test data') @@ -92,6 +96,10 @@ def __init__(self, args): help='Verify all available report files', default=None) + self.parser.add_argument('--log', + default='WARNING', + help='logging level') + self.parser.add_argument( '--summary_only', action='store_true', help='Flag to create summaries from current reports', diff --git a/testdriver/testdriver.py b/testdriver/testdriver.py index 981a7717..e78a530d 100644 --- a/testdriver/testdriver.py +++ b/testdriver/testdriver.py @@ -18,11 +18,12 @@ from testplan import TestPlan +logger = logging.Logger("TEST DRIVER LOGGER") +logger.setLevel(logging.WARNING) -# TODO: Separate TestPlan into another module. class TestDriver: - def __init__(self): + def __init__(self, logger=None): # All test plans requested from command line arguments self.cldrVersion = None self.icuVersion = None @@ -46,20 +47,19 @@ def set_args(self, arg_options): for test_type in arg_options.test_type: if test_type not in ddt_data.testDatasets: - logging.warning('**** WARNING: test_type %s not in testDatasets', test_type) + logger.warning('**** WARNING: test_type %s not in testDatasets', test_type) else: # Create a test plan based on data and options test_data_info = ddt_data.testDatasets[test_type] - if self.debug: - logging.debug('$$$$$ test_type = %s test_data_info = %s', - test_type, test_data_info.testDataFilename) + logger.debug('$$$$$ test_type = %s test_data_info = %s', + test_type, test_data_info.testDataFilename) for executor in arg_options.exec: if not ddt_data.allExecutors.has(executor): # Run a non-specified executor. Compatibility of versions # between test data and the executor should be done the text executor # program itself. - logging.error('No executable command configured for executor platform: %s', executor) + logger.error('No executable command configured for executor platform: %s', executor) exec_command = {'path': executor} else: # Set details for execution from ExecutorInfo @@ -76,7 +76,7 @@ def set_args(self, arg_options): test_data = ddt_data.testDatasets[test_type] new_plan.set_test_data(test_data) except KeyError as err: - logging.warning('!!! %s: No test data filename for %s', err, test_type) + logger.warning('!!! 
%s: No test data filename for %s', err, test_type) if not new_plan.ignore: self.test_plans.append(new_plan) @@ -91,7 +91,7 @@ def parse_args(self, args): # Get all the arguments argparse = ddtargs.DdtArgs(args) - logging.debug('TestDriver OPTIONS: %s', argparse.getOptions()) + logger.debug('TestDriver OPTIONS: %s', argparse.getOptions()) # Now use the argparse.options to set the values in the driver self.set_args(argparse.getOptions()) @@ -104,7 +104,7 @@ def run_plans(self): plan.run_plan() def run_one(self, plan): - logging.debug("Parallel of %s %s %s" % (plan.test_lang, plan.test_type, plan.icu_version)) + logger.debug("Parallel of %s %s %s" % (plan.test_lang, plan.test_type, plan.icu_version)) plan.run_plan() def run_plans_parallel(self): @@ -114,7 +114,7 @@ def run_plans_parallel(self): num_processors = mp.cpu_count() plan_info = '%s, %s' % (self.test_plans[0].test_type, self.test_plans[0].exec_command) - logging.info('TestDriver: %s processors for %s plans. %s' % + logger.info('TestDriver: %s processors for %s plans. %s' % (num_processors, len(self.test_plans), plan_info)) processor_pool = mp.Pool(num_processors) @@ -122,28 +122,21 @@ def run_plans_parallel(self): p.map(self.run_one, self.test_plans) - # Run the test with command line arguments def main(args): - driver = TestDriver() - # print('ARGS = %s' % (args)) - driver.parse_args(args[1:]) - logger = logging.Logger("TEST DRIVER LOGGER") - logger.setLevel(logging.INFO) + logger.setLevel(logging.WARNING) + + driver = TestDriver(logger) + + logger.debug('ARGS = %s', args) + driver.parse_args(args[1:]) if driver.run_serial: driver.run_plans() else: driver.run_plans_parallel() - # if len(args)> 2: - # Set limit on number to run - # numberToRun = int(args[2]) - # driver.runLimit = numberToRun - - # driver.initExecutor() - if __name__ == "__main__": main(sys.argv) diff --git a/testdriver/testplan.py b/testdriver/testplan.py index 29909dca..c01a0e1c 100644 --- a/testdriver/testplan.py +++ b/testdriver/testplan.py @@ -50,10 +50,12 @@ def __init__(self, exec_data, test_type, args=None): self.platformVersion = '' # Records the executor version self.icu_version = None # Requested by the test driver. self.run_limit = None # Set to positive integer to activate - self.debug = 1 + self.debug = False self.iteration = 0 - self.progress_interval = 1000 + + # Set to an integer > 0 if details on test progress is needed + self.progress_interval = None self.verifier = None @@ -84,7 +86,6 @@ def run_plan(self): pass # Check for option to set version of executor - # TODO # If icu_version is "latest" or not set, get the highest numbered # version of the test data @@ -99,7 +100,7 @@ def run_plan(self): # TODO: Consider sorting with possible dotted versions, e.g., 73.1.3 newest_version = sorted(icu_test_dirs, reverse=True)[0] logging.warning('** Replacing proposed icu version of %s with version %s', - self.icu_version, newest_version) + self.icu_version, newest_version) self.icu_version = newest_version if self.test_lang == 'node' and 'node_version' in self.options: @@ -130,10 +131,12 @@ def run_plan(self): if self.options.run_limit: self.run_limit = int(self.options.run_limit) - logging.debug('!!! RUN LIMIT SET: %d', self.run_limit) + if self.debug: + logging.info('!!! 
RUN LIMIT SET: %d', self.run_limit) - logging.debug('Running plan %s on data %s', - self.exec_command, self.inputFilePath) + if self.debug: + logging.debug('Running plan %s on data %s', + self.exec_command, self.inputFilePath) if self.options.exec_mode == 'one_test': self.run_one_test_mode() @@ -153,7 +156,8 @@ def request_executor_info(self): self.jsonOutput["platform error"] = self.run_error_message return None else: - logging.debug('EXECUTOR INFO = %s', result) + if self.debug > 1: + logging.info('EXECUTOR INFO = %s', result) try: self.jsonOutput["platform"] = json.loads(result) @@ -198,7 +202,8 @@ def request_executor_termination(self, terminate_args=None): if not result: self.jsonOutput["platform error"] = self.run_error_message else: - logging.debug('TERMINATION INFO = %s', result) + if self.debug: + logging.debug('TERMINATION INFO = %s', result) self.jsonOutput["platform"] = json.loads(result) def generate_header(self): @@ -236,8 +241,9 @@ def complete_output_file(self, error_info): self.resultsFile.close() def run_one_test_mode(self): - logging.debug(' Running OneTestMode %s on data %s', - self.exec_command, self.inputFilePath) + if self.debug: + logging.info(' Running OneTestMode %s on data %s', + self.exec_command, self.inputFilePath) # Set up calls for version data --> results @@ -250,20 +256,22 @@ def run_one_test_mode(self): # The test data was not found. Skip this test. return None - logging.debug('@@@ %d tests found', len(tests)) + if self.debug: + logging.info('@@@ %d tests found', len(tests)) # Initialize JSON output headers --> results self.exec_list = self.exec_command.split() # TODO: get other things about the exec - logging.debug('EXEC info: exec_command %s, exec_list >%s<', - self.exec_command, - self.exec_list) + if self.debug: + logging.debug('EXEC info: exec_command %s, exec_list >%s<', + self.exec_command, + self.exec_list) # Start the JSON output # Set up calls for version data --> results if not self.request_executor_info(): - # TODO: Report problem with executor (somehow). + logging.error('Cannot get executor info'); return None # Use for directory of the output results @@ -278,11 +286,12 @@ def run_one_test_mode(self): # Create results file try: - logging.debug('++++++ Results file path = %s', self.outputFilePath) + if self.debug: + logging.info('++++++ Results file path = %s', self.outputFilePath) self.resultsFile = open(self.outputFilePath, encoding='utf-8', mode='w') except BaseException as error: logging.error('*** Cannot open results file at %s. Err = %s', - self.outputFilePath, error) + self.outputFilePath, error) self.resultsFile = open(self.outputFilePath, encoding='utf-8', mode='w') # Store information the test run @@ -303,6 +312,7 @@ def run_one_test_mode(self): per_execution = int(self.options.per_execution) except TypeError: per_execution = 1 + num_errors = self.run_all_single_tests(per_execution) env_dict = {} @@ -317,7 +327,7 @@ def run_one_test_mode(self): self.exec_env = env_dict except (AttributeError, KeyError): env_dict = None - # TODO: Use env_dict for environment information. + # TODO: Use env_dict for environment information. 
# Complete outputFile self.complete_output_file(num_errors) @@ -343,10 +353,10 @@ def run_all_single_tests(self, tests_per_execution=1): for test in self.tests: test.update({"test_type": self.test_type}) - if self.progress_interval and test_num % self.progress_interval == 0: + if self.debug and self.progress_interval and test_num % self.progress_interval == 0: formatted_num = '{:,}'.format(test_num) - logging.debug('Testing %s / %s. %s of %s', - self.exec_list[0], self.testScenario, formatted_num, formatted_count) + logging.info('Testing %s / %s. %s of %s', + self.exec_list[0], self.testScenario, formatted_num, formatted_count) # Accumulate tests_per_execution items into a single outline if lines_in_batch < tests_per_execution: @@ -367,7 +377,7 @@ def run_all_single_tests(self, tests_per_execution=1): test_lines = [] test_num += 1 - if self.run_limit and test_num > self.run_limit: + if self.run_limit and test_num > self.run_limit and self.debug: logging.debug('** Stopped after %d tests', (test_num - 1)) break @@ -386,7 +396,7 @@ def process_batch_of_tests(self, tests_to_send): return [] if self.debug > 2: - logging.debug('PROCESSING %d tests', len(tests_to_send)) + logging.info('PROCESSING %d tests', len(tests_to_send)) # Ask process to exit when finished. out_and_exit = '\n'.join(tests_to_send) + '\n#EXIT\n' @@ -401,7 +411,7 @@ def process_batch_of_tests(self, tests_to_send): if not result: num_errors += 1 logging.warning('!!!!!! process_batch_of_tests: "platform error": "%s"\n', - self.run_error_message) + self.run_error_message) return None if self.debug > 2: @@ -439,15 +449,14 @@ def process_batch_of_tests(self, tests_to_send): except BaseException as error: if self.debug > 1: logging.warning(' && Item %s. Error in= %s. Received (%d): >%s<', - index, error, len(item), item) - index += 1 + index, error, len(item), item) + index += 1 return batch_out def run_multitest_mode(self): - # TODO Implement this logging.info('!!! Running MultiTestMode %s on data %s', - self.exec_command, self.inputFilePath) + self.exec_command, self.inputFilePath) logging.warning(' ** UNIMPLEMENTED **') # Open the input file and get tests # Open results file @@ -479,7 +488,7 @@ def open_json_test_data(self): logging.error('CANNOT parse JSON from file %s: %s', self.inputFilePath, error) return None except FileNotFoundError as err: - logging.debug('*** Cannot open file %s. Err = %s', self.inputFilePath, err) + logging.error('*** Cannot open file %s. Err = %s', self.inputFilePath, err) return None try: diff --git a/testgen/generators/datetime_fmt.py b/testgen/generators/datetime_fmt.py index f39a9e46..44433398 100644 --- a/testgen/generators/datetime_fmt.py +++ b/testgen/generators/datetime_fmt.py @@ -156,15 +156,8 @@ def process_test_data(self): result = self.generate_datetime_data_from_cldr(dt_json_path, self.run_limit) return result - # OK, there's no CLDR-based JSON data available. 
- run_list = [ - ['source ~/.nvm/nvm.sh; nvm install 21.6.0; nvm use 21.6.0 --silent'], - ['node generators/datetime_gen.js'], - ['mv datetime_fmt*.json icu74'] - ] - if self.icu_version not in icu_nvm_versions: - logging.warning('Generating datetime data not configured for icu version %s', self.icu_version) + logging.debug('Generating datetime data not configured for icu version %s', self.icu_version) return False # Set up Node version and call the generator @@ -173,7 +166,6 @@ def process_test_data(self): generate_command = 'source ~/.nvm/nvm.sh; nvm install %s; nvm use %s --silent; npm ci; node generators/datetime_gen.js %s %s' % ( nvm_version, nvm_version, '-run_limit', self.run_limit) - logging.debug('Running this command: %s', generate_command) result = subprocess.run(generate_command, shell=True) # Move results to the right directory diff --git a/testgen/generators/list_fmt.py b/testgen/generators/list_fmt.py index 98c16ff6..83f870b3 100644 --- a/testgen/generators/list_fmt.py +++ b/testgen/generators/list_fmt.py @@ -27,12 +27,6 @@ def process_test_data(self): exec_list.append('-run_limit') exec_list.append(str(self.run_limit)) - run_list = [ - ['source ~/.nvm/nvm.sh; nvm install 21.6.0; nvm use 21.6.0 --silent'], - exec_list, - ['mv list_fmt*.json icu74'] - ] - if self.icu_version not in icu_nvm_versions: logging.warning('Generating list_fmt data not configured for icu version %s', self.icu_version) return False diff --git a/testgen/generators/number_fmt.py b/testgen/generators/number_fmt.py index 46505b5a..b7db22be 100644 --- a/testgen/generators/number_fmt.py +++ b/testgen/generators/number_fmt.py @@ -53,7 +53,7 @@ def process_test_data(self): json.dump(json_verify, num_fmt_verify_file, indent=1) num_fmt_verify_file.close() - logging.info( + logging.debug( "NumberFormat Test (%s): %s tests created", self.icu_version, count ) return @@ -148,7 +148,7 @@ def generateNumberFmtTestDataObjects(self, rawtestdata, count=0): all_tests_list.append(entry) # All the tests in JSON form count += 1 - logging.info( + logging.debug( " generateNumberFmtTestDataObjects gives %d tests", (count - original_count), ) @@ -265,7 +265,7 @@ def generateDcmlFmtTestDataObjects(self, rawtestdata, count=0): verify_list.append({"label": label, "verify": expected}) count += 1 - logging.info( + logging.debug( " generateDcmlFmtTestDataObjects gives %d tests", (count - original_count) ) return all_tests_list, verify_list, count diff --git a/testgen/generators/plurals.py b/testgen/generators/plurals.py index e5ffb57e..fd6e5d42 100644 --- a/testgen/generators/plurals.py +++ b/testgen/generators/plurals.py @@ -109,7 +109,7 @@ def process_xml_file(self, filename, num_type): try: tree = ET.parse(filename) except: - logging.warning('No plurals file found: %s', filename) + logging.debug('No plurals file found: %s', filename) return None, None root = tree.getroot() @@ -137,7 +137,7 @@ def process_xml_file(self, filename, num_type): for sample in samples: # Ignore "root" locale. 
if locale == 'root': - logging.info('Plural rules: root locale ignored for %s, %s, %s', + logging.debug('Plural rules: root locale ignored for %s, %s, %s', locale, num_type, sample) continue # Locales should not use '_' but rather '-' diff --git a/testgen/generators/relativedatetime_fmt.py b/testgen/generators/relativedatetime_fmt.py index d30d864b..214c3150 100644 --- a/testgen/generators/relativedatetime_fmt.py +++ b/testgen/generators/relativedatetime_fmt.py @@ -49,7 +49,6 @@ def process_test_data(self): generate_command = 'source ~/.nvm/nvm.sh; nvm install %s; nvm use %s --silent; %s' %\ (nvm_version, nvm_version, ' '.join(exec_list)) - logging.debug('Running this command: %s', generate_command) result = subprocess.run(generate_command, shell=True) # Move results to the right directory diff --git a/testgen/testdata_gen.py b/testgen/testdata_gen.py index d41bc237..9f276122 100644 --- a/testgen/testdata_gen.py +++ b/testgen/testdata_gen.py @@ -17,6 +17,9 @@ from generators.relativedatetime_fmt import RelativeDateTimeFmtGenerator from generators.segmenter import SegmenterGenerator +logger = logging.Logger("TEST_GENERATE LOGGER") +logger.setLevel(logging.WARNING) + def setupArgs(): parser = argparse.ArgumentParser(prog="testdata_gen") @@ -32,7 +35,7 @@ def setupArgs(): def generate_versioned_data_parallel(args): num_processors = mp.cpu_count() - logging.info( + logger.info( "Test data generation: %s processors for %s plans", num_processors, len(args.icu_versions), @@ -53,13 +56,13 @@ def generate_versioned_data(version_info): args = version_info["args"] icu_version = version_info["icu_version"] - logging.info( + logger.info( "Generating .json files for data driven testing. ICU_VERSION requested = %s", icu_version, ) if len(args.test_types) < len(TestType): - logging.info("(Only generating %s)", ", ".join(args.test_types)) + logger.info("(Only generating %s)", ", ".join(args.test_types)) if TestType.COLLATION in args.test_types: # This is slow @@ -83,7 +86,7 @@ def generate_versioned_data(version_info): # First try with the new source of data. If not found, then use the older # lang names generator. 
generator = LocaleNamesGenerator(icu_version, args.run_limit) - logging.info('lang generated from new LocaleNames data in %s', icu_version) + logger.info('lang generated from new LocaleNames data in %s', icu_version) if generator: generator.process_test_data() @@ -114,15 +117,12 @@ def generate_versioned_data(version_info): generator = SegmenterGenerator(icu_version, args.run_limit) generator.process_test_data() - logging.info("++++ Data generation for %s is complete.", icu_version) + logger.info("++++ Data generation for %s is complete.", icu_version) def main(): new_args = setupArgs() - logger = logging.Logger("TEST_GENERATE LOGGER") - logger.setLevel(logging.INFO) - # Generate version data in parallel if possible generate_versioned_data_parallel(new_args) diff --git a/verifier/testreport.py b/verifier/testreport.py index 0ab15f61..6cd345d5 100644 --- a/verifier/testreport.py +++ b/verifier/testreport.py @@ -111,6 +111,8 @@ def __init__(self, report_path, report_html_path): self.report_html_path = report_html_path self.number_tests = 0 + # Set by the caller + self.failing_tests = [] self.passing_tests = [] self.test_errors = [] @@ -1143,10 +1145,6 @@ def fill_templates(self, result_data, result_cases, result_table_template, summa html_map[summary_name] = '' -def take_second(elem): - return elem[1] - - class SummaryReport: # TODO: use a templating language for creating these reports def __init__(self, file_base): @@ -1165,9 +1163,6 @@ def __init__(self, file_base): self.templates = reportTemplate() - if self.debug > 1: - logging.info('SUMMARY REPORT base = %s', self.file_base) - self.summary_html_path = None self.header_item_template = Template( @@ -1193,8 +1188,7 @@ def get_json_files(self): raw_reports = glob.glob(json_raw_join) self.raw_reports = raw_reports self.raw_reports.sort() - if self.debug > 1: - logging.info('SUMMARY JSON RAW FILES = %s', self.raw_reports) + return self.raw_reports def setup_all_test_results(self): @@ -1373,9 +1367,6 @@ def create_summary_html(self): html_output = self.templates.summary_html_template.safe_substitute(html_map) - if self.debug > 1: - logging.debug('HTML OUTPUT =\n%s', html_output) - logging.debug('HTML OUTPUT FILEPATH =%s', self.summary_html_path) file.write(html_output) file.close() diff --git a/verifier/verifier.py b/verifier/verifier.py index 4c0c2b5c..d7c0e620 100644 --- a/verifier/verifier.py +++ b/verifier/verifier.py @@ -19,6 +19,8 @@ # Global constants VERIFIER_REPORT_NAME = 'verifier_test_report.json' +logger = logging.Logger("VERIFIER LOGGER") +logger.setLevel(logging.WARNING) class Verifier: @@ -60,9 +62,6 @@ def __init__(self): logging.config.fileConfig("../logging.conf") - logger = logging.Logger("VERIFIER LOGGER") - logger.setLevel(logging.WARNING) - def open_verify_files(self, vplan): # Get test data, verify data, and results for a case. 
try: @@ -71,13 +70,13 @@ def open_verify_files(self, vplan): vplan.result_time_stamp = datetime.datetime.fromtimestamp(file_time).strftime('%Y-%m-%d %H:%M') vplan.report.timestamp = vplan.result_time_stamp except BaseException as err: - logging.error(' *** CANNOT OPEN RESULTS FILE %s:\n %s', vplan.result_path, err) + logger.error(' *** CANNOT OPEN RESULTS FILE %s:\n %s', vplan.result_path, err) return None try: - vplan.verify_data_file = open(vplan.verify_path, encoding='utf-8', mode='r') + vplan.verify_data_file = open(vplan.verify_path, encoding='utf-8', mode='r') except BaseException as err: - logging.error(' **!!* %s: Cannot open verify file %s', err, vplan.verify_path) + logger.error(' **!!* %s: Cannot open verify file %s', err, vplan.verify_path) return None # Create report directory if needed @@ -86,23 +85,23 @@ def open_verify_files(self, vplan): if not os.path.isdir(report_dir): os.makedirs(report_dir, exist_ok=True) except BaseException as err: - logging.error(' !!! Cannot create directory %s for report file %s', - report_dir, vplan.report_path) - logging.error(' !!! Error = %s', err) + logger.error(' !!! Cannot create directory %s for report file %s', + report_dir, vplan.report_path) + logger.error(' !!! Error = %s', err) return None try: vplan.report_file = self.report_file = open(vplan.report_path, encoding='utf-8', mode='w') except BaseException as err: - logging.error('*** Cannot open file %s: Error = %s', vplan.report_path, err) + logger.error('*** Cannot open file %s: Error = %s', vplan.report_path, err) return None # Get the input file to explain test failures try: self.testdata_file = open(vplan.testdata_path, encoding='utf-8', mode='r') except BaseException as err: - logging.error('*** Cannot open testdata file %s: Error = %s', vplan.testdata_path, err) + logger.error('*** Cannot open testdata file %s: Error = %s', vplan.testdata_path, err) return None # Initialize values for this case. 
@@ -142,8 +141,8 @@ def setup_verify_plans(self): for test_type in test_list: if test_type not in ddt_data.testDatasets: - logging.warning('**** WARNING: test_type %s not in testDatasets', - test_type) + logger.warning('**** WARNING: test_type %s not in testDatasets', + test_type) raise ValueError('No test dataset found for test type >%s<' % self.test_type) else: @@ -219,16 +218,17 @@ def setup_verify_plans(self): if os.path.isfile(new_verify_plan.result_path): self.verify_plans.append(new_verify_plan) else: - logging.debug('** No results for %s, %s, %s', - executor, test_type, result_version) + # logger.debug('** No results for %s, %s, %s', + # executor, test_type, result_version) + pass # Verify plans in parallel def parallel_verify_data_results(self): if not self.options.run_serial: num_processors = mp.cpu_count() verify_plans = self.verify_plans - logging.info('JSON validation: %s processors for %s plans', - num_processors, len(verify_plans)) + logger.info('JSON validation: %s processors for %s plans', + num_processors, len(verify_plans)) processor_pool = mp.Pool(num_processors) with processor_pool as p: @@ -239,7 +239,7 @@ def parallel_verify_data_results(self): err, verify_plans) return result else: - logging.info('Running serially!') + logger.info('Running serially!') for vplan in self.verify_plans: self.verify_one_plan(vplan) @@ -250,9 +250,9 @@ def verify_one_plan(self, vplan): # Read data if not vplan.read_verify_files(): # Problems getting input data - logging.warning('Cannot get data %s or %s', - vplan.result_path, - vplan.verify_path) + logger.warning('Cannot get data %s or %s', + vplan.result_path, + vplan.verify_path) return None # This fills in values of the report @@ -268,14 +268,11 @@ def verify_one_plan(self, vplan): # Save the results if not vplan.report.save_report(): - logging.error('!!! Could not save report for (%s, %s)', - vplan.test_type, vplan.exec) + logger.error('!!! Could not save report for (%s, %s)', + vplan.test_type, vplan.exec) else: vplan.report.create_html_report() - logging.debug('\nTEST RESULTS in %s for %s. %d tests found', - vplan.exec, vplan.test_type, len(vplan.test_results)) - return result def analyze_failures(self): @@ -286,7 +283,7 @@ def analyze_failures(self): def setup_paths(self, executor, testfile, verify_file): base_dir = self.file_base if self.debug > 1: - logging.debug('&&& FILE BASE = %s', base_dir) + logger.debug('&&& FILE BASE = %s', base_dir) # Check on the path defined here test_output_dir = 'testOutput' self.resultPath = os.path.join( @@ -294,10 +291,9 @@ def setup_paths(self, executor, testfile, verify_file): self.verifyPath = os.path.join( base_dir, 'testData', verify_file) - if self.debug > 0: - logging.debug('RESULT PATH = %s', self.resultPath) - logging.debug('VERIFY PATH = %s', self.verifyPath) - logging.debug('TESTDATA PATH = %s', self.testdata_path) + logger.debug('RESULT PATH = %s', self.resultPath) + logger.debug('VERIFY PATH = %s', self.verifyPath) + logger.debug('TESTDATA PATH = %s', self.testdata_path) # Create HTML summary files def create_summary_reports(self): @@ -306,6 +302,8 @@ def create_summary_reports(self): # The following gets information from all the tests summary_report = SummaryReport(self.file_base) + summary_report.logger = logger + summary_report.setup_all_test_results() @@ -319,7 +317,7 @@ def create_summary_reports(self): # And make the output HTML results result = summary_report.create_summary_html() if not result: - logging.error('!!!!!! SUMMARY HTML fails') + logger.error('!!!!!! 
SUMMARY HTML fails') def schema_results(self): # Locate the files in schema, testData, and testOutput @@ -332,8 +330,8 @@ def schema_results(self): try: shutil.copyfile(schema_validation, validation_copy) except BaseException as error: - logging.warning('%s. Cannot copy schema validation file from %s to %s', - error,schema_validation, validation_copy) + logger.warning('%s. Cannot copy schema validation file from %s to %s', + error,schema_validation, validation_copy) generated_data_validation_name = 'test_data_validation_summary.json' generated_validation = os.path.join(self.file_base, 'testData', generated_data_validation_name) @@ -343,8 +341,8 @@ def schema_results(self): try: shutil.copyfile(generated_validation, validation_copy) except BaseException as error: - logging.warning('%s. Cannot copy schema validation file from %s to %s', - error, generated_data_validation_name, validation_copy) + logger.warning('%s. Cannot copy schema validation file from %s to %s', + error, generated_data_validation_name, validation_copy) test_output_validation_name = 'test_output_validation_summary.json' test_output_validation = os.path.join(self.file_base, 'testOutput', test_output_validation_name) @@ -354,8 +352,8 @@ def schema_results(self): try: shutil.copyfile(test_output_validation, validation_copy) except BaseException as error: - logging.warning('%s. Cannot copy schema validation file from %s to %s', - error, test_output_validation, validation_copy) + logger.warning('%s. Cannot copy schema validation file from %s to %s', + error, test_output_validation, validation_copy) return [schema_validation_name, generated_data_validation_name, test_output_validation_name] @@ -412,10 +410,10 @@ def display_names_exec(self, executor): executor, 'display_names.json', 'display_names_verify.json') def print_result(self): - logging.info('\n Test Report for %s', self.title) + logger.info('\n Test Report for %s', self.title) test_report = self.verifier.report report_data = test_report.create_report() - logging.info(' Report: %s', report_data) + logger.info(' Report: %s', report_data) def run_verifier_tests(): @@ -449,18 +447,17 @@ def main(args): if not verifier.options.summary_only: # Run the tests on the provided parameters. - logging.info('Verifier starting on %d verify cases', - len(verifier.verify_plans)) + logger.info('Verifier starting on %d verify cases', + len(verifier.verify_plans)) # Use multiprocessing on verification verifier.parallel_verify_data_results() - logging.info('Verifier completed %d data reports', - len(verifier.verify_plans)) + logger.info('Verifier completed %d data reports', + len(verifier.verify_plans)) - # TODO: Should this be optional? verifier.create_summary_reports() - logging.info('Verifier completed summary report') + logger.info('Verifier completed summary report') # Copy any other files such as JavaScript to the report area verifier.copy_js_files() diff --git a/verifier/verify_plan.py b/verifier/verify_plan.py index 408453ac..9622fedd 100644 --- a/verifier/verify_plan.py +++ b/verifier/verify_plan.py @@ -32,6 +32,8 @@ def __init__(self, self.exec = None self.test_type = None + self.debug = 0 + # Is this actually useful? self.verifier_obj = None @@ -163,7 +165,7 @@ def compare_test_to_expected(self): if not test: logging.debug('@@@@@ no test string: %s of %s', test, ltotal_results) - if index % 10000 == 0: + if self.debug > 0 and index % 10000 == 0: logging.debug(' progress = %d / %s', index, total_results) # The input to the test
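
The changes above converge on a pattern of module-level loggers created with logging.Logger("NAME") and set to WARNING, alongside a new --log command-line option (default 'WARNING', help 'logging level') added in ddtargs.py that is parsed but, in the hunks shown here, not yet applied to any logger. Below is a minimal, self-contained sketch of how those two pieces could be wired together; it is illustrative only, not code from this repository: the helper name set_log_level is hypothetical, and it uses logging.getLogger() rather than instantiating logging.Logger directly, because getLogger() registers the logger in the logging hierarchy so handlers configured elsewhere (basicConfig here, or a logging.conf fileConfig as used in this repo) still receive its records.

    import argparse
    import logging

    # Module-level logger; logging.getLogger() attaches it to the logging
    # hierarchy, so handlers set up elsewhere (basicConfig or a config file)
    # still receive its records, unlike a logger instantiated directly with
    # logging.Logger(...).
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.WARNING)


    def set_log_level(level_name):
        # Hypothetical helper: map a --log value such as 'DEBUG' or 'INFO' to
        # a numeric logging level, falling back to WARNING for unknown names.
        level = getattr(logging, str(level_name).upper(), None)
        if not isinstance(level, int):
            level = logging.WARNING
        logger.setLevel(level)


    if __name__ == '__main__':
        logging.basicConfig()  # attach a default stderr handler
        parser = argparse.ArgumentParser()
        parser.add_argument('--log', default='WARNING', help='logging level')
        args = parser.parse_args()
        set_log_level(args.log)
        logger.debug('debug output enabled')   # emitted only with --log DEBUG
        logger.warning('warnings are emitted at the default WARNING level')

A further benefit of the getLogger() form is that repeated calls with the same name return the same logger object, so modules that import each other share one configuration instead of each holding an unattached Logger instance.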