diff --git a/gnssanalysis/gn_io/clk.py b/gnssanalysis/gn_io/clk.py index 9d3f789..e625a81 100644 --- a/gnssanalysis/gn_io/clk.py +++ b/gnssanalysis/gn_io/clk.py @@ -1,6 +1,7 @@ """RINEX CLK file parsing function""" import logging as _logging +from pathlib import Path import re as _re from io import BytesIO as _BytesIO from typing import Union as _Union @@ -15,8 +16,8 @@ _RE_LINE = _re.compile(rb"(AS[ ]G.+)") # GPS SV line (other GNSS may not have STD) -def read_clk(clk_path): - content = _gn_io.common.path2bytes(str(clk_path)) +def read_clk(clk_path_or_bytes: _Union[Path, str, bytes]) -> _pd.DataFrame: + content = _gn_io.common.path2bytes(clk_path_or_bytes) data_b = content.find(b"END OF HEADER") + 13 data_b += content[data_b : data_b + 20].find(b"\n") + 1 @@ -32,7 +33,7 @@ def read_clk(clk_path): clk_cols += [10] clk_names += ["STD"] - clk_df = _pd.read_csv( + clk_df = _pd.read_csv( # TODO consider updating to read_fwf() _BytesIO(data), sep="\\s+", # delim_whitespace is deprecated header=None, @@ -77,12 +78,12 @@ def get_sv_clocks(clk_df: _pd.DataFrame) -> _pd.Series: :raises IndexError: Raise error if the dataframe is not indexed correctly :return _pd.Series: Retrieved satellite clocks """ - if clk_df.index.names == ['A', 'J2000', 'CODE']: + if clk_df.index.names == ["A", "J2000", "CODE"]: # fastest method to grab a specific category!, same as clk_df.EST.loc['AS'] but >6 times faster AS_cat_code = clk_df.index.levels[0].categories.get_loc("AS") mask = clk_df.index.codes[0] == AS_cat_code return _pd.Series(data=clk_df.values[:, 0][mask], index=clk_df.index.droplevel(0)[mask]) - elif clk_df.index.names == ['J2000', 'PRN']: + elif clk_df.index.names == ["J2000", "PRN"]: return _pd.Series(data=clk_df[("EST", "CLK")].values, index=clk_df.index) else: raise IndexError("Incorrect index names of dataframe") diff --git a/gnssanalysis/gn_io/sp3.py b/gnssanalysis/gn_io/sp3.py index 2938e59..e1c9cab 100644 --- a/gnssanalysis/gn_io/sp3.py +++ b/gnssanalysis/gn_io/sp3.py @@ 
-407,17 +407,17 @@ def remove_offline_sats(sp3_df: _pd.DataFrame, df_friendly_name: str = "") -> _p offline_sats = sp3_df[mask_either].index.get_level_values(1).unique() # Using that list of offline / partially offline sats, remove all entries for those sats from the SP3 DataFrame: - sp3_df = sp3_df.drop(offline_sats, level=1, errors="ignore") + sp3_df_cleaned = sp3_df.drop(offline_sats, level=1, errors="ignore") if len(offline_sats) > 0: # Update the internal representation of the SP3 header to match the change - remove_svs_from_header(sp3_df, offline_sats.values) + remove_svs_from_header(sp3_df_cleaned, offline_sats.values) logger.info( f"Dropped offline / nodata sats from {df_friendly_name} SP3 DataFrame (including header): {offline_sats.values}" ) else: logger.info(f"No offline / nodata sats detected to be dropped from {df_friendly_name} SP3 DataFrame") - return sp3_df + return sp3_df_cleaned def filter_by_svs( diff --git a/gnssanalysis/gn_utils.py b/gnssanalysis/gn_utils.py index 8abb9b1..8e2dd4e 100644 --- a/gnssanalysis/gn_utils.py +++ b/gnssanalysis/gn_utils.py @@ -997,6 +997,28 @@ def __exit__(self, type, value, traceback): print(self.readout) +def stringify_warnings(captured_warnings: list[warnings.WarningMessage]) -> str: + """ + Convenience function to convert a list of warning messages to a string. + E.g. output: + Warning message #1: Some warning + Warning message #2: Some other warning + ... + + :param captured_warnings: list of warning message objects (e.g. 
from UnitTest's _AssertWarnsContext.warnings) + :type captured_warnings: list[warnings.WarningMessage] + :return: rendered string for multi-line log output + :rtype: str + """ + aggregate_message = "" + for i in range(len(captured_warnings)): + w = captured_warnings[i] + aggregate_message += f"Warning message #{i+1}: {str(w.message)}\n" + return aggregate_message + # Alternatively: + # return f"{''.join('MESSAGE -> ' + str(w.message) + NEWLINE for w in captured_warnings)}" + + def sha256(bytes_to_hash: bytes) -> str: """ Convenience wrapper to quickly call hashlib.sha256 and return a hex digest string @@ -1126,7 +1148,7 @@ def ensure_unique_objects(objects: list[object]) -> None: @staticmethod def create_baseline( # Was baseline_pickled_df_list_and_hash() - current_object_list: list[object], + current_object_list: list, # Any kind of object is ok # These are used to describe the calling class and function, and are inferred automatically. If needed they # can be explicitly set here: subdir: Optional[_pathlib.Path] = None, @@ -1203,7 +1225,8 @@ def create_baseline( # Was baseline_pickled_df_list_and_hash() @staticmethod def verify( # Was create_and_verify_pickled_df_list() - current_object_list: list[object], + current_object_list: list, # Can be any type of object (though diff output only supported for some types) + # TODO update to output notice rather than crashing, if type encountered we can't print a diff for. 
# parent_dir: _pathlib.Path = BASELINE_DATAFRAME_RECORDS_DIR_ROOT_RELATIVE, # Option to strictly enforce that a baseline must exist for anything this function is invoked to check: raise_for_missing_baseline: bool = False, diff --git a/tests/test_clk.py b/tests/test_clk.py index 5e1fb05..a77ab4d 100644 --- a/tests/test_clk.py +++ b/tests/test_clk.py @@ -1,7 +1,9 @@ -from pyfakefs.fake_filesystem_unittest import TestCase +from pandas import DataFrame +from unittest import TestCase import gnssanalysis.gn_io.clk as clk import gnssanalysis.gn_diffaux as gn_diffaux +from gnssanalysis.gn_utils import UnitTestBaseliner, stringify_warnings from test_datasets.clk_test_data import ( # first dataset is a truncated version of file IGS0OPSRAP_20240400000_01D_05M_CLK.CLK: @@ -12,18 +14,13 @@ class TestClk(TestCase): - def setUp(self): - self.setUpPyfakefs() - self.fs.reset() def test_clk_read(self): - self.fs.reset() - file_paths = ["/fake/dir/file0.clk", "/fake/dir/file1.clk"] - self.fs.create_file(file_paths[0], contents=input_data_igs) - self.fs.create_file(file_paths[1], contents=input_data_gfz) + clk_df_igs: DataFrame = clk.read_clk(clk_path_or_bytes=input_data_igs) + clk_df_gfz: DataFrame = clk.read_clk(clk_path_or_bytes=input_data_gfz) - clk_df_igs = clk.read_clk(clk_path=file_paths[0]) - clk_df_gfz = clk.read_clk(clk_path=file_paths[1]) + # To help detect changes / regressions, check the dataframe we constructed against the stored hash. + # If they differ, load stored DF from pickle and print the difference. 
self.assertEqual(len(clk_df_igs), 93, msg="Check that data generally read into df as expected") self.assertEqual(len(clk_df_gfz), 90, msg="Check that data generally read into df as expected") @@ -34,17 +31,24 @@ def test_clk_read(self): self.assertEqual(clk_df_igs["EST"].iloc[-1], -0.0006105557076344, msg="Check last datapoint is correct") self.assertEqual(clk_df_gfz["EST"].iloc[-1], -0.000610553573006, msg="Check last datapoint is correct") + # Baseline (manually) to disk + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.record_baseline([clk_df_igs, clk_df_gfz]) + + # Verify against on disk baseline + self.assertTrue(UnitTestBaseliner.verify([clk_df_igs, clk_df_gfz]), "Hash verify should succeed") + def test_diff_clk(self): """ Note this also tests the now deprecated version, compare_clk() """ - self.fs.reset() # Reset pyfakefs to delete any files which may have persisted from a previous test - file_paths = ["/fake/dir/file0.clk", "/fake/dir/file1.clk"] - self.fs.create_file(file_paths[0], contents=input_data_igs) - self.fs.create_file(file_paths[1], contents=input_data_gfz) - clk_df_igs = clk.read_clk(clk_path=file_paths[0]) - clk_df_gfz = clk.read_clk(clk_path=file_paths[1]) + # List of dataframes created during this test, to compare against baselined results on disk (regression check). + dfs_to_verify: list[object] = [] + + # Don't include these in the baseline, as test_clk_read() already looks after that. + clk_df_igs = clk.read_clk(clk_path_or_bytes=input_data_igs) + clk_df_gfz = clk.read_clk(clk_path_or_bytes=input_data_gfz) # Deprecated version # Ensure depreciation warnings are raised, but don't print them. 
@@ -60,6 +64,7 @@ def test_diff_clk(self): result_epoch_G07 = gn_diffaux.compare_clk(clk_a=clk_df_igs, clk_b=clk_df_gfz, norm_types=["epoch", "G07"]) result_daily_G08 = gn_diffaux.compare_clk(clk_a=clk_df_igs, clk_b=clk_df_gfz, norm_types=["daily", "G08"]) result_G09_G11 = gn_diffaux.compare_clk(clk_a=clk_df_igs, clk_b=clk_df_gfz, norm_types=["G09", "G11"]) + captured_warnings = warning_assessor.warnings self.assertEqual( "compare_clk() is deprecated. Please use diff_clk() and note that the clk inputs are in opposite order", @@ -68,7 +73,10 @@ def test_diff_clk(self): self.assertEqual( len(captured_warnings), 9, - "Expected exactly 9 warnings. Check what other warnings are being raised!", + "Expected exactly 9 warnings. Check what other warnings are being raised! Full list below:\n" + + stringify_warnings(captured_warnings), + # Passing the converted warning strings to the assert may not be very efficient. Consider changing if + # it slows things down. ) # Test index is as expected @@ -79,12 +87,29 @@ def test_diff_clk(self): self.assertEqual(result_epoch_only["G04"].iloc[0], 2.7128617820053325e-12, msg="Check datapoint is correct") self.assertEqual(result_sv_only["G05"].iloc[0], 1.1623200004470119e-10, msg="Check datapoint is correct") self.assertEqual(result_G06["G06"].iloc[0], 0.0, msg="Check datapoint is correct") - self.assertEqual(result_daily_epoch_G04["G07"].iloc[0], 1.3071733365871419e-11, msg="Check datapoint is correct") + self.assertEqual( + result_daily_epoch_G04["G07"].iloc[0], 1.3071733365871419e-11, msg="Check datapoint is correct" + ) self.assertEqual(result_epoch_G07["G08"].iloc[0], -3.3217389966032004e-11, msg="Check datapoint is correct") self.assertEqual(result_daily_G08["G09"].iloc[-1], 1.3818666534399365e-12, msg="Check datapoint is correct") self.assertEqual(result_G09_G11["G11"].iloc[-1], 0.0, msg="Check datapoint is correct") self.assertEqual(result_G09_G11["G01"].iloc[-1], 8.94520000606358e-11, msg="Check datapoint is correct") + # Add 
all these output DFs to the list to be compared against the baseline on disk + dfs_to_verify.extend( + [ + result_default, + result_daily_only, + result_epoch_only, + result_sv_only, + result_G06, + result_daily_epoch_G04, + result_epoch_G07, + result_daily_G08, + result_G09_G11, + ] + ) + # New version (clk order flipped) result_default = gn_diffaux.diff_clk(clk_baseline=clk_df_gfz, clk_test=clk_df_igs) result_daily_only = gn_diffaux.diff_clk(clk_baseline=clk_df_gfz, clk_test=clk_df_igs, norm_types=["daily"]) @@ -117,3 +142,40 @@ def test_diff_clk(self): self.assertEqual(result_daily_G08["G09"].iloc[-1], 1.3818666534399365e-12, msg="Check datapoint is correct") self.assertEqual(result_G09_G11["G11"].iloc[-1], 0.0, msg="Check datapoint is correct") self.assertEqual(result_G09_G11["G01"].iloc[-1], 8.94520000606358e-11, msg="Check datapoint is correct") + + dfs_to_verify.extend( + [ + result_default, + result_daily_only, + result_epoch_only, + result_sv_only, + result_G06, + result_daily_epoch_G04, + result_epoch_G07, + result_daily_G08, + result_G09_G11, + ] + ) + + # Baseline establishment (manual use only). 
DO NOT commit this enabled: + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.record_baseline(dfs_to_verify) + + # Verify all dataframes against recorded baseline on disk + self.assertTrue( + UnitTestBaseliner.verify(dfs_to_verify), "Validation should succeed (unless in baselining mode)" + ) + + +# if __name__ == "__main__": +# # For debugger use + +# logging.basicConfig(format="%(levelname)s: %(message)s") +# logger = logging.getLogger() +# logger.setLevel(logging.DEBUG) + +# os.chdir("./tests") + +# test_clk = TestClk() +# test_clk.test_diff_clk() +# test_clk.test_clk_read() diff --git a/tests/test_igslog.py b/tests/test_igslog.py index 5b8e5da..ddb3819 100644 --- a/tests/test_igslog.py +++ b/tests/test_igslog.py @@ -2,6 +2,7 @@ from pyfakefs.fake_filesystem_unittest import TestCase from gnssanalysis.gn_io import igslog +from gnssanalysis.gn_utils import UnitTestBaseliner from test_datasets.sitelog_test_data import ( abmf_site_log_v1 as v1_data, abmf_site_log_v2 as v2_data, @@ -9,7 +10,7 @@ ) -class TestRegex(unittest.TestCase): +class TestIgsLogRegex(unittest.TestCase): """ Test the various regex expressions used in the parsing of IGS log files """ @@ -20,13 +21,15 @@ def test_determine_log_version(self): self.assertEqual(igslog.determine_log_version(v2_data), "v2.0") # Check that LogVersionError is raised on wrong data - self.assertRaises(igslog.LogVersionError, igslog.determine_log_version, b"Wrong data") + with self.assertRaises(igslog.LogVersionError): + igslog.determine_log_version(b"Wrong data") def test_extract_id_block(self): # Ensure the extract of ID information works and gives correct dome number: self.assertEqual(igslog.extract_id_block(v1_data, "/example/path", "ABMF", "v1.0"), ["ABMF", "97103M001"]) self.assertEqual(igslog.extract_id_block(v2_data, "/example/path", "ABMF", "v2.0"), ["ABMF", "97103M001"]) - # Check automatic version determination works as expected: + # Check that automatic version determination is used when a version is 
not provided. This + # leverages determine_log_version() which is already tested above: self.assertEqual(igslog.extract_id_block(v1_data, "/example/path", "ABMF"), ["ABMF", "97103M001"]) # Check LogVersionError is raised on no data: @@ -42,6 +45,7 @@ def test_extract_id_block(self): def test_extract_location_block(self): # Version 1 Location description results: v1_location_block = igslog.extract_location_block(v1_data, "/example/path", "v1.0") + # NOTE: this test cannot currently support baselining. This will be addressed in NPI-4492 self.assertEqual(v1_location_block.group(1), b"Les Abymes") self.assertEqual(v1_location_block.group(2), b"Guadeloupe") @@ -86,6 +90,16 @@ def test_extract_receiver_block(self): # Last receiver should not have an end date assigned (i.e. current): self.assertEqual(v2_receiver_block[-1][-1], b"") + objs_to_verify: list[object] = [v1_receiver_block, v2_receiver_block] + + # Baseline (manually) + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline(objs_to_verify) + + # Verify + self.assertTrue(UnitTestBaseliner.verify(objs_to_verify), "Hash verification should pass") + # TODO update verify() to support required datatypes, so it does not crash if hash changes + def test_extract_antenna_block(self): # Testing version 1: v1_antenna_block = igslog.extract_antenna_block(v1_data, "/example/path") @@ -101,8 +115,18 @@ def test_extract_antenna_block(self): # Last antenna should not have an end date assigned (i.e. 
current): self.assertEqual(v2_antenna_block[-1][-1], b"") + objs_to_verify: list[object] = [v1_antenna_block, v2_antenna_block] -class TestDataParsing(unittest.TestCase): + # Baseline (manually) + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline(objs_to_verify) + + # Verify + self.assertTrue(UnitTestBaseliner.verify(objs_to_verify), "Hash verification should pass") + # TODO update verify() to support required datatypes, so it does not crash if hash changes + + +class TestIgsLogDataParsing(unittest.TestCase): """ Test the integrated functions that gather and parse information from IGS log files """ @@ -122,8 +146,18 @@ def test_parse_igs_log_data(self): # Check last antenna type: self.assertEqual(v2_data_parsed[-1][2], "TRM57971.00") + objs_to_verify: list[object] = [v1_data_parsed, v2_data_parsed] + + # Baseline (manually) + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline(objs_to_verify) -class TestFileParsing(TestCase): + # Verify + self.assertTrue(UnitTestBaseliner.verify(objs_to_verify), "Hash verification should pass") + # TODO update verify() to support required datatypes, so it does not crash if hash changes + + +class TestIgsLogFileParsing(TestCase): """ Test gather_metadata() """ @@ -158,3 +192,25 @@ def test_gather_metadata(self): self.assertEqual(record_3.CODE, "AGGO") # Antenna info: test for antenna serial number self.assertEqual(result[2]["S/N"][4], "726722") + + # As the gather_metadata() function we are testing here, reads from a filesystem and outputs a DataFrame, + # running it without pyfakefs isn't practical. So we temporarily suspend patching in order to run baselining. + # See docs here: + # https://pytest-pyfakefs.readthedocs.io/en/latest/convenience.html#suspending-patching + + # Pause fake filesystem patching to allow access to baseline files. 
+ self.fs.pause() + + # Create a generic (object rather than DF) list, and copy elements across + dfs_to_verify: list[object] = [] + dfs_to_verify.extend(result) + + # Baseline (manually) + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline(dfs_to_verify) + + # Verify + self.assertTrue(UnitTestBaseliner.verify(dfs_to_verify), "Hash verification should pass") + + # Ensure pyfakefs is re-enabled before further tests run + self.fs.resume() diff --git a/tests/test_sp3.py b/tests/test_sp3.py index 15cd553..2e4e6be 100644 --- a/tests/test_sp3.py +++ b/tests/test_sp3.py @@ -4,11 +4,19 @@ import numpy as np import pandas as pd +from pandas import DataFrame from gnssanalysis.filenames import convert_nominal_span, determine_properties_from_filename import gnssanalysis.gn_io.sp3 as sp3 -from gnssanalysis.gn_utils import STRICT_OFF, STRICT_RAISE, STRICT_WARN, trim_line_ends +from gnssanalysis.gn_utils import ( + STRICT_OFF, + STRICT_RAISE, + STRICT_WARN, + UnitTestBaseliner, + stringify_warnings, + trim_line_ends, +) from test_datasets.sp3_test_data import ( fake_header_version_a, fake_header_version_b, @@ -98,12 +106,13 @@ def test_check_sp3_version(self): self.assertEqual( len(captured_warnings), 2, - "Expected only 2 warnings. Check what other warnings are being raised!", + "Expected only 2 warnings. Check what other warnings are being raised! Full list below:\n" + + stringify_warnings(captured_warnings), ) # Our best supported version should return True self.assertEqual( - sp3.check_sp3_version(fake_header_version_d), True, "SP3 version d should be considered best supported" + sp3.check_sp3_version(fake_header_version_d), True, "SP3 version d should be considered supported" ) # StrictModes.STRICT_RAISE should cause a *possibly* supported version to raise an exception. 
@@ -114,6 +123,11 @@ def test_read_sp3_pOnly(self): result = sp3.read_sp3(input_data, pOnly=True, strict_mode=STRICT_OFF) self.assertEqual(len(result), 6) + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline([result]) # DO NOT commit this line un-commented. + + self.assertTrue(UnitTestBaseliner.verify([result]), "Hash verification should pass") + def test_read_sp3_pv(self): result = sp3.read_sp3(input_data, pOnly=False, strict_mode=STRICT_OFF) self.assertEqual(len(result), 6) @@ -122,11 +136,19 @@ def test_read_sp3_pv(self): self.assertEqual(result.attrs["HEADER"]["HEAD"]["DATETIME"], "2007 4 12 0 0 0.00000000") self.assertEqual(result.index[0][0], 229608000) # Same date, as J2000 + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline([result]) # DO NOT commit this line un-commented. + + self.assertTrue(UnitTestBaseliner.verify([result]), "Hash verification should pass") + def test_read_sp3_pv_with_ev_ep_rows(self): # Expect exception relating to the EV and EP rows (in RAISE mode), as we can't currently handle them properly. with self.assertRaises(NotImplementedError) as raised_exception: sp3.read_sp3(sp3c_example2_data, pOnly=False, strict_mode=STRICT_RAISE, skip_version_check=True) + # Assert that raised exception says what we expect it to + self.assertEqual(str(raised_exception.exception), "EP and EV flag rows are currently not supported") + def test_read_sp3_header_svs_basic(self): """ Minimal test of reading SVs from header @@ -136,6 +158,12 @@ def test_read_sp3_header_svs_basic(self): self.assertEqual(result.attrs["HEADER"]["SV_INFO"].index[1], "G02", "Second SV should be G02") self.assertEqual(result.attrs["HEADER"]["SV_INFO"].iloc[1], 8, "Second ACC should be 8") + # Somewhat redundant as it tests the same use of the read function as an already baselined test above + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline([result]) # DO NOT commit this line un-commented. 
+ + self.assertTrue(UnitTestBaseliner.verify([result]), "Hash verification should pass") + def test_read_sp3_header_svs_detailed(self): """ Test header parser's ability to read SVs and their accuracy codes correctly. Uses separate, artificial @@ -182,6 +210,12 @@ def test_read_sp3_header_svs_detailed(self): end_line2_acc = sv_info.iloc[29] self.assertEqual(end_line2_acc, 18, msg="Last ACC on test line 2 (pos 30) should be 18") + # TODO add support for pandas Series + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline([result]) # DO NOT commit this line un-commented. + + self.assertTrue(UnitTestBaseliner.verify([result]), "Hash verification should pass") + def test_read_sp3_validation_sv_count_mismatch_header_vs_content(self): with self.assertRaises(ValueError) as context_manager: sp3.read_sp3( @@ -206,6 +240,12 @@ def test_read_sp3_correct_svs_read_when_ev_ep_present(self): parsed_svs_content = sp3.get_unique_svs(result).astype(str).values self.assertEqual(set(parsed_svs_content), set(["G01", "G02", "G03", "G04", "G05"])) + # TODO add support for pandas Index + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline([result, parsed_svs_content]) # DO NOT commit this line un-commented. + + self.assertTrue(UnitTestBaseliner.verify([result, parsed_svs_content]), "Hash verification should pass") + # TODO Add test(s) for correctly reading header fundamentals (ACC, ORB_TYPE, etc.) # TODO add tests for correctly reading the actual content of the SP3 in addition to the header. @@ -214,26 +254,28 @@ def test_read_sp3_overlong_lines(self): Test overlong content line check """ - test_content_no_overlong: bytes = b"""#dV2007 4 12 0 0 0.00000000 2 ORBIT IGS14 BHN ESOC + test_content_overlong: bytes = b"""#dV2007 4 12 0 0 0.00000000 2 ORBIT IGS14 BHN ESOC ## 1422 345600.00000000 900.00000000 54202 0.0000000000000 THIS LINE IS TOO LONG + 2 G01G02 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 THIS IS OK......... 
-+ 2 G01G02 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 TOO LONG AGAIN ...... ++ 2 G01G02 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 TOO LONG AGAIN 2...... ++ 2 G01G02 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 TOO LONG AGAIN 3...... ++ 2 G01G02 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 TOO LONG AGAIN 4...... ++ 2 G01G02 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 TOO LONG AGAIN 5...... ++ 2 G01G02 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 TOO LONG AGAIN 6...... """ - # test_content_no_overlong: bytes = b"""#dV2007 4 12 0 0 0.00000000 2 ORBIT IGS14 BHN ESOC - # + 2 G01G02 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 THIS IS OK......... - # """ - # sp3.read_sp3(test_content_no_overlong) with self.assertWarns(Warning) as warning_assessor: with self.assertRaises(ValueError) as read_exception: - sp3.read_sp3(test_content_no_overlong, strictness_comments=STRICT_OFF, strict_mode=STRICT_RAISE) + sp3.read_sp3(test_content_overlong, strictness_comments=STRICT_OFF, strict_mode=STRICT_RAISE) self.assertEqual( str(read_exception.exception), - "2 SP3 epoch data lines were overlong and very likely to parse incorrectly.", + "6 SP3 epoch data lines were overlong and very likely to parse incorrectly.", ) captured_warnings = warning_assessor.warnings self.assertIn("Line of SP3 input exceeded max width:", str(captured_warnings[0].message)) + self.assertIn("TOO LONG AGAIN 5......", str(captured_warnings[4].message)) + self.assertEqual(len(captured_warnings), 5, "Only the first 5 overlong SP3 content lines should be printed") # # Assert that it still warns by default (NOTE: we can't test this with above example data, as it doens't # # contain a full header) @@ -244,11 +286,16 @@ def test_read_sp3_overlong_lines(self): # str(read_warning.msg), "2 SP3 epoch data lines were overlong and very likely to parse incorrectly." # ) + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline(captured_warnings) # DO NOT commit this line un-commented. 
+ + self.assertTrue(UnitTestBaseliner.verify(captured_warnings), "Hash verification should pass") + def test_read_sp3_misalignment_check(self): """ - Test that misaligned columns raise an error (currently only in STRICT mode). - Strictness of comment checking is set to OFF, as the test data has a comment line equal to '*/' not '*/ ' + Test that misaligned columns raise an error in strict_mode=RAISE (by default it's a warning). """ + # NOTE: Strictness of *comment* checking is set to OFF, as the test data has a comment line equal to '*/' not '*/ ' with self.assertRaises(ValueError) as read_exception: sp3.read_sp3(sp3_test_data_misaligned_columns, strict_mode=STRICT_RAISE, strictness_comments=STRICT_OFF) self.assertEqual( @@ -260,26 +307,27 @@ def test_sp3_block_column_check_standalone(self): """ Test that misaligned columns in an epoch block raise an error (currently only in STRICT mode) """ - # Check that misaligned (but artificially not overlong) data line, raises exception - with self.assertRaises(ValueError) as misaligned_ex: - data = """ + + data = """ PG06 -16988.173766 -1949.602010 -20295.348670 13551.688732 PG07 -2270.179246 -18040.766586 19792.234454 13925.747073 PG08-538216.0254931012968.294871-1053208.82032548447864.338317 PG09 -7083.058359 -25531.577633 -1359.151582 14650.575917 """ + # Check that misaligned (but artificially not overlong) data line, raises exception + with self.assertRaises(ValueError) as misaligned_ex: sp3._check_column_alignment_of_sp3_block("* 2025 6 17 6 0 0.00000000", data, strict_mode=STRICT_RAISE) self.assertEqual( "Misaligned data line (unused column did not contain a space): 'PG08-538216.0254931012968.294871-1053208.82032548447864.338317 '", str(misaligned_ex.exception), ) - # Check that misaligned data line (flags) trimmed to 80 chars, raises exception - with self.assertRaises(ValueError) as misaligned_flags: - data = """ + data = """ PG06 -5247.775383 -25963.469495 -106.156584 15892.813576 P P PG07-1245784.756055 
252424.937619-521507.7748633049872.304950 P """ + # Check that misaligned data line (flags) trimmed to 80 chars, raises exception + with self.assertRaises(ValueError) as misaligned_flags: sp3._check_column_alignment_of_sp3_block("* 2025 6 17 6 0 0.00000000", data, strict_mode=STRICT_RAISE) self.assertEqual( "Misaligned data line (unused column did not contain a space): 'PG07-1245784.756055 252424.937619-521507.7748633049872.304950 P '", @@ -419,17 +467,54 @@ def get_example_dataframe(template_name: str = "normal", include_simple_header: ], ) - # Merge SV table and header, and store as 'HEADER' + # Merge SV table and header into a single Series object, and store that as 'HEADER' df.attrs["HEADER"] = pd.concat([sp3_heading, sv_tbl], keys=["HEAD", "SV_INFO"], axis=0) return df + def baseline_get_example_dataframe(self): + + # NOTE: this function creates a baseline, but does not do any testing beyond that. + # I.e. it will detect regressions, but does not assert that the starting value is correct. + + # TODO enable these once the default template is implemented + # ex_df_default = TestSP3.get_example_dataframe() + # ex_df_default_no_header = TestSP3.get_example_dataframe(include_simple_header=False) + + ex_df_dupe = TestSP3.get_example_dataframe(template_name="dupe_epoch_offline_sat_empty_epoch") + ex_df_dupe_no_header = TestSP3.get_example_dataframe( + template_name="dupe_epoch_offline_sat_empty_epoch", include_simple_header=False + ) + + ex_df_offline_nan = TestSP3.get_example_dataframe(template_name="offline_sat_nan") + ex_df_offline_zero = TestSP3.get_example_dataframe(template_name="offline_sat_zero") + + objects_to_verify: list = [ + ex_df_dupe, + ex_df_dupe_no_header, + ex_df_offline_nan, + ex_df_offline_zero, + ] + + # TODO baseline outputs + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline(objects_to_verify) # DO NOT commit this line un-commented. 
+ + self.assertTrue(UnitTestBaseliner.verify(objects_to_verify), "Hash verification should pass") + + # TODO implement the following to actually test the example DF function, not just check for regressions against + # the current value + # def test_get_example_dataframe(self): + def test_clean_sp3_orb(self): """ Tests cleaning an SP3 DataFrame of duplicates, leading or trailing nodata values, and offline sats """ + objects_to_verify: list = [] + # Create dataframe manually, as read function does deduplication itself. This also makes the test more self-contained sp3_df = TestSP3.get_example_dataframe("dupe_epoch_offline_sat_empty_epoch") + objects_to_verify.append(sp3_df) self.assertTrue( # Alterantively you can use all(array == array) to do an elementwise equality check @@ -454,6 +539,8 @@ def test_clean_sp3_orb(self): with self.assertWarns(Warning) as warning_assessor: sp3_df_no_offline_removal = sp3.clean_sp3_orb(sp3_df, False) + objects_to_verify.append(sp3_df_no_offline_removal) + captured_warnings = warning_assessor.warnings self.assertIn( "Failed to grab filename from sp3 dataframe for error output purposes:", str(captured_warnings[0].message) @@ -461,7 +548,8 @@ def test_clean_sp3_orb(self): self.assertEqual( len(captured_warnings), 1, - "Only expected one warning, about failing to get path. Check what other warnings are being raised!", + "Only expected one warning, about failing to get path. Check what other warnings are being raised. 
Full list below:\n" + + stringify_warnings(captured_warnings), ) self.assertTrue( @@ -485,21 +573,31 @@ def test_clean_sp3_orb(self): with self.assertWarns(Warning) as warning_assessor: # Now check with offline sat removal enabled too sp3_df_with_offline_removal = sp3.clean_sp3_orb(sp3_df, True) - # Check that we still seem to have one epoch with no dupe sats, and now with the offline sat removed - self.assertTrue( - np.array_equal(sp3_df_with_offline_removal.index.get_level_values(1), ["G01", "G02"]), - "After cleaning there should be no dupe PRNs (and with offline removal, offline sat should be gone)", - ) + + objects_to_verify.append(sp3_df_with_offline_removal) + + # Check that we still seem to have one epoch with no dupe sats, and now with the offline sat removed + self.assertTrue( + np.array_equal(sp3_df_with_offline_removal.index.get_level_values(1), ["G01", "G02"]), + "After cleaning there should be no dupe PRNs (and with offline removal, offline sat should be gone)", + ) + captured_warnings = warning_assessor.warnings self.assertIn( "Failed to grab filename from sp3 dataframe for error output purposes:", str(captured_warnings[0].message) ) self.assertEqual( len(captured_warnings), - 1, - "Only expected one warning, about failing to get path. Check what other warnings are being raised!", + 1, # Second warning is about pandas 3 deprecations. TODO update... + "Only expected one warning, about failing to get path. " + f"Check all warnings below:\n{stringify_warnings(captured_warnings)}", ) + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline(objects_to_verify) # DO NOT commit this line un-commented. 
+ + self.assertTrue(UnitTestBaseliner.verify(objects_to_verify), "Hash verification should pass") + def test_gen_sp3_fundamentals(self): """ Tests that the SP3 header and content generation functions produce output that (apart from trailing @@ -507,20 +605,26 @@ def test_gen_sp3_fundamentals(self): NOTE: leverages read_sp3() to pull in sample data, so is prone to errors in that function. """ + objects_to_verify: list = [] + # Prep the baseline data to test against, including stripping each line of trailing whitespace. baseline_header_lines = trim_line_ends(sp3_test_data_short_cod_final_header).splitlines() baseline_content_lines = trim_line_ends(sp3_test_data_short_cod_final_content).splitlines() + objects_to_verify.extend([baseline_header_lines, baseline_content_lines]) # Note this is suboptimal from a testing standpoint, but for now is a lot easier than manually constructing # the DataFrame. sp3_df = sp3.read_sp3(bytes(sp3_test_data_short_cod_final)) + objects_to_verify.append(sp3_df) generated_sp3_header = sp3.gen_sp3_header(sp3_df, output_comments=True) generated_sp3_content = sp3.gen_sp3_content(sp3_df) + objects_to_verify.extend([generated_sp3_header, generated_sp3_content]) # As with the baseline data, prep the data under test, for comparison. test_header_lines = trim_line_ends(generated_sp3_header).splitlines() test_content_lines = trim_line_ends(generated_sp3_content).splitlines() + objects_to_verify.extend([test_header_lines, test_content_lines]) # TODO maybe we don't want to split the content, just the header @@ -551,6 +655,11 @@ def test_gen_sp3_fundamentals(self): f"Content line {i} didn't match", ) + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline(objects_to_verify) # DO NOT commit this line un-commented. 
+ + self.assertTrue(UnitTestBaseliner.verify(objects_to_verify), "Hash verification should pass") + # TODO add tests for correctly generating sp3 output content with gen_sp3_content() and gen_sp3_header() # These tests should include: # - Correct alignment of POS, CLK, STDPOS STDCLK, (not velocity yet), FLAGS @@ -562,6 +671,9 @@ def test_gen_sp3_fundamentals(self): def test_get_sp3_comments(self): # Somewhat standalone test to check fetching of SP3 comments from a DataFrame + + objects_to_verify: list = [] + expected_comments = [ "/* EUROPEAN SPACE OPERATIONS CENTRE - DARMSTADT, GERMANY", "/* ---------------------------------------------------------", @@ -569,11 +681,31 @@ def test_get_sp3_comments(self): "/* PCV:IGS14_2022 OL/AL:EOT11A NONE YN ORB:CoN CLK:CoN", ] sp3_df: pd.DataFrame = sp3.read_sp3(input_data, strict_mode=STRICT_OFF) - self.assertEqual(sp3.get_sp3_comments(sp3_df), expected_comments, "SP3 comments read should match expectation") - self.assertEqual(sp3_df.attrs["COMMENTS"], expected_comments, "Manual read of SP3 comments should match") + automated_comment_read = sp3.get_sp3_comments(sp3_df) + manual_comment_read = sp3_df.attrs["COMMENTS"] + + self.assertEqual(automated_comment_read, expected_comments, "SP3 comments read should match expectation") + self.assertEqual(manual_comment_read, expected_comments, "Manual read of SP3 comments should match") + self.assertEqual( + id(automated_comment_read), + id(manual_comment_read), + "Manual and automated comment read should return the same object", + ) + + # We don't pass the second comment object here, as we have verified it is the same object, and the + # verifier will raise on duplicates. + objects_to_verify.extend([sp3_df, automated_comment_read]) + + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline(objects_to_verify) # DO NOT commit this line un-commented. 
+ + self.assertTrue(UnitTestBaseliner.verify(objects_to_verify), "Hash verification should pass") def test_update_sp3_comments(self): # Somewhat standalone test to check updating SP3 comments in a DataFrame + + objects_to_verify: list = [] + expected_comments = [ "/* EUROPEAN SPACE OPERATIONS CENTRE - DARMSTADT, GERMANY", "/* ---------------------------------------------------------", @@ -584,6 +716,7 @@ def test_update_sp3_comments(self): sp3_df: pd.DataFrame = sp3.read_sp3(input_data, strict_mode=STRICT_OFF) # Load DataFrame # Read comments directly from DataFrame to check they are as expected self.assertEqual(sp3_df.attrs["COMMENTS"], expected_comments, "SP3 initial comments read were not as expected") + objects_to_verify.append(list(sp3_df.attrs["COMMENTS"])) # Append list of comments (do not unpack elements) # Introduce invalid but not overlong comment to check lead-in writing part of validation sp3_df.attrs["COMMENTS"] = [ @@ -598,6 +731,7 @@ def test_update_sp3_comments(self): ["/* malformed comment is missing lead-in", "/* malformed comment is missing space", "/* ", "/* "], "Lead in and spacing should be added to existing comments if missing", ) + objects_to_verify.append(list(sp3_df.attrs["COMMENTS"])) # Introduce overlong comment to check exception handling part of validation sp3_df.attrs["COMMENTS"] = [ @@ -621,24 +755,31 @@ def test_update_sp3_comments(self): "/* ", "Padding comment expected on second line", ) + objects_to_verify.append(list(sp3_df.attrs["COMMENTS"])) # Check deletion of all comments sp3.update_sp3_comments(sp3_df, ammend=False) self.assertEqual( sp3_df.attrs["COMMENTS"], ["/* ", "/* ", "/* ", "/* "], - "Should be no comments besides 4 padding ones, after running ammend with no input", + "Should be no comments besides 4 padding ones, after running with ammend=False and no input", ) + objects_to_verify.append(list(sp3_df.attrs["COMMENTS"])) # Write initial comment lines sp3.update_sp3_comments(sp3_df, comment_lines=["line 1", "line 2", 
"line 3", "line 4"], ammend=False) self.assertEqual(sp3_df.attrs["COMMENTS"], ["/* line 1", "/* line 2", "/* line 3", "/* line 4"]) + objects_to_verify.append(list(sp3_df.attrs["COMMENTS"])) # Write more lines sp3.update_sp3_comments(sp3_df, comment_lines=["line 5", "line 6"], ammend=True) self.assertEqual( sp3_df.attrs["COMMENTS"], ["/* line 1", "/* line 2", "/* line 3", "/* line 4", "/* line 5", "/* line 6"] ) + # NOTE: Creating a new list captures the immutable strings it contains, at this point in time. Without + # constructing a new list, we would just capture a reference to the list itself (which is added to rather + # than replaced when ammend=True) + objects_to_verify.append(list(sp3_df.attrs["COMMENTS"])) # Write more lines, free form sp3.update_sp3_comments(sp3_df, comment_string="arbitrary length line", ammend=True) @@ -646,6 +787,7 @@ def test_update_sp3_comments(self): sp3_df.attrs["COMMENTS"], ["/* line 1", "/* line 2", "/* line 3", "/* line 4", "/* line 5", "/* line 6", "/* arbitrary length line"], ) + objects_to_verify.append(list(sp3_df.attrs["COMMENTS"])) # Write more lines, both modes at once sp3.update_sp3_comments(sp3_df, comment_lines=["line 8"], comment_string="some other comment", ammend=True) @@ -663,7 +805,9 @@ def test_update_sp3_comments(self): "/* some other comment", ], ) + objects_to_verify.append(list(sp3_df.attrs["COMMENTS"])) + # Same as above but truncating existing comments (starting again) sp3.update_sp3_comments(sp3_df, comment_lines=["new line"], comment_string="some new comment", ammend=False) self.assertEqual( sp3_df.attrs["COMMENTS"], @@ -674,7 +818,9 @@ def test_update_sp3_comments(self): "/* ", ], ) + objects_to_verify.append(list(sp3_df.attrs["COMMENTS"])) + # And free form string mode with no ammending (truncate) sp3.update_sp3_comments(sp3_df, comment_string="some other new comment", ammend=False) self.assertEqual( sp3_df.attrs["COMMENTS"], @@ -685,9 +831,21 @@ def test_update_sp3_comments(self): "/* ", ], ) + 
objects_to_verify.append(list(sp3_df.attrs["COMMENTS"])) + + # NOTE: comment reflow not tested above. This is done in test_sp3_comment_reflow() + + # NOTE: all key changes are explicitly checked with asserts above: baselining is not strictly necessary. + + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline(objects_to_verify) # DO NOT commit this line un-commented. + + self.assertTrue(UnitTestBaseliner.verify(objects_to_verify), "Hash verification should pass") def test_sp3_comment_validation_standalone(self): + objects_to_verify: list = [] + # Other examples of valid and invalid lines we could use. # valid_lines: list[str] = [ @@ -712,37 +870,47 @@ def test_sp3_comment_validation_standalone(self): # ] # Insufficient number of lines should fail validation - self.assertFalse(sp3.validate_sp3_comment_lines(["/* Must have >= 4 comment lines!"], STRICT_OFF)) + comment_lines = ["/* Must have >= 4 comment lines!"] + self.assertFalse(sp3.validate_sp3_comment_lines(comment_lines, STRICT_OFF)) + objects_to_verify.append(list(comment_lines)) + + comment_lines = [ + "/* Must have >= 4 comment lines!", + "/* Must have >= 4 comment lines!", + "/* Must have >= 4 comment lines!", + ] self.assertFalse( sp3.validate_sp3_comment_lines( - [ - "/* Must have >= 4 comment lines!", - "/* Must have >= 4 comment lines!", - "/* Must have >= 4 comment lines!", - ], + comment_lines, STRICT_OFF, ) ) + objects_to_verify.append(list(comment_lines)) + + comment_lines = [ + "/* Must have >= 4 comment lines!", + "/* Must have >= 4 comment lines!", + "/* Must have >= 4 comment lines!", + "/* Ok we're good now", + ] self.assertTrue( sp3.validate_sp3_comment_lines( - [ - "/* Must have >= 4 comment lines!", - "/* Must have >= 4 comment lines!", - "/* Must have >= 4 comment lines!", - "/* Ok we're good now", - ], + comment_lines, STRICT_OFF, ) ) + objects_to_verify.append(list(comment_lines)) # We have a convenience flag to turn that one off, to make testing less cumbersome: + 
comment_lines = ["/* Must have >= 4 comment lines! ...Unless that check is turned off"] self.assertTrue( sp3.validate_sp3_comment_lines( - ["/* Must have >= 4 comment lines! ...Unless that check is turned off"], + comment_lines, STRICT_OFF, skip_min_4_lines_test=True, ) ) + objects_to_verify.append(list(comment_lines)) # # The bulk tests may be overkill. # # Bulk test valid and invalid lines, with different settings @@ -774,46 +942,54 @@ def test_sp3_comment_validation_standalone(self): # ) # Uneventful cases - self.assertTrue( - sp3.validate_sp3_comment_lines(["/* this line is fine"], STRICT_RAISE, skip_min_4_lines_test=True) - ) + comment_lines = ["/* this line is fine"] + self.assertTrue(sp3.validate_sp3_comment_lines(comment_lines, STRICT_RAISE, skip_min_4_lines_test=True)) + objects_to_verify.append(list(comment_lines)) + + comment_lines = ["/* line 1", "/* line 2"] self.assertTrue( sp3.validate_sp3_comment_lines( - ["/* line 1", "/* line 2"], + comment_lines, STRICT_OFF, skip_min_4_lines_test=True, attempt_fixes=False, fail_on_fixed_issues=True, ) ) + objects_to_verify.append(list(comment_lines)) # Turning off fail_on_fixed_issues should make no difference here. + comment_lines = ["/* line 1", "/* line 2"] self.assertTrue( sp3.validate_sp3_comment_lines( - ["/* line 1", "/* line 2"], + comment_lines, STRICT_OFF, skip_min_4_lines_test=True, attempt_fixes=False, fail_on_fixed_issues=False, ) ) + objects_to_verify.append(list(comment_lines)) # Strict mode shouldn't change how valid lines are handled + comment_lines = ["/* line 1", "/* line 2"] self.assertTrue( sp3.validate_sp3_comment_lines( - ["/* line 1", "/* line 2"], + comment_lines, STRICT_RAISE, skip_min_4_lines_test=True, attempt_fixes=False, fail_on_fixed_issues=False, ) ) + objects_to_verify.append(list(comment_lines)) # With strictness off, invalid lines shouldn't raise exceptions, but should still fail validation # Note that fail-on-fixed currently has no effect if attempt_fixes is off. 
+ comment_lines = ["this line has no lead-in"] self.assertFalse( sp3.validate_sp3_comment_lines( - ["this line has no lead-in"], + comment_lines, STRICT_OFF, skip_min_4_lines_test=True, attempt_fixes=False, @@ -821,6 +997,8 @@ def test_sp3_comment_validation_standalone(self): ), "Invalid comment line should fail validation but not raise exception as strict mode is off", ) + self.assertEqual(comment_lines, ["this line has no lead-in"], "No fix should be made when attempt_fixes=False") + # No need to add this one to the baseline, we have a full coverage assert here. with self.assertRaises(ValueError): sp3.validate_sp3_comment_lines( @@ -862,6 +1040,7 @@ def test_sp3_comment_validation_standalone(self): ["/* this line has missing space after lead-in"], "Missing space should be addressed in place", ) + objects_to_verify.append(list(comment_lines)) # With fail on fixed: fail validation because the input was wrong, even though we were able to remedy it. comment_lines = ["/*this line has missing space after lead-in"] @@ -880,6 +1059,7 @@ def test_sp3_comment_validation_standalone(self): ["/* this line has missing space after lead-in"], "Missing space should be addressed in place", ) + objects_to_verify.append(list(comment_lines)) # Same as above, but with strict mode: raise, that should be an exception. comment_lines = ["/*this line has missing space after lead-in"] @@ -913,6 +1093,11 @@ def test_sp3_comment_validation_standalone(self): attempt_fixes=True, ) + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline(objects_to_verify) # DO NOT commit this line un-commented. + + self.assertTrue(UnitTestBaseliner.verify(objects_to_verify), "Hash verification should pass") + def test_sp3_comment_reflow(self): # Test that string reflow utility correctly splits a string and converts it into SP3 comment lines. comment_string_to_reflow = """SP3 comment reflow test. This should not break words if possible. 
\ @@ -985,24 +1170,40 @@ def test_gen_sp3_content_velocity_exception_handling(self): """ gen_sp3_content() can't yet output velocity data. Ensure raises by default, and removes vel columns with warning """ + + objects_to_verify: list = [] # Input data passed as bytes here, rather than using a mock file, because the mock file setup seems to break # part of Pandas Styler, which is used by gen_sp3_content(). Specifically, some part of Styler's attempt to # load style config files leads to a crash, despite some style config files appearing to read successfully) input_data_fresh = input_data + b"" # Lazy attempt at not passing a reference sp3_df = sp3.read_sp3(bytes(input_data_fresh), pOnly=False) + objects_to_verify.append(sp3_df) with self.assertRaises(NotImplementedError): generated_sp3_content = sp3.gen_sp3_content(sp3_df, continue_on_unhandled_velocity_data=False) + objects_to_verify.append(generated_sp3_content) with self.assertWarns(Warning) as warning_accessor: generated_sp3_content = sp3.gen_sp3_content(sp3_df, continue_on_unhandled_velocity_data=True) self.assertTrue("VX" not in generated_sp3_content, "Velocity data should be removed before outputting SP3") + objects_to_verify.append(generated_sp3_content) captured_warnings = warning_accessor.warnings self.assertEqual( "SP3 velocity output not currently supported! Dropping velocity columns before writing out.", str(captured_warnings[0].message), ) + self.assertEqual( + len(captured_warnings), + 1, + "Expected only 1 warning. Check what other warnings are being raised! Full list below:\n" + + stringify_warnings(captured_warnings), + ) + + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline(objects_to_verify) # DO NOT commit this line un-commented. 
+ + self.assertTrue(UnitTestBaseliner.verify(objects_to_verify), "Hash verification should pass") def test_sp3_clock_nodata_to_nan(self): sp3_df = pd.DataFrame({("EST", "CLK"): [999999.999999, 123456.789, 999999.999999, 987654.321]}) @@ -1010,6 +1211,8 @@ def test_sp3_clock_nodata_to_nan(self): expected_result = pd.DataFrame({("EST", "CLK"): [np.nan, 123456.789, np.nan, 987654.321]}) self.assertTrue(sp3_df.equals(expected_result)) + # Note while this does not test a full dataframe, it does use DF.equals(), so we are not adding baselining. + def test_sp3_pos_nodata_to_nan(self): """ This test data represents four 'rows' of data, each with an X, Y and Z component of the Position vector. @@ -1041,17 +1244,35 @@ def test_velinterpolation(self): is to check if the function runs without errors TODO: update that to check actual expected values """ + + # TODO note we do not currently check for a confirmed correct answer. We just check that the answer has + # not changed from our baseline. + objects_to_verify: list = [] + result = sp3.read_sp3(input_data, pOnly=True, strict_mode=STRICT_OFF) + objects_to_verify.append(result) + r = sp3.getVelSpline(result) - r2 = sp3.getVelPoly(result, 2) self.assertIsNotNone(r) + objects_to_verify.append(r) + + r2 = sp3.getVelPoly(result, 2) self.assertIsNotNone(r2) + objects_to_verify.append(r2) + + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline(objects_to_verify) # DO NOT commit this line un-commented.
+ + self.assertTrue(UnitTestBaseliner.verify(objects_to_verify), "Hash verification should pass") def test_sp3_offline_sat_removal_standalone(self): """ Standalone test for remove_offline_sats() using manually constructed DataFrame to avoid dependency on read_sp3() """ + + objects_to_verify: list = [] + sp3_df_nans = TestSP3.get_example_dataframe("offline_sat_nan") sp3_df_zeros = TestSP3.get_example_dataframe("offline_sat_zero") @@ -1065,6 +1286,7 @@ def test_sp3_offline_sat_removal_standalone(self): ["G01", "G02", "G03"], "Should start with 3 SVs", ) + objects_to_verify.extend([sp3_df_nans, sp3_df_zeros]) sp3_df_zeros_removed = sp3.remove_offline_sats(sp3_df_zeros) sp3_df_nans_removed = sp3.remove_offline_sats(sp3_df_nans) @@ -1080,8 +1302,18 @@ def test_sp3_offline_sat_removal_standalone(self): "Should be two SVs after removing offline ones", ) + objects_to_verify.extend([sp3_df_zeros_removed, sp3_df_nans_removed]) + + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline(objects_to_verify) # DO NOT commit this line un-commented. + + self.assertTrue(UnitTestBaseliner.verify(objects_to_verify), "Hash verification should pass") + def test_sp3_offline_sat_removal(self): + objects_to_verify: list = [] + sp3_df = sp3.read_sp3(offline_sat_test_data, pOnly=False, strict_mode=STRICT_OFF) + objects_to_verify.append(sp3_df) # Confirm starting state of content self.assertEqual( @@ -1100,26 +1332,39 @@ def test_sp3_offline_sat_removal(self): sp3_df.attrs["HEADER"].HEAD.SV_COUNT_STATED, "3", "Header should have 2 SVs before removing offline" ) + df_snapshot = DataFrame(sp3_df) # Now make the changes - this should also update the header - sp3_df = sp3.remove_offline_sats(sp3_df) + sp3_df_cleaned = sp3.remove_offline_sats(sp3_df) + objects_to_verify.append(sp3_df_cleaned) + # Ensure the source DF did NOT get modified... 
+ df_snapshot_after = DataFrame(sp3_df) + self.assertTrue( + df_snapshot.equals(df_snapshot_after), + "Original DF should not be modified by function that returns a new copy", + ) # Check contents self.assertEqual( - sp3_df.index.get_level_values(1).unique().array.tolist(), + sp3_df_cleaned.index.get_level_values(1).unique().array.tolist(), ["G02", "G03"], "Should be two SVs after removing offline ones", ) # Check header self.assertEqual( - sp3_df.attrs["HEADER"].SV_INFO.index.array.tolist(), + sp3_df_cleaned.attrs["HEADER"].SV_INFO.index.array.tolist(), ["G02", "G03"], "Should be two SVs in parsed header after removing offline ones", ) self.assertEqual( - sp3_df.attrs["HEADER"].HEAD.SV_COUNT_STATED, "2", "Header should have 2 SVs after removing offline" + sp3_df_cleaned.attrs["HEADER"].HEAD.SV_COUNT_STATED, "2", "Header should have 2 SVs after removing offline" ) + # UnitTestBaseliner.mode = "baseline" + # UnitTestBaseliner.create_baseline(objects_to_verify) # DO NOT commit this line un-commented. 
+ + self.assertTrue(UnitTestBaseliner.verify(objects_to_verify), "Hash verification should pass") + # sp3_test_data_truncated_cod_final is input_data2 def test_filter_by_svs(self): sp3_df = sp3.read_sp3(input_data2, pOnly=False) diff --git a/tests/unittest_baselines/TestClk/test_clk_read.pickledlist b/tests/unittest_baselines/TestClk/test_clk_read.pickledlist new file mode 100644 index 0000000..3cd94bb Binary files /dev/null and b/tests/unittest_baselines/TestClk/test_clk_read.pickledlist differ diff --git a/tests/unittest_baselines/TestClk/test_clk_read.pickledlist_sha256 b/tests/unittest_baselines/TestClk/test_clk_read.pickledlist_sha256 new file mode 100644 index 0000000..2bf0321 --- /dev/null +++ b/tests/unittest_baselines/TestClk/test_clk_read.pickledlist_sha256 @@ -0,0 +1 @@ +9a3a373545bb236948a91103327afe6be69badda0bfbfb4a91bf093c29a1a6c5 \ No newline at end of file diff --git a/tests/unittest_baselines/TestClk/test_diff_clk.pickledlist b/tests/unittest_baselines/TestClk/test_diff_clk.pickledlist new file mode 100644 index 0000000..74cc639 Binary files /dev/null and b/tests/unittest_baselines/TestClk/test_diff_clk.pickledlist differ diff --git a/tests/unittest_baselines/TestClk/test_diff_clk.pickledlist_sha256 b/tests/unittest_baselines/TestClk/test_diff_clk.pickledlist_sha256 new file mode 100644 index 0000000..23a3923 --- /dev/null +++ b/tests/unittest_baselines/TestClk/test_diff_clk.pickledlist_sha256 @@ -0,0 +1 @@ +481749f75c2bc933595938c60ded2556e65b8afe958f401cbb50194bdf519e90 \ No newline at end of file diff --git a/tests/unittest_baselines/TestIgsLogDataParsing/test_parse_igs_log_data.pickledlist b/tests/unittest_baselines/TestIgsLogDataParsing/test_parse_igs_log_data.pickledlist new file mode 100644 index 0000000..39e99d6 Binary files /dev/null and b/tests/unittest_baselines/TestIgsLogDataParsing/test_parse_igs_log_data.pickledlist differ diff --git a/tests/unittest_baselines/TestIgsLogDataParsing/test_parse_igs_log_data.pickledlist_sha256 
b/tests/unittest_baselines/TestIgsLogDataParsing/test_parse_igs_log_data.pickledlist_sha256 new file mode 100644 index 0000000..4e916d3 --- /dev/null +++ b/tests/unittest_baselines/TestIgsLogDataParsing/test_parse_igs_log_data.pickledlist_sha256 @@ -0,0 +1 @@ +bc48a66d5b5b5e128fa295e30da7476c8f31eb384ebddea884344332aabc0aea \ No newline at end of file diff --git a/tests/unittest_baselines/TestIgsLogFileParsing/test_gather_metadata.pickledlist b/tests/unittest_baselines/TestIgsLogFileParsing/test_gather_metadata.pickledlist new file mode 100644 index 0000000..35b6c67 Binary files /dev/null and b/tests/unittest_baselines/TestIgsLogFileParsing/test_gather_metadata.pickledlist differ diff --git a/tests/unittest_baselines/TestIgsLogFileParsing/test_gather_metadata.pickledlist_sha256 b/tests/unittest_baselines/TestIgsLogFileParsing/test_gather_metadata.pickledlist_sha256 new file mode 100644 index 0000000..980e4f6 --- /dev/null +++ b/tests/unittest_baselines/TestIgsLogFileParsing/test_gather_metadata.pickledlist_sha256 @@ -0,0 +1 @@ +ada6360381393205d3da207a9af0fc8813fddc74c6922b0fdf6c117b195f394d \ No newline at end of file diff --git a/tests/unittest_baselines/TestIgsLogRegex/test_extract_antenna_block.pickledlist b/tests/unittest_baselines/TestIgsLogRegex/test_extract_antenna_block.pickledlist new file mode 100644 index 0000000..0231f38 Binary files /dev/null and b/tests/unittest_baselines/TestIgsLogRegex/test_extract_antenna_block.pickledlist differ diff --git a/tests/unittest_baselines/TestIgsLogRegex/test_extract_antenna_block.pickledlist_sha256 b/tests/unittest_baselines/TestIgsLogRegex/test_extract_antenna_block.pickledlist_sha256 new file mode 100644 index 0000000..a0e20de --- /dev/null +++ b/tests/unittest_baselines/TestIgsLogRegex/test_extract_antenna_block.pickledlist_sha256 @@ -0,0 +1 @@ +45e36f43cdbd37e1ea7e5d41cdec62df5af83792ba9588cfbf5ea7c969111573 \ No newline at end of file diff --git 
a/tests/unittest_baselines/TestIgsLogRegex/test_extract_receiver_block.pickledlist b/tests/unittest_baselines/TestIgsLogRegex/test_extract_receiver_block.pickledlist new file mode 100644 index 0000000..d22f6cb Binary files /dev/null and b/tests/unittest_baselines/TestIgsLogRegex/test_extract_receiver_block.pickledlist differ diff --git a/tests/unittest_baselines/TestIgsLogRegex/test_extract_receiver_block.pickledlist_sha256 b/tests/unittest_baselines/TestIgsLogRegex/test_extract_receiver_block.pickledlist_sha256 new file mode 100644 index 0000000..7d0756a --- /dev/null +++ b/tests/unittest_baselines/TestIgsLogRegex/test_extract_receiver_block.pickledlist_sha256 @@ -0,0 +1 @@ +afdd996360daac38e7ac69bd045668134e6a28ed34d703d81771f91ac9a2c23c \ No newline at end of file diff --git a/tests/unittest_baselines/TestSP3/baseline_get_example_dataframe.pickledlist b/tests/unittest_baselines/TestSP3/baseline_get_example_dataframe.pickledlist new file mode 100644 index 0000000..40986f9 Binary files /dev/null and b/tests/unittest_baselines/TestSP3/baseline_get_example_dataframe.pickledlist differ diff --git a/tests/unittest_baselines/TestSP3/baseline_get_example_dataframe.pickledlist_sha256 b/tests/unittest_baselines/TestSP3/baseline_get_example_dataframe.pickledlist_sha256 new file mode 100644 index 0000000..b1aa353 --- /dev/null +++ b/tests/unittest_baselines/TestSP3/baseline_get_example_dataframe.pickledlist_sha256 @@ -0,0 +1 @@ +59c8f38083406d03463b4151853627e4d0689f277697d82f9aa09a84670254de \ No newline at end of file diff --git a/tests/unittest_baselines/TestSP3/test_clean_sp3_orb.pickledlist b/tests/unittest_baselines/TestSP3/test_clean_sp3_orb.pickledlist new file mode 100644 index 0000000..e8be54e Binary files /dev/null and b/tests/unittest_baselines/TestSP3/test_clean_sp3_orb.pickledlist differ diff --git a/tests/unittest_baselines/TestSP3/test_clean_sp3_orb.pickledlist_sha256 b/tests/unittest_baselines/TestSP3/test_clean_sp3_orb.pickledlist_sha256 new file mode 
100644 index 0000000..d78807c --- /dev/null +++ b/tests/unittest_baselines/TestSP3/test_clean_sp3_orb.pickledlist_sha256 @@ -0,0 +1 @@ +d4d7f3a3810de259923261d47af1a81b7311aeb0e869d1c98eea78a4d4797eb6 \ No newline at end of file diff --git a/tests/unittest_baselines/TestSP3/test_gen_sp3_content_velocity_exception_handling.pickledlist b/tests/unittest_baselines/TestSP3/test_gen_sp3_content_velocity_exception_handling.pickledlist new file mode 100644 index 0000000..e038f8e Binary files /dev/null and b/tests/unittest_baselines/TestSP3/test_gen_sp3_content_velocity_exception_handling.pickledlist differ diff --git a/tests/unittest_baselines/TestSP3/test_gen_sp3_content_velocity_exception_handling.pickledlist_sha256 b/tests/unittest_baselines/TestSP3/test_gen_sp3_content_velocity_exception_handling.pickledlist_sha256 new file mode 100644 index 0000000..62754d6 --- /dev/null +++ b/tests/unittest_baselines/TestSP3/test_gen_sp3_content_velocity_exception_handling.pickledlist_sha256 @@ -0,0 +1 @@ +631133b8067e6255655b3f0e9238925af0c532851f3eae3c7688ef7c34717fa0 \ No newline at end of file diff --git a/tests/unittest_baselines/TestSP3/test_gen_sp3_fundamentals.pickledlist b/tests/unittest_baselines/TestSP3/test_gen_sp3_fundamentals.pickledlist new file mode 100644 index 0000000..f416a20 Binary files /dev/null and b/tests/unittest_baselines/TestSP3/test_gen_sp3_fundamentals.pickledlist differ diff --git a/tests/unittest_baselines/TestSP3/test_gen_sp3_fundamentals.pickledlist_sha256 b/tests/unittest_baselines/TestSP3/test_gen_sp3_fundamentals.pickledlist_sha256 new file mode 100644 index 0000000..cc1f04d --- /dev/null +++ b/tests/unittest_baselines/TestSP3/test_gen_sp3_fundamentals.pickledlist_sha256 @@ -0,0 +1 @@ +e9a3ecaa521de177349d67719036b48929d46705f21bd7b6c113479640b32a37 \ No newline at end of file diff --git a/tests/unittest_baselines/TestSP3/test_get_sp3_comments.pickledlist b/tests/unittest_baselines/TestSP3/test_get_sp3_comments.pickledlist new file mode 100644 
index 0000000..7e4503d Binary files /dev/null and b/tests/unittest_baselines/TestSP3/test_get_sp3_comments.pickledlist differ diff --git a/tests/unittest_baselines/TestSP3/test_get_sp3_comments.pickledlist_sha256 b/tests/unittest_baselines/TestSP3/test_get_sp3_comments.pickledlist_sha256 new file mode 100644 index 0000000..349d1ec --- /dev/null +++ b/tests/unittest_baselines/TestSP3/test_get_sp3_comments.pickledlist_sha256 @@ -0,0 +1 @@ +0c388d515c4dfff103b81497bf25af4beafa3a307046fb7309eb14cbf9917fb0 \ No newline at end of file diff --git a/tests/unittest_baselines/TestSP3/test_read_sp3_correct_svs_read_when_ev_ep_present.pickledlist b/tests/unittest_baselines/TestSP3/test_read_sp3_correct_svs_read_when_ev_ep_present.pickledlist new file mode 100644 index 0000000..5feb8c6 Binary files /dev/null and b/tests/unittest_baselines/TestSP3/test_read_sp3_correct_svs_read_when_ev_ep_present.pickledlist differ diff --git a/tests/unittest_baselines/TestSP3/test_read_sp3_correct_svs_read_when_ev_ep_present.pickledlist_sha256 b/tests/unittest_baselines/TestSP3/test_read_sp3_correct_svs_read_when_ev_ep_present.pickledlist_sha256 new file mode 100644 index 0000000..1e7baf9 --- /dev/null +++ b/tests/unittest_baselines/TestSP3/test_read_sp3_correct_svs_read_when_ev_ep_present.pickledlist_sha256 @@ -0,0 +1 @@ +b0aa87283674571fddd8d240f4686025ef608bbec0860755014c9f3f3b2bb025 \ No newline at end of file diff --git a/tests/unittest_baselines/TestSP3/test_read_sp3_header_svs_basic.pickledlist b/tests/unittest_baselines/TestSP3/test_read_sp3_header_svs_basic.pickledlist new file mode 100644 index 0000000..88659eb Binary files /dev/null and b/tests/unittest_baselines/TestSP3/test_read_sp3_header_svs_basic.pickledlist differ diff --git a/tests/unittest_baselines/TestSP3/test_read_sp3_header_svs_basic.pickledlist_sha256 b/tests/unittest_baselines/TestSP3/test_read_sp3_header_svs_basic.pickledlist_sha256 new file mode 100644 index 0000000..1a52a79 --- /dev/null +++ 
b/tests/unittest_baselines/TestSP3/test_read_sp3_header_svs_basic.pickledlist_sha256 @@ -0,0 +1 @@ +a7087d6d1ce09694262ed7915f13d622b9381e1f82b2aeda39eb11c87adbcb6d \ No newline at end of file diff --git a/tests/unittest_baselines/TestSP3/test_read_sp3_header_svs_detailed.pickledlist b/tests/unittest_baselines/TestSP3/test_read_sp3_header_svs_detailed.pickledlist new file mode 100644 index 0000000..74c1d8b Binary files /dev/null and b/tests/unittest_baselines/TestSP3/test_read_sp3_header_svs_detailed.pickledlist differ diff --git a/tests/unittest_baselines/TestSP3/test_read_sp3_header_svs_detailed.pickledlist_sha256 b/tests/unittest_baselines/TestSP3/test_read_sp3_header_svs_detailed.pickledlist_sha256 new file mode 100644 index 0000000..8df4b1b --- /dev/null +++ b/tests/unittest_baselines/TestSP3/test_read_sp3_header_svs_detailed.pickledlist_sha256 @@ -0,0 +1 @@ +f58037c1fe36f353ceb5ecae17aac3c07fbb4fa44245fdd36838a207d7d27ac4 \ No newline at end of file diff --git a/tests/unittest_baselines/TestSP3/test_read_sp3_overlong_lines.pickledlist b/tests/unittest_baselines/TestSP3/test_read_sp3_overlong_lines.pickledlist new file mode 100644 index 0000000..93b2f47 Binary files /dev/null and b/tests/unittest_baselines/TestSP3/test_read_sp3_overlong_lines.pickledlist differ diff --git a/tests/unittest_baselines/TestSP3/test_read_sp3_overlong_lines.pickledlist_sha256 b/tests/unittest_baselines/TestSP3/test_read_sp3_overlong_lines.pickledlist_sha256 new file mode 100644 index 0000000..d8278b7 --- /dev/null +++ b/tests/unittest_baselines/TestSP3/test_read_sp3_overlong_lines.pickledlist_sha256 @@ -0,0 +1 @@ +774d383a3aec8cfe9cfe889b50df526e8b91e3cf82684fdddf9c1d911b5d49ac \ No newline at end of file diff --git a/tests/unittest_baselines/TestSP3/test_read_sp3_pOnly.pickledlist b/tests/unittest_baselines/TestSP3/test_read_sp3_pOnly.pickledlist new file mode 100644 index 0000000..0d1b3e9 Binary files /dev/null and b/tests/unittest_baselines/TestSP3/test_read_sp3_pOnly.pickledlist 
differ diff --git a/tests/unittest_baselines/TestSP3/test_read_sp3_pOnly.pickledlist_sha256 b/tests/unittest_baselines/TestSP3/test_read_sp3_pOnly.pickledlist_sha256 new file mode 100644 index 0000000..1e75300 --- /dev/null +++ b/tests/unittest_baselines/TestSP3/test_read_sp3_pOnly.pickledlist_sha256 @@ -0,0 +1 @@ +59ae881664375f40267e6269ba3a91a4224d3595be4c11a5ffe1b49dd8b46c89 \ No newline at end of file diff --git a/tests/unittest_baselines/TestSP3/test_read_sp3_pv.pickledlist b/tests/unittest_baselines/TestSP3/test_read_sp3_pv.pickledlist new file mode 100644 index 0000000..88659eb Binary files /dev/null and b/tests/unittest_baselines/TestSP3/test_read_sp3_pv.pickledlist differ diff --git a/tests/unittest_baselines/TestSP3/test_read_sp3_pv.pickledlist_sha256 b/tests/unittest_baselines/TestSP3/test_read_sp3_pv.pickledlist_sha256 new file mode 100644 index 0000000..1a52a79 --- /dev/null +++ b/tests/unittest_baselines/TestSP3/test_read_sp3_pv.pickledlist_sha256 @@ -0,0 +1 @@ +a7087d6d1ce09694262ed7915f13d622b9381e1f82b2aeda39eb11c87adbcb6d \ No newline at end of file diff --git a/tests/unittest_baselines/TestSP3/test_sp3_comment_validation_standalone.pickledlist b/tests/unittest_baselines/TestSP3/test_sp3_comment_validation_standalone.pickledlist new file mode 100644 index 0000000..f38b8b6 Binary files /dev/null and b/tests/unittest_baselines/TestSP3/test_sp3_comment_validation_standalone.pickledlist differ diff --git a/tests/unittest_baselines/TestSP3/test_sp3_comment_validation_standalone.pickledlist_sha256 b/tests/unittest_baselines/TestSP3/test_sp3_comment_validation_standalone.pickledlist_sha256 new file mode 100644 index 0000000..852d5d6 --- /dev/null +++ b/tests/unittest_baselines/TestSP3/test_sp3_comment_validation_standalone.pickledlist_sha256 @@ -0,0 +1 @@ +a9e8c4308b4311a016f64b3ce8135e265331ac94c1fa2d80dc519350b09ad71a \ No newline at end of file diff --git a/tests/unittest_baselines/TestSP3/test_sp3_offline_sat_removal.pickledlist 
b/tests/unittest_baselines/TestSP3/test_sp3_offline_sat_removal.pickledlist new file mode 100644 index 0000000..cf86ef7 Binary files /dev/null and b/tests/unittest_baselines/TestSP3/test_sp3_offline_sat_removal.pickledlist differ diff --git a/tests/unittest_baselines/TestSP3/test_sp3_offline_sat_removal.pickledlist_sha256 b/tests/unittest_baselines/TestSP3/test_sp3_offline_sat_removal.pickledlist_sha256 new file mode 100644 index 0000000..3c5084a --- /dev/null +++ b/tests/unittest_baselines/TestSP3/test_sp3_offline_sat_removal.pickledlist_sha256 @@ -0,0 +1 @@ +79d5e639e0f76f7d83825e43a862f907fecf7d28cb606ee25621aeb951e7ee5f \ No newline at end of file diff --git a/tests/unittest_baselines/TestSP3/test_sp3_offline_sat_removal_standalone.pickledlist b/tests/unittest_baselines/TestSP3/test_sp3_offline_sat_removal_standalone.pickledlist new file mode 100644 index 0000000..5468dc7 Binary files /dev/null and b/tests/unittest_baselines/TestSP3/test_sp3_offline_sat_removal_standalone.pickledlist differ diff --git a/tests/unittest_baselines/TestSP3/test_sp3_offline_sat_removal_standalone.pickledlist_sha256 b/tests/unittest_baselines/TestSP3/test_sp3_offline_sat_removal_standalone.pickledlist_sha256 new file mode 100644 index 0000000..ff4b004 --- /dev/null +++ b/tests/unittest_baselines/TestSP3/test_sp3_offline_sat_removal_standalone.pickledlist_sha256 @@ -0,0 +1 @@ +5bf08cd846d30e3cf2bdb0a82c1c43822f49aae1f0f09354ffa9641f47dd4a4c \ No newline at end of file diff --git a/tests/unittest_baselines/TestSP3/test_update_sp3_comments.pickledlist b/tests/unittest_baselines/TestSP3/test_update_sp3_comments.pickledlist new file mode 100644 index 0000000..01a0510 Binary files /dev/null and b/tests/unittest_baselines/TestSP3/test_update_sp3_comments.pickledlist differ diff --git a/tests/unittest_baselines/TestSP3/test_update_sp3_comments.pickledlist_sha256 b/tests/unittest_baselines/TestSP3/test_update_sp3_comments.pickledlist_sha256 new file mode 100644 index 0000000..f0c5b91 --- 
/dev/null +++ b/tests/unittest_baselines/TestSP3/test_update_sp3_comments.pickledlist_sha256 @@ -0,0 +1 @@ +61389ff40aedf4db6ac3cdff76ba03663a1d01097f5d1d7bb391760aee96bd0a \ No newline at end of file diff --git a/tests/unittest_baselines/TestSP3/test_velinterpolation.pickledlist b/tests/unittest_baselines/TestSP3/test_velinterpolation.pickledlist new file mode 100644 index 0000000..cae8444 Binary files /dev/null and b/tests/unittest_baselines/TestSP3/test_velinterpolation.pickledlist differ diff --git a/tests/unittest_baselines/TestSP3/test_velinterpolation.pickledlist_sha256 b/tests/unittest_baselines/TestSP3/test_velinterpolation.pickledlist_sha256 new file mode 100644 index 0000000..18e9109 --- /dev/null +++ b/tests/unittest_baselines/TestSP3/test_velinterpolation.pickledlist_sha256 @@ -0,0 +1 @@ +22c1554ab1b7eebdf60995e9eefd2eb5b7f582061d3f1e7813fb0f6b2b5085f9 \ No newline at end of file