88from gnssanalysis .filenames import convert_nominal_span , determine_properties_from_filename
99import gnssanalysis .gn_io .sp3 as sp3
1010
11- from gnssanalysis .gn_utils import STRICT_OFF , STRICT_RAISE , STRICT_WARN , stringify_warnings , trim_line_ends
11+ from gnssanalysis .gn_utils import (
12+ STRICT_OFF ,
13+ STRICT_RAISE ,
14+ STRICT_WARN ,
15+ UnitTestBaseliner ,
16+ stringify_warnings ,
17+ trim_line_ends ,
18+ )
1219from test_datasets .sp3_test_data import (
1320 fake_header_version_a ,
1421 fake_header_version_b ,
@@ -98,12 +105,13 @@ def test_check_sp3_version(self):
98105 self .assertEqual (
99106 len (captured_warnings ),
100107 2 ,
101- "Expected only 2 warnings. Check what other warnings are being raised!" ,
108+ "Expected only 2 warnings. Check what other warnings are being raised! Full list below:\n "
109+ + stringify_warnings (captured_warnings ),
102110 )
103111
104112 # Our best supported version should return True
105113 self .assertEqual (
106- sp3 .check_sp3_version (fake_header_version_d ), True , "SP3 version d should be considered best supported"
114+ sp3 .check_sp3_version (fake_header_version_d ), True , "SP3 version d should be considered supported"
107115 )
108116
109117 # StrictModes.STRICT_RAISE should cause a *possibly* supported version to raise an exception.
@@ -114,6 +122,11 @@ def test_read_sp3_pOnly(self):
114122 result = sp3 .read_sp3 (input_data , pOnly = True , strict_mode = STRICT_OFF )
115123 self .assertEqual (len (result ), 6 )
116124
125+ # UnitTestBaseliner.mode = "baseline"
126+ # UnitTestBaseliner.create_baseline([result])
127+
128+ self .assertTrue (UnitTestBaseliner .verify ([result ]), "Hash verification should pass" )
129+
117130 def test_read_sp3_pv (self ):
118131 result = sp3 .read_sp3 (input_data , pOnly = False , strict_mode = STRICT_OFF )
119132 self .assertEqual (len (result ), 6 )
@@ -122,11 +135,19 @@ def test_read_sp3_pv(self):
122135 self .assertEqual (result .attrs ["HEADER" ]["HEAD" ]["DATETIME" ], "2007 4 12 0 0 0.00000000" )
123136 self .assertEqual (result .index [0 ][0 ], 229608000 ) # Same date, as J2000
124137
138+ # UnitTestBaseliner.mode = "baseline"
139+ # UnitTestBaseliner.create_baseline([result])
140+
141+ self .assertTrue (UnitTestBaseliner .verify ([result ]), "Hash verification should pass" )
142+
125143 def test_read_sp3_pv_with_ev_ep_rows (self ):
126144 # Expect exception relating to the EV and EP rows (in RAISE mode), as we can't currently handle them properly.
127145 with self .assertRaises (NotImplementedError ) as raised_exception :
128146 sp3 .read_sp3 (sp3c_example2_data , pOnly = False , strict_mode = STRICT_RAISE , skip_version_check = True )
129147
148+ # Assert that raised exception says what we expect it to
149+ self .assertEqual (raised_exception .exception , "EP and EV flag rows are currently not supported" )
150+
130151 def test_read_sp3_header_svs_basic (self ):
131152 """
132153 Minimal test of reading SVs from header
@@ -136,6 +157,12 @@ def test_read_sp3_header_svs_basic(self):
136157 self .assertEqual (result .attrs ["HEADER" ]["SV_INFO" ].index [1 ], "G02" , "Second SV should be G02" )
137158 self .assertEqual (result .attrs ["HEADER" ]["SV_INFO" ].iloc [1 ], 8 , "Second ACC should be 8" )
138159
160+ # Somewhat redundant as it tests the same use of the read function as an already baselined test above
161+ # UnitTestBaseliner.mode = "baseline"
162+ # UnitTestBaseliner.create_baseline([result])
163+
164+ self .assertTrue (UnitTestBaseliner .verify ([result ]), "Hash verification should pass" )
165+
139166 def test_read_sp3_header_svs_detailed (self ):
140167 """
141168 Test header parser's ability to read SVs and their accuracy codes correctly. Uses separate, artificial
@@ -182,6 +209,12 @@ def test_read_sp3_header_svs_detailed(self):
182209 end_line2_acc = sv_info .iloc [29 ]
183210 self .assertEqual (end_line2_acc , 18 , msg = "Last ACC on test line 2 (pos 30) should be 18" )
184211
212+ # TODO add support for pandas Series
213+ # UnitTestBaseliner.mode = "baseline"
214+ # UnitTestBaseliner.create_baseline([result])
215+
216+ self .assertTrue (UnitTestBaseliner .verify ([result ]), "Hash verification should pass" )
217+
185218 def test_read_sp3_validation_sv_count_mismatch_header_vs_content (self ):
186219 with self .assertRaises (ValueError ) as context_manager :
187220 sp3 .read_sp3 (
@@ -206,6 +239,12 @@ def test_read_sp3_correct_svs_read_when_ev_ep_present(self):
206239 parsed_svs_content = sp3 .get_unique_svs (result ).astype (str ).values
207240 self .assertEqual (set (parsed_svs_content ), set (["G01" , "G02" , "G03" , "G04" , "G05" ]))
208241
242+ # TODO add support for pandas Index
243+ # UnitTestBaseliner.mode = "baseline"
244+ # UnitTestBaseliner.create_baseline([result, parsed_svs_content])
245+
246+ self .assertTrue (UnitTestBaseliner .verify ([result , parsed_svs_content ]), "Hash verification should pass" )
247+
209248 # TODO Add test(s) for correctly reading header fundamentals (ACC, ORB_TYPE, etc.)
210249 # TODO add tests for correctly reading the actual content of the SP3 in addition to the header.
211250
@@ -214,7 +253,7 @@ def test_read_sp3_overlong_lines(self):
214253 Test overlong content line check
215254 """
216255
217- test_content_no_overlong : bytes = b"""#dV2007 4 12 0 0 0.00000000 2 ORBIT IGS14 BHN ESOC
256+ test_content_overlong : bytes = b"""#dV2007 4 12 0 0 0.00000000 2 ORBIT IGS14 BHN ESOC
218257## 1422 345600.00000000 900.00000000 54202 0.0000000000000 THIS LINE IS TOO LONG
219258+ 2 G01G02 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 THIS IS OK.........
220259+ 2 G01G02 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 TOO LONG AGAIN ......
@@ -227,7 +266,7 @@ def test_read_sp3_overlong_lines(self):
227266 with self .assertWarns (Warning ) as warning_assessor :
228267
229268 with self .assertRaises (ValueError ) as read_exception :
230- sp3 .read_sp3 (test_content_no_overlong , strictness_comments = STRICT_OFF , strict_mode = STRICT_RAISE )
269+ sp3 .read_sp3 (test_content_overlong , strictness_comments = STRICT_OFF , strict_mode = STRICT_RAISE )
231270 self .assertEqual (
232271 str (read_exception .exception ),
233272 "2 SP3 epoch data lines were overlong and very likely to parse incorrectly." ,
@@ -428,8 +467,11 @@ def test_clean_sp3_orb(self):
428467 Tests cleaning an SP3 DataFrame of duplicates, leading or trailing nodata values, and offline sats
429468 """
430469
470+ objects_to_verify : list = []
471+
431472 # Create dataframe manually, as read function does deduplication itself. This also makes the test more self-contained
432473 sp3_df = TestSP3 .get_example_dataframe ("dupe_epoch_offline_sat_empty_epoch" )
474+ objects_to_verify .append (sp3_df )
433475
434476 self .assertTrue (
435477 # Alternatively you can use all(array == array) to do an elementwise equality check
@@ -454,14 +496,17 @@ def test_clean_sp3_orb(self):
454496 with self .assertWarns (Warning ) as warning_assessor :
455497 sp3_df_no_offline_removal = sp3 .clean_sp3_orb (sp3_df , False )
456498
499+ objects_to_verify .append (sp3_df_no_offline_removal )
500+
457501 captured_warnings = warning_assessor .warnings
458502 self .assertIn (
459503 "Failed to grab filename from sp3 dataframe for error output purposes:" , str (captured_warnings [0 ].message )
460504 )
461505 self .assertEqual (
462506 len (captured_warnings ),
463507 1 ,
464- "Only expected one warning, about failing to get path. Check what other warnings are being raised!" ,
508+ "Only expected one warning, about failing to get path. Check what other warnings are being raised. Full list below:\n "
509+ + stringify_warnings (captured_warnings ),
465510 )
466511
467512 self .assertTrue (
@@ -485,43 +530,58 @@ def test_clean_sp3_orb(self):
485530 with self .assertWarns (Warning ) as warning_assessor :
486531 # Now check with offline sat removal enabled too
487532 sp3_df_with_offline_removal = sp3 .clean_sp3_orb (sp3_df , True )
488- # Check that we still seem to have one epoch with no dupe sats, and now with the offline sat removed
489- self .assertTrue (
490- np .array_equal (sp3_df_with_offline_removal .index .get_level_values (1 ), ["G01" , "G02" ]),
491- "After cleaning there should be no dupe PRNs (and with offline removal, offline sat should be gone)" ,
492- )
533+
534+ objects_to_verify .append (sp3_df_with_offline_removal )
535+
536+ # Check that we still seem to have one epoch with no dupe sats, and now with the offline sat removed
537+ self .assertTrue (
538+ np .array_equal (sp3_df_with_offline_removal .index .get_level_values (1 ), ["G01" , "G02" ]),
539+ "After cleaning there should be no dupe PRNs (and with offline removal, offline sat should be gone)" ,
540+ )
541+
493542 captured_warnings = warning_assessor .warnings
494543 self .assertIn (
495544 "Failed to grab filename from sp3 dataframe for error output purposes:" , str (captured_warnings [0 ].message )
496545 )
497546 self .assertEqual (
498547 len (captured_warnings ),
499- 1 , # Second warning is about pandas 3 deprecations.
548+ 1 , # Second warning is about pandas 3 deprecations. TODO update...
500549 "Only expected one warning, about failing to get path. "
501550 f"Check all warnings below:\n { stringify_warnings (captured_warnings )} " ,
502551 )
503552
553+ # UnitTestBaseliner.mode = "baseline"
554+ # UnitTestBaseliner.create_baseline(objects_to_verify)
555+
556+ self .assertTrue (UnitTestBaseliner .verify (objects_to_verify ), "Hash verification should pass" )
557+
504558 def test_gen_sp3_fundamentals (self ):
505559 """
506560 Tests that the SP3 header and content generation functions produce output that (apart from trailing
507561 whitespace), match a known good value.
508562 NOTE: leverages read_sp3() to pull in sample data, so is prone to errors in that function.
509563 """
510564
565+ objects_to_verify : list = []
566+
511567 # Prep the baseline data to test against, including stripping each line of trailing whitespace.
512568 baseline_header_lines = trim_line_ends (sp3_test_data_short_cod_final_header ).splitlines ()
513569 baseline_content_lines = trim_line_ends (sp3_test_data_short_cod_final_content ).splitlines ()
570+ objects_to_verify .extend ([baseline_header_lines , baseline_content_lines ])
514571
515572 # Note this is suboptimal from a testing standpoint, but for now is a lot easier than manually constructing
516573 # the DataFrame.
517574 sp3_df = sp3 .read_sp3 (bytes (sp3_test_data_short_cod_final ))
575+ objects_to_verify .append (sp3_df )
518576
519577 generated_sp3_header = sp3 .gen_sp3_header (sp3_df , output_comments = True )
520578 generated_sp3_content = sp3 .gen_sp3_content (sp3_df )
579+ objects_to_verify .extend ([generated_sp3_header , generated_sp3_content ])
521580
522581 # As with the baseline data, prep the data under test, for comparison.
523582 test_header_lines = trim_line_ends (generated_sp3_header ).splitlines ()
524583 test_content_lines = trim_line_ends (generated_sp3_content ).splitlines ()
584+ objects_to_verify .extend ([test_header_lines , test_content_lines ])
525585
526586 # TODO maybe we don't want to split the content, just the header
527587
@@ -552,6 +612,11 @@ def test_gen_sp3_fundamentals(self):
552612 f"Content line { i } didn't match" ,
553613 )
554614
615+ # UnitTestBaseliner.mode = "baseline"
616+ # UnitTestBaseliner.create_baseline(objects_to_verify)
617+
618+ self .assertTrue (UnitTestBaseliner .verify (objects_to_verify ), "Hash verification should pass" )
619+
555620 # TODO add tests for correctly generating sp3 output content with gen_sp3_content() and gen_sp3_header()
556621 # These tests should include:
557622 # - Correct alignment of POS, CLK, STDPOS STDCLK, (not velocity yet), FLAGS
@@ -563,15 +628,35 @@ def test_gen_sp3_fundamentals(self):
563628
564629 def test_get_sp3_comments (self ):
565630 # Somewhat standalone test to check fetching of SP3 comments from a DataFrame
631+
632+ objects_to_verify : list = []
633+
566634 expected_comments = [
567635 "/* EUROPEAN SPACE OPERATIONS CENTRE - DARMSTADT, GERMANY" ,
568636 "/* ---------------------------------------------------------" ,
569637 "/* SP3 FILE GENERATED BY NAPEOS BAHN TOOL (DETERMINATION)" ,
570638 "/* PCV:IGS14_2022 OL/AL:EOT11A NONE YN ORB:CoN CLK:CoN" ,
571639 ]
572640 sp3_df : pd .DataFrame = sp3 .read_sp3 (input_data , strict_mode = STRICT_OFF )
573- self .assertEqual (sp3 .get_sp3_comments (sp3_df ), expected_comments , "SP3 comments read should match expectation" )
574- self .assertEqual (sp3_df .attrs ["COMMENTS" ], expected_comments , "Manual read of SP3 comments should match" )
641+ automated_comment_read = sp3 .get_sp3_comments (sp3_df )
642+ manual_comment_read = sp3_df .attrs ["COMMENTS" ]
643+
644+ self .assertEqual (automated_comment_read , expected_comments , "SP3 comments read should match expectation" )
645+ self .assertEqual (manual_comment_read , expected_comments , "Manual read of SP3 comments should match" )
646+ self .assertEqual (
647+ id (automated_comment_read ),
648+ id (manual_comment_read ),
649+ "Manual and automated comment read should return the same object" ,
650+ )
651+
652+ # We don't pass the second comment object here, as we have verified it is the same object, and the
653+ # verifier will raise on duplicates.
654+ objects_to_verify .extend ([sp3_df , automated_comment_read ])
655+
656+ # UnitTestBaseliner.mode = "baseline"
657+ # UnitTestBaseliner.create_baseline(objects_to_verify)
658+
659+ self .assertTrue (UnitTestBaseliner .verify (objects_to_verify ), "Hash verification should pass" )
575660
576661 def test_update_sp3_comments (self ):
577662 # Somewhat standalone test to check updating SP3 comments in a DataFrame
0 commit comments