import shutil
import sys
import textwrap
-from datetime import datetime
from os import PathLike, environ
from pathlib import Path
from pprint import pprint

import pytest
from benchmark import run_benchmarks
-from flaky import flaky
from modflow_devtools.build import meson_build
from modflow_devtools.download import (
    download_and_unzip,
-    download_artifact,
    get_release,
-    list_artifacts,
)
-from modflow_devtools.markers import no_parallel, requires_exe, requires_github
+from modflow_devtools.markers import no_parallel, requires_exe
from modflow_devtools.misc import run_cmd, run_py_script, set_dir

from utils import assert_match, convert_line_endings, get_project_root_path, glob, match
]


-def download_benchmarks(
-    output_path: PathLike,
-    verbose: bool = False,
-    repo_owner: str = "MODFLOW-USGS",
-) -> Optional[Path]:
-    """Try to download MF6 benchmarks from GitHub Actions."""
-
-    output_path = Path(output_path).expanduser().absolute()
-    name = "run-time-comparison"  # todo make configurable
-    repo = f"{repo_owner}/modflow6"  # todo make configurable, add pytest/cli args
-    artifacts = list_artifacts(repo, name=name, verbose=verbose)
-    artifacts = sorted(
-        artifacts,
-        key=lambda a: datetime.strptime(a["created_at"], "%Y-%m-%dT%H:%M:%SZ"),
-        reverse=True,
-    )
-    artifacts = [
-        a
-        for a in artifacts
-        if a["workflow_run"]["head_branch"] == "develop"  # todo make configurable
-    ]
-    most_recent = next(iter(artifacts), None)
-    if most_recent:
-        print(f"Found most recent benchmarks (artifact {most_recent['id']})")
-        print(f"Downloading benchmarks (artifact {most_recent['id']})")
-        download_artifact(repo, id=most_recent["id"], path=output_path, verbose=verbose)
-        print(f"Downloaded benchmarks to {output_path}")
-        path = output_path / f"{name}.md"
-        assert path.is_file()
-        return path
-    else:
-        print("No benchmarks found")
-        return None
-
-
@pytest.fixture
def github_user() -> Optional[str]:
    return environ.get("GITHUB_USER", None)


-@flaky
-@no_parallel
-@requires_github
-def test_download_benchmarks(tmp_path, github_user):
-    path = download_benchmarks(
-        tmp_path,
-        verbose=True,
-        repo_owner=github_user if github_user else "MODFLOW-USGS",
-    )
-    if path:
-        assert path.name == "run-time-comparison.md"
-
-
def build_benchmark_tex(
    output_path: PathLike,
    force: bool = False,
-    repo_owner: str = "MODFLOW-USGS",
):
    """Build LaTeX files for MF6 performance benchmarks to go into the release notes."""

    BENCHMARKS_PATH.mkdir(parents=True, exist_ok=True)
    benchmarks_path = BENCHMARKS_PATH / "run-time-comparison.md"

-    # download benchmark artifacts if any exist on GitHub
-    if not benchmarks_path.is_file():
-        benchmarks_path = download_benchmarks(BENCHMARKS_PATH, repo_owner=repo_owner)
-
    # run benchmarks again if no benchmarks found on GitHub or overwrite requested
    if force or not benchmarks_path.is_file():
        run_benchmarks(
@@ -162,20 +105,6 @@ def build_benchmark_tex(
    assert (RELEASE_NOTES_PATH / f"{benchmarks_path.stem}.tex").is_file()


-@flaky
-@no_parallel
-@requires_github
-def test_build_benchmark_tex(tmp_path):
-    benchmarks_path = BENCHMARKS_PATH / "run-time-comparison.md"
-    tex_path = DISTRIBUTION_PATH / f"{benchmarks_path.stem}.tex"
-
-    try:
-        build_benchmark_tex(tmp_path)
-        assert benchmarks_path.is_file()
-    finally:
-        tex_path.unlink(missing_ok=True)
-
-
def build_deprecations_tex(force: bool = False):
    """Build LaTeX files for the deprecations table to go into the release notes."""

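
With download_benchmarks and its test deleted, the modflow_devtools download helpers they wrapped are still importable. A minimal standalone sketch of the same artifact lookup, assuming list_artifacts and download_artifact keep the call signatures used in the removed code above:

from pathlib import Path

from modflow_devtools.download import download_artifact, list_artifacts

# Mirror the deleted helper: fetch the newest "run-time-comparison" artifact
# produced on the develop branch. The keyword arguments are assumptions
# carried over from the removed code above, not a documented contract.
repo = "MODFLOW-USGS/modflow6"
artifacts = [
    a
    for a in list_artifacts(repo, name="run-time-comparison", verbose=True)
    if a["workflow_run"]["head_branch"] == "develop"
]
if artifacts:
    # "%Y-%m-%dT%H:%M:%SZ" timestamps compare correctly as plain strings
    latest = max(artifacts, key=lambda a: a["created_at"])
    download_artifact(repo, id=latest["id"], path=Path("benchmarks"), verbose=True)
else:
    print("No benchmarks found")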