@@ -9,7 +9,7 @@
 import traceback
 import typing
 from dataclasses import dataclass
-from typing import Literal, cast
+from typing import AnyStr, Literal, cast

 from .models.pr_info import PullRequestInfoSpecial
 from .models.pr_json import PullRequestData, PullRequestState
@@ -46,6 +46,7 @@
 )
 from conda_forge_tick.lazy_json_backends import (
     LazyJson,
+    does_key_exist_in_hashmap,
     get_all_keys_for_hashmap,
     lazy_json_transaction,
     remove_key_for_hashmap,
@@ -662,7 +663,7 @@ def run(
     return migration_run_data["migrate_return_value"], pr_lazy_json


-def _compute_time_per_migrator(mctx, migrators):
+def _compute_time_per_migrator(migrators):
     # we weight each migrator by the number of available nodes to migrate
     num_nodes = []
     for migrator in tqdm.tqdm(migrators, ncols=80, desc="computing time per migrator"):
@@ -918,7 +919,26 @@ def _is_migrator_done(_mg_start, good_prs, time_per, pr_limit):
     return False


-def _run_migrator(migrator, mctx, temp, time_per, git_backend: GitPlatformBackend):
+def _run_migrator(
+    migrator: Migrator,
+    mctx: MigratorSessionContext,
+    temp: list[AnyStr],
+    time_per: float,
+    git_backend: GitPlatformBackend,
+    package: str | None = None,
+) -> int:
+    """
+    Run a migrator.
+
+    :param migrator: The migrator to run.
+    :param mctx: The migrator session context.
+    :param temp: The list of temporary files.
+    :param time_per: The time limit of this migrator.
+    :param git_backend: The GitPlatformBackend instance to use.
+    :param package: The package to update, if None, all packages are updated.
+
+    :return: The number of "good" PRs created by the migrator.
+    """
     _mg_start = time.time()

     migrator_name = get_migrator_name(migrator)
@@ -940,6 +960,14 @@ def _run_migrator(migrator, mctx, temp, time_per, git_backend: GitPlatformBackend):

     possible_nodes = list(migrator.order(effective_graph, mctx.graph))

+    if package:
+        if package not in possible_nodes:
+            logger.warning(
+                f"Package {package} is not a candidate for migration of {migrator_name}"
+            )
+            return 0
+        possible_nodes = [package]
+
     # version debugging info
     if isinstance(migrator, Version):
         print("possible version migrations:", flush=True)
@@ -1084,18 +1112,26 @@ def _setup_limits():
     resource.setrlimit(resource.RLIMIT_AS, (limit_int, limit_int))


-def _update_nodes_with_bot_rerun(gx: nx.DiGraph):
-    """Go through all the open PRs and check if they are rerun"""
+def _update_nodes_with_bot_rerun(gx: nx.DiGraph, package: str | None = None):
+    """
+    Go through all the open PRs and check if they are rerun.
+
+    :param gx: the dependency graph
+    :param package: the package to update, if None, all packages are updated
+    """

     print("processing bot-rerun labels", flush=True)

-    for i, (name, node) in enumerate(gx.nodes.items()):
+    nodes = gx.nodes.items() if not package else [(package, gx.nodes[package])]
+
+    for i, (name, node) in enumerate(nodes):
         # logger.info(
         #     f"node: {i} memory usage: "
         #     f"{psutil.Process().memory_info().rss // 1024 ** 2}MB",
         # )
         with node["payload"] as payload:
             if payload.get("archived", False):
+                logger.debug(f"skipping archived package {name}")
                 continue
             with payload["pr_info"] as pri, payload["version_pr_info"] as vpri:
                 # reset bad
@@ -1145,12 +1181,21 @@ def _filter_ignored_versions(attrs, version):
     return version


-def _update_nodes_with_new_versions(gx):
-    """Updates every node with it's new version (when available)"""
+def _update_nodes_with_new_versions(gx: nx.DiGraph, package: str | None = None):
+    """
+    Updates every node with its new version (when available).
+
+    :param gx: the dependency graph
+    :param package: the package to update, if None, all packages are updated
+    """

     print("updating nodes with new versions", flush=True)

-    version_nodes = get_all_keys_for_hashmap("versions")
+    if package and not does_key_exist_in_hashmap("versions", package):
+        logger.warning(f"Package {package} not found in versions hashmap")
+        return
+
+    version_nodes = get_all_keys_for_hashmap("versions") if not package else [package]

     for node in version_nodes:
         version_data = LazyJson(f"versions/{node}.json").data
@@ -1176,13 +1221,35 @@ def _update_nodes_with_new_versions(gx):
                 vpri["new_version"] = version_from_data


-def _remove_closed_pr_json():
+def _remove_closed_pr_json(package: str | None = None):
+    """
+    Remove the pull request information for closed PRs.
+
+    :param package: The package to remove the PR information for. If None, all PR information is removed. If you pass
+        a package, closed pr_json files are not removed because this would require iterating all pr_json files.
+    """
     print("collapsing closed PR json", flush=True)

+    if package:
+        pr_info_nodes = (
+            [package] if does_key_exist_in_hashmap("pr_info", package) else []
+        )
+        version_pr_info_nodes = (
+            [package] if does_key_exist_in_hashmap("version_pr_info", package) else []
+        )
+
+        if not pr_info_nodes:
+            logger.warning(f"Package {package} not found in pr_info hashmap")
+        if not version_pr_info_nodes:
+            logger.warning(f"Package {package} not found in version_pr_info hashmap")
+    else:
+        pr_info_nodes = get_all_keys_for_hashmap("pr_info")
+        version_pr_info_nodes = get_all_keys_for_hashmap("version_pr_info")
+
     # first we go from nodes to pr json and update the pr info and remove the data
     name_nodes = [
-        ("pr_info", get_all_keys_for_hashmap("pr_info")),
-        ("version_pr_info", get_all_keys_for_hashmap("version_pr_info")),
+        ("pr_info", pr_info_nodes),
+        ("version_pr_info", version_pr_info_nodes),
     ]
     for name, nodes in name_nodes:
         for node in nodes:
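The hashmap guards above repeat the same existence check once per hashmap. A compact equivalent, sketched under the assumption that `does_key_exist_in_hashmap(name, key)` and `get_all_keys_for_hashmap(name)` behave as used in this diff (the helper name `_nodes_for` is hypothetical):

    import logging

    from conda_forge_tick.lazy_json_backends import (
        does_key_exist_in_hashmap,
        get_all_keys_for_hashmap,
    )

    logger = logging.getLogger(__name__)


    def _nodes_for(hashmap: str, package: str | None) -> list[str]:
        # All keys when no package is requested.
        if package is None:
            return get_all_keys_for_hashmap(hashmap)
        # A singleton list if the package exists, else empty plus a warning.
        if not does_key_exist_in_hashmap(hashmap, package):
            logger.warning(f"Package {package} not found in {hashmap} hashmap")
            return []
        return [package]

    # pr_info_nodes = _nodes_for("pr_info", package)
    # version_pr_info_nodes = _nodes_for("version_pr_info", package)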
@@ -1215,6 +1282,11 @@ def _remove_closed_pr_json():

     # at this point, any json blob referenced in the pr info is state != closed
     # so we can remove anything that is empty or closed
+    if package:
+        logger.info(
+            "Since you requested a run for a specific package, we are not removing closed pr_json files."
+        )
+        return
     nodes = get_all_keys_for_hashmap("pr_json")
     for node in nodes:
         pr = LazyJson(f"pr_json/{node}.json")
@@ -1225,22 +1297,22 @@ def _remove_closed_pr_json():
         )


-def _update_graph_with_pr_info():
-    _remove_closed_pr_json()
+def _update_graph_with_pr_info(package: str | None = None):
+    _remove_closed_pr_json(package)
     gx = load_existing_graph()
-    _update_nodes_with_bot_rerun(gx)
-    _update_nodes_with_new_versions(gx)
+    _update_nodes_with_bot_rerun(gx, package)
+    _update_nodes_with_new_versions(gx, package)
     dump_graph(gx)


-def main(ctx: CliContext) -> None:
+def main(ctx: CliContext, package: str | None = None) -> None:
     global START_TIME
     START_TIME = time.time()

     _setup_limits()

     with fold_log_lines("updating graph with PR info"):
-        _update_graph_with_pr_info()
+        _update_graph_with_pr_info(package)
         deploy(ctx, dirs_to_deploy=["version_pr_info", "pr_json", "pr_info"])

     # record tmp dir so we can be sure to clean it later
@@ -1259,7 +1331,6 @@ def main(ctx: CliContext) -> None:
         graph=gx,
         smithy_version=smithy_version,
         pinning_version=pinning_version,
-        dry_run=ctx.dry_run,
     )
     migrators = load_migrators()

@@ -1271,7 +1342,6 @@ def main(ctx: CliContext) -> None:
         time_per_migrator,
         tot_time_per_migrator,
     ) = _compute_time_per_migrator(
-        mctx,
         migrators,
     )
     for i, migrator in enumerate(migrators):
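With these changes, the optional `package` argument threads from `main` through `_update_graph_with_pr_info` and into each migrator run. A hypothetical driver showing both modes (the CLI wiring for `package` is not part of this diff, and the `CliContext` fields used here are assumptions based on its use above):

    from conda_forge_tick.cli_context import CliContext

    ctx = CliContext()
    ctx.dry_run = True  # assumed field: exercise the bot without pushing changes

    # Restrict the whole run (graph update + all migrators) to one feedstock:
    main(ctx, package="numpy")

    # The default, package=None, preserves the old run-everything behavior:
    main(ctx)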