Commit d8a9847

improve validation/error handling of plugin settings
1 parent 034a8a7 commit d8a9847

File tree

1 file changed: +79 -48

plugins/bulkImageScrape/bulkImageScrape.py

Lines changed: 79 additions & 48 deletions
@@ -6,20 +6,76 @@
 from stashapi.stashapp import StashInterface
 from stashapi.scrape_parser import ScrapeParser

-# Quick check to make sure we have the correct version of stashapi
-if StashItem.IMAGE is None or not hasattr(StashInterface, "scrape_image"):
-    log.error(
-        "It seems you are using an older version of stashapi\n"
-        "without support for image scraping.\n"
-        "Please use the requirements.txt file to install the most recent version"
-    )
-    exit(1)
-

 #
 # Helper functions
 #

+README_URL: str = "https://github.com/stashapp/CommunityScripts/blob/main/plugins/bulkImageScrape/README.md"
+
+
+def validate_scraper(client: StashInterface, scraper_id: str) -> str:
+    """
+    Check if the requested scraper exists and is able to scrape images
+    and return the id if it does
+    """
+
+    if scraper_id == "":
+        log.error(
+            "ScraperID is empty - cannot continue\n"
+            "Please set a valid ScraperID in the plugin settings at:\n"
+            "Settings -> Plugins -> Plugins -> BulkImageScrape -> ScraperID\n"
+            "as described in the README.md file at:\n"
+            f"{README_URL}"
+        )
+        exit(1)
+
+    scrapers: list[dict] = client.list_scrapers([StashItem.IMAGE])
+    valid_scraper_ids: list[str] = []
+
+    for scraper in scrapers:
+        if scraper["id"] == scraper_id:
+            valid_scraper_ids.append(scraper["id"])
+
+    if len(valid_scraper_ids) == 0:
+        log.error(
+            f"No valid image scraper found with id {scraper_id}\n"
+            "Please check the ScraperID is correct\n"
+            "Your selected scraper should be listed at:\n"
+            "Settings -> Metadata Providers -> Scrapers -> Image scrapers\n"
+            "as described in the README.md file at:\n"
+            f"{README_URL}"
+        )
+        exit(1)
+
+    if len(valid_scraper_ids) > 1:
+        log.error(
+            f"Multiple image scrapers found with id {scraper_id}\n"
+            "Scraper ID must be unique - please fix your scraper installations\n"
+            "Check your installed scrapers at:\n"
+            "Settings -> Metadata Providers -> Installed Scrapers\n"
+            "as described in the README.md file at:\n"
+            f"{README_URL}"
+        )
+        exit(1)
+
+    return valid_scraper_ids[0]
+
+
+def validate_stashapi(item: StashItem, client: StashInterface) -> None:
+    """
+    Quick check to make sure we have the correct version of stashapi installed
+    """
+    if item.IMAGE is None or not hasattr(client, "scrape_image"):
+        log.error(
+            "It seems you are using an older version of stashapi\n"
+            "without support for image scraping.\n"
+            "Please use the requirements.txt file to install the most recent version\n"
+            "as described in the README.md file at:\n"
+            f"{README_URL}"
+        )
+        exit(1)
+

 def get_tag_id(client: StashInterface, tag_name: str) -> str | None:
     """
@@ -37,21 +93,13 @@ def get_tag_id(client: StashInterface, tag_name: str) -> str | None:
     return tags[0]["id"]


-def get_scraper_id(client: StashInterface, scraper_name: str) -> str | None:
-    """
-    Get the id of a scraper by name or return None if the scraper is not found
-    """
-    scrapers: list[dict] = client.list_scrapers([StashItem.IMAGE])
-    for scraper in scrapers:
-        if scraper["name"] == scraper_name:
-            return scraper["id"]
-    return None
-
-
 def parse_skip_tags(client: StashInterface, skip_tags: str) -> list[str]:
     """
     Parse the skip tags to a list of tag ids
     """
+    if skip_tags == "" or skip_tags is None:
+        return []
+
     skip_tags = skip_tags.split(",")
     tag_ids: list[str] = []
     for tag in skip_tags:
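
The early return added to parse_skip_tags means an unset or empty SkipTags setting no longer reaches str.split. A stand-alone illustration of just that guard (the function name is hypothetical):

def split_skip_tags(skip_tags: str | None) -> list[str]:
    # Mirrors the new guard: an unset setting yields no tags to skip.
    if skip_tags == "" or skip_tags is None:
        return []
    return skip_tags.split(",")

assert split_skip_tags(None) == []
assert split_skip_tags("") == []
assert split_skip_tags("ignore,private") == ["ignore", "private"]
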
@@ -87,9 +135,7 @@ def get_all_images(
         "depth": -1,
     }

-    result: list[dict] = client.find_images(f=image_filter, filter=all_results)
-
-    return result
+    return client.find_images(f=image_filter, filter=all_results)


 def scrape_image(
@@ -128,7 +174,7 @@ def scrape_is_valid(scrape_input: dict | list[dict] | None) -> bool:
             if value is not None and value != [] and value != {} and value != ""
         )
     else:
-        # something went wrong strangely wrong?
+        # something went strangely wrong?
         return False


@@ -183,6 +229,7 @@ def update_image(client: StashInterface, update: dict) -> dict | None:
 json_input: dict = json.loads(sys.stdin.read())
 FRAGMENT_SERVER: dict = json_input["server_connection"]
 stash: StashInterface = StashInterface(FRAGMENT_SERVER)
+log.info("Starting Bulk Image Scrape Plugin")

 config: dict = stash.get_configuration()["plugins"]
 settings: dict[str, any] = {
@@ -197,6 +244,7 @@ def update_image(client: StashInterface, update: dict) -> dict | None:

 if "BulkImageScrape" in config:
     settings.update(config["BulkImageScrape"])
+log.info(f"settings: {settings=}")

 scrape_parser = ScrapeParser(
     stash,
@@ -207,36 +255,18 @@ def update_image(client: StashInterface, update: dict) -> dict | None:
 )

 #
-# Validate input settings
+# VALIDATE ENVIRONMENT
 #

-
-# Exit if no ScraperID is set or we cannot resolve it
-if settings["ScraperID"] == "":
-    log.error("No ScraperID set")
-    exit(1)
-
-scraper_id: None | str = get_scraper_id(stash, settings["ScraperID"])
-if scraper_id is None:
-    log.error(f"ScraperID {settings['ScraperID']} not found - cannot continue")
-    log.error("Please check the ScraperID is correct and try again")
-    exit(1)
-
-# parse the skip tags to a list of tag ids if we have any
-parsed_skip_tags: list[str] = []
-if settings["SkipTags"] != "":
-    parsed_skip_tags = parse_skip_tags(stash, settings["SkipTags"])
-    if len(parsed_skip_tags) == 0:
-        parsed_skip_tags = []
+validate_stashapi(StashItem.IMAGE, stash)
+scraper_id: str = validate_scraper(stash, settings["ScraperID"])
+parsed_skip_tags: list[str] = parse_skip_tags(stash, settings["SkipTags"])

 #
 # MAIN
 #

-log.info("Starting Bulk Image Scrape Plugin")
-log.info(f"settings: {settings=}")
 log.info("Querying images from stash")
-
 images: list[dict] = get_all_images(
     stash, parsed_skip_tags, settings["ExcludeOrganized"]
 )
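
Note that validate_stashapi, wired in above, detects image-scraping support by probing for the capability rather than comparing version strings. A generic illustration of that hasattr pattern (the stub classes are made up for the example):

class OldStub:
    """Stands in for a stashapi client that predates image scraping."""

class NewStub:
    """Stands in for a stashapi client that supports image scraping."""
    def scrape_image(self, *args) -> dict:
        return {}

def supports_image_scraping(client: object) -> bool:
    # Same technique as validate_stashapi: probe for the method, not a version.
    return hasattr(client, "scrape_image")

assert not supports_image_scraping(OldStub())
assert supports_image_scraping(NewStub())
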
@@ -246,7 +276,7 @@ def update_image(client: StashInterface, update: dict) -> dict | None:
     log.info("No images found with the given filters")
     exit(0)
 else:
-    log.info(f"Found {len(images)} images")
+    log.info(f"Found {total_images} images")


 for i, image in enumerate(images, start=1):
@@ -258,7 +288,8 @@ def update_image(client: StashInterface, update: dict) -> dict | None:
     valid: bool = scrape_is_valid(scrape)
     if not valid:
         log.debug(
-            f"Scraper returned invalid/empty result for image {image['id']} with scraper {scraper_id} - skipping"
+            f"Scraper returned invalid/empty result for image {image['id']} "
+            f"with scraper {scraper_id} - skipping"
         )
         continue

