diff --git a/README.md b/README.md index 6e4b31fd..b4a485d8 100644 --- a/README.md +++ b/README.md @@ -1,393 +1,130 @@ -# UltimaScraper (Python 3.10.1+) -[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) -[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/0xHoarder.svg?style=social&label=Follow%200xHoarder)](https://twitter.com/0xHoarder) -# ![app-token](ultima_scraper/docs/assets/img/64255399-96a86700-cf21-11e9-8c62-87a483f33701.png) -# 27th January 2023 Migration - You can either start the script or create the __settings__ and __user_data__ folders manually. - ~~~~~~~~ - Move config.json file into "__settings__" - RENAME ".profiles" folder to "profiles" and move it into "__user_data__" -# List of things I know that are broken: - Profile and header images aren't downloading - UI (Download Progress Bars to be exact) -# Mandatory Tutorial - -Read the [#FAQ](README.md#faq) at the bottom of this page before submitting a issue. - -## Running the app locally -From the project folder open Windows Powershell/Terminal and run the commands below: - -### Installation commands: ->### Install Poetry ->https://python-poetry.org/docs/ - -Update: ->`python updater.py` - -Start: - ->`poetry run python start_us.py` ---- - -Open and edit: - -`__user_data__/profiles/default/auth.json` - -[auth] - -You have to fill in the following: - -- `{"cookie":"cookie_value"}` -- `{"x_bc":"x-bc_value"}` -- `{"user_agent":"user-agent_value"}` - -Go to www.onlyfans.com and login, open the network debugger, then check the image below on how to get said above auth values. Using Chrome for this process is recommended, as other browsers sometimes have issues producing values that will auth properly. - -![app-token](ultima_scraper/docs/assets/img/3.png) -![app-token](ultima_scraper/docs/assets/img/4.png) - -Your auth config should look similar to this - -![app-token](ultima_scraper/docs/assets/img/5.png) - - - -If you get auth attempt errors, only YOU can fix it unless you're willing to let me into your account so I can see if it's working or not. - -Note: If active is set to False, the script will ignore the profile. - -# USAGE - -`poetry run python start_us.py` - -Enter in inputs as prompted by console. - -# OPTIONAL - -Open: - -`config.json` (Open with a texteditor) - -[settings] - -### profile_directories: - -Where your account information is stored (auth.json). - - Default = ["__user_data__/profiles"] - - If you're going to fill, please remember to use forward ("/") slashes only. - -### download_directories: - -Where downloaded content is stored. - - Default = ["__user_data__/sites"] - - If you're going to fill, please remember to use forward ("/") slashes only. - - You can add multiple directories and the script will automatically rollover to the next directory if the current is full. - -### metadata_directories: - -Where metadata content is stored. - - Default = ["__user_data__/sites"] - - If you're going to fill, please remember to use forward ("/") slashes only. - - Automatic rollover not supported yet. - -### path_formatting: - -Overview for [file_directory_format](#file_directory_format), [filename_format](#filename_format) and [metadata_directory_format](#metadata_directory_format) - - {site_name} = The site you're scraping. - - {first_letter} = First letter of the model you're scraping. - - {post_id} = The posts' ID. - - {media_id} = The media's ID. - - {profile_username} = Your account's username. 
- - {model_username} = The model's username. - - {api_type} = Posts, Messages, etc. - - {media_type} = Images, Videos, etc. - - {filename} = The media's filename. - - {value} = Value of the content. Paid or Free. - - {text} = The media's text. - - {date} = The post's creation date. - - {ext} = The media's file extension. - - Don't use the text variable. If you do, enjoy emojis in your filepaths and errors lmao. - -### file_directory_format: - -This puts each media file into a folder. - -The list below are unique identifiers that you must include. - -You can choose one or more. - - Default = "{site_name}/{model_username}/{api_type}/{value}/{media_type}" - Default Translated = "OnlyFans/belledelphine/Posts/Free/Images" - - {model_username} = belledelphine - -### filename_format: - -Usage: Format for a filename - -The list below are unique identifiers that you must include. - -You must choose one or more. - - Default = "{filename}.{ext}" - Default Translated = "5fb5a5e4b4ce6c47ce2b4_source.mp4" - - {filename} = 5fb5a5e4b4ce6c47ce2b4_source - {media_id} = 133742069 - -### metadata_directory_format: - -Usage: Filepath for metadata. It's tied with download_directories so ignore metadata_directories in the config. - -The list below are unique identifiers that you must include. - -You must choose one or more. - - Default = "{site_name}/{model_username}/Metadata" - Default Translated = "OnlyFans/belledelphine/Metadata" - - {model_username} = belledelphine - -### text_length: - -Usage: When you use {text} in filename_format, a limit of how many characters can be set by inputting a number. - - Default = "" - Ideal = "50" - Max = "255" - - The ideal is actually 0. - -### video_quality: - -Usage: Select the resolution of the video. - - Default = "source" - 720p = "720" | "720p" - 240p = "240" | "240p" - -### auto_profile_choice: -Types: str|int - -Usage: You can automatically choose which profile you want to scrape. - - Default = "" - - If you've got a profile folder named "user_one", set auto_profile_choice to "user_one" and it will choose it automatically. - -### auto_site_choice: -Types: list|str|bool - -Usage: You can automatically choose which site you want to scrape. - - Default = "" - - Inputs: onlyfans, fansly - -### auto_media_choice: -Types: list|str|bool - -Usage: You can automatically choose which media type you want to scrape. - - Default = "" - - Inputs: All, Images, Videos, etc - - You can automatically choose which type of media you want to scrape. - -### auto_model_choice: -Types: list|str|bool - - Default = false - Inputs: All, username, etc - - If set to true, the script will scrape all the names. - -### auto_api_choice: - - Default = true - - If set to false, you'll be given the option to scrape individual apis. - -### jobs: - (Downloads) - "subscriptions" - This will scrape your standard content - "paid_content" - This will scrape paid content - - If set to false, it won't do the job. - -### export_type: - - Default = "json" - - JSON = "json" - - You can export an archive to different formats (not anymore lol). - -### overwrite_files: - - Default = false - - If set to true, any file with the same name will be redownloaded. - -### date_format: - - Default = "%d-%m-%Y" - - If you live in the USA and you want to use the incorrect format, use the following: - - "%m-%d-%Y" - -### max_threads: - - Default = -1 - - When number is set below 1, it will use all threads. - Set a number higher than 0 to limit threads. 
- -### min_drive_space: - - Default = 0 - Type: Float - - Space is calculated in GBs. - 0.5 is 500mb, 1 is 1gb,etc. - When a drive goes below minimum drive space, it will move onto the next drive or go into an infinite loop until drive is above the minimum space. - -### webhooks: - - Default = [] - - Supported webhooks: - Discord - - Data is sent whenever you've completely downloaded a model. - You can also put in your own custom url and parse the data. - Need another webhook? Open an issue. - -### exit_on_completion: - - Default = false - - If set to true the scraper run once and exit upon completion, otherwise the scraper will give the option to run again. This is useful if the scraper is being executed by a cron job or another script. - -### infinite_loop: - - Default = true - - If set to false, the script will run once and ask you to input anything to continue. - -### loop_timeout: - - Default = 0 - - When infinite_loop is set to true this will set the time in seconds to pause the loop in between runs. - -### boards: - - Default = [] - Example = ["s", "gif"] - - Input boards names that you want to automatically scrape. - -### ignored_keywords: - - Default = [] - Example = ["ignore", "me"] - - Any words you input, the script will ignore any content that contains these words. - -### ignore_type: - - Default = "" - a = "paid" - b = "free" - - This setting will not include any paid or free accounts in your subscription list. - - Example: "ignore_type": "paid" - - This choice will not include any accounts that you've paid for. - -### export_metadata: - - Default = true - - Set to false if you don't want to save metadata. - -### blacklist_name: - - Default = "" - Example = ["Blacklisted"] - Example = "Blacklisted,alsoforbidden" - - This setting allows you to remove usernames when you choose the "scrap all" option by using lists or targetting specific usernames. - - 1. Go to https://onlyfans.com/my/lists and create a new list; you can name it whatever you want but I called mine "Blacklisted". - Add the list's name to the config. - Example: "blacklist_name": "Blacklisted" - - 2. Or simply put the username of the content creator in the list. - -# Other Tutorials: - ->## Running the app via docker ->>Build and run the image, mounting the appropriate directories: -> ->>`docker build -t only-fans . && docker run -it --rm --name onlyfans -v ${PWD}/__settings__:/usr/src/app/__settings__ -v ${PWD}/__user_data__:/usr/src/app/__user_data__ only-fans` - ->## Running on Linux ->>[Running in Linux](/ultima_scraper/docs/Linux.md) - -# FAQ: - -Before troubleshooting, make sure you're using Python 3.10.1 and the latest commit of the script. - -## Error: Access Denied / Auth Loop - -> Quadrupal check that the cookies and user agent are correct. -> Remove 2FA. - -## I'm getting authed into the wrong account - -> Enjoy the free content. | This has been patched lol. - -## Do OnlyFans or OnlyFans models know I'm using this script? - -> OnlyFans may know that you're using this script, but I try to keep it as anon as possible. - -> Generally, models will not know unless OnlyFans tells them but other than that there is identifiable information in the metadata folder which contains your IP address, so don't share it unless you're using a proxy/vpn or just don't care. - -## Do you collect session information? - -> No. The code is on Github which allows you to audit the codebase yourself. 
You can use wireshark or any other network analysis program to verify the outgoing connections are respective to the modules you chose. - -## Serious Disclaimer (lmao): - -> OnlyFans is a registered trademark of Fenix International Limited 🤓☝️. -> -> The contributors of this script isn't in any way affiliated with, sponsored by, or endorsed by Fenix International Limited 🤓☝️. -> -> The contributors of this script are not responsible for the end users' actions... 🤓☝️. +# UltimaScraper + +UltimaScraper is a Python-based scraper for various content platforms. It allows users to download content from selected sites based on their configuration. + +## Installation + +1. **Clone the repository:** + ```bash + git clone https://github.com/ultimascraper/ultima-scraper.git + cd ultima-scraper + ``` + +2. **Install dependencies:** + It is recommended to use a virtual environment. + ```bash + python -m venv venv + source venv/bin/activate # On Windows use `venv\Scripts\activate` + ``` + Install the required packages: + ```bash + pip install -r requirements.txt + ``` + +## Running the Scraper + +To run the scraper, execute the `start_us.py` script: + +```bash +python start_us.py +``` + +You will be prompted to choose the site you want to scrape if not configured for automatic choice. + +## Configuration + +The scraper's behavior is controlled by a `config.json` file located in the `ultima_scraper_settings/settings` directory. If this file or directory structure does not exist, the scraper will likely create it with default values on the first run. + +### Main Configuration Options (`config.json`): + +The `config.json` file typically contains settings related to: + +* **`settings`**: Global settings for the scraper. + * **`auto_site_choice`**: Set to a specific site name (e.g., "OnlyFans", "Fansly") to automatically select that site, or leave empty/null to be prompted. + * **`infinite_loop`**: `true` to run the scraper continuously, `false` to run once. + * **`loop_timeout`**: Number of seconds to wait between runs if `infinite_loop` is `true`. + * **`exit_on_completion`**: `true` to exit the script after the current scraping job is done, `false` to wait for user input or loop. + * **`proxies`**: Configure proxy settings if needed. + * **`webhooks`**: Configure webhook URLs for notifications. + * **Other settings**: Refer to `ultima_scraper_api.classes.make_settings.GlobalSettings` for more details. + +* **`supported`**: A dictionary listing supported sites and their specific configurations. For each site (e.g., "OnlyFans", "Fansly"): + * **`auth`**: Authentication details. + * **`username`**: Your username for the site. + * **`password`**: Your password for the site (handle with care). + * **`sess`**: Session cookie. + * **`user_agent`**: User agent string for requests. + * **`auth_id`**: Authentication ID. + * **Other auth-related fields**: Specific to the site's authentication mechanism. + * **`auto_profile_choice`**: Automatically select profiles if set. + * **`auto_model_choice`**: Automatically select models/performers to scrape. + * **`auto_api_choice`**: Automatically select content types to scrape (e.g., "Posts", "Messages"). + * **`auto_media_choice`**: Automatically select media types to download (e.g., "Images", "Videos"). + * **Other site-specific settings**: Refer to `ultima_scraper_api.classes.make_settings.SiteSettings` for more details. 
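Putting those options together, a minimal `config.json` skeleton might look like the sketch below. This is illustrative only: the key names follow the option list above and the defaults used in earlier versions of this README, while the authoritative field set comes from `GlobalSettings` and `SiteSettings` in `ultima_scraper_api` (proxies, webhooks, and several other fields are omitted here), so prefer letting the scraper generate the file on first run and then editing it.

```json
{
  "settings": {
    "auto_site_choice": "",
    "infinite_loop": true,
    "loop_timeout": 0,
    "exit_on_completion": false
  },
  "supported": {
    "OnlyFans": {
      "auth": {
        "username": "",
        "sess": "",
        "user_agent": "",
        "auth_id": ""
      },
      "auto_profile_choice": "",
      "auto_model_choice": false,
      "auto_api_choice": true,
      "auto_media_choice": ""
    },
    "Fansly": {
      "auth": {},
      "auto_model_choice": false
    }
  }
}
```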
+ +**Note:** You will need to manually create or edit the `ultima_scraper_settings/settings/config.json` file with your authentication details and preferences. The scraper might guide you through creating parts of this configuration or authenticating interactively for the first time. + +### Profile-Specific Authentication + +For sites like OnlyFans and Fansly, authentication details are often stored in a user-specific `auth.json` file at `ultima_scraper_settings/profiles/[site_name]/[username]/auth.json`. The scraper will use these files to log in. An illustrative example of this file is included at the end of this README. + +## Usage Examples + +1. **First-time run / Interactive setup:** + If `config.json` is missing or `auto_site_choice` is not set, running `python start_us.py` will typically prompt you to select a site. For sites requiring login, it might open a browser or ask for credentials to perform the initial authentication. + +2. **Automatic scraping of a specific site:** + Modify `ultima_scraper_settings/settings/config.json`: + ```json + { + "settings": { + "auto_site_choice": "OnlyFans", + "infinite_loop": false, + "exit_on_completion": true + // ... other global settings + }, + "supported": { + "OnlyFans": { + // ... your OnlyFans auth and site settings ... + "auto_model_choice": "all" // Example: scrape all subscribed models + }, + "Fansly": { + // ... your Fansly auth and site settings ... + } + } + } + ``` + Then run: + ```bash + python start_us.py + ``` + This will automatically start scraping OnlyFans content based on your configuration and then exit. + +3. **Continuous scraping with a pause:** + Modify `ultima_scraper_settings/settings/config.json`: + ```json + { + "settings": { + "auto_site_choice": "Fansly", + "infinite_loop": true, + "loop_timeout": 3600, // Pause for 1 hour (3600 seconds) + "exit_on_completion": false + // ... other global settings + }, + "supported": { + "OnlyFans": { + // ... + }, + "Fansly": { + // ... your Fansly auth and site settings ... + "auto_api_choice": ["Posts", "Messages"], // Example: scrape posts and messages + "auto_model_choice": ["model_username1", "model_username2"] // Example: scrape specific models + } + } + } + ``` + Run the scraper. It will scrape Fansly, wait for an hour, and then repeat. + +## Disclaimer + +This tool is intended for personal use and for downloading content that you have legitimate access to. Respect the terms of service of the platforms you are using this tool with. The developers are not responsible for misuse of this tool. 
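## Example `auth.json` (illustrative)

As a companion to the Profile-Specific Authentication section above, the sketch below shows roughly what a per-profile `auth.json` can look like. The field names are taken from the previous version of this README (`cookie`, `x_bc`, and the user agent are copied from your browser's network debugger while logged in) plus the `active` flag the scraper checks at start-up; the exact set of keys can vary by site and release, so compare against a file the scraper itself generates before relying on this.

```json
{
  "auth": {
    "active": true,
    "cookie": "cookie_value",
    "x_bc": "x-bc_value",
    "user_agent": "user-agent_value"
  }
}
```

If `active` is `false`, the scraper skips that profile.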
diff --git a/start_us.py b/start_us.py index 8fd75132..b6e82a28 100644 --- a/start_us.py +++ b/start_us.py @@ -12,51 +12,72 @@ main_test.check_start_up() +# Main execution block if __name__ == "__main__": + # Import necessary modules import ultima_scraper_api.apis.api_helper as api_helper import ultima_scraper_api.helpers.main_helper as main_helper from ultima_scraper_collection.managers.filesystem_manager import FilesystemManager from ultima_scraper.ultima_scraper import UltimaScraper + # Pass parsed arguments to API helper api_helper.parsed_args = parsed_args + # Initialize FilesystemManager fsm = FilesystemManager() + # Define path to config file config_path = fsm.settings_directory.joinpath("config.json") + # Load configuration and global settings config, _updated = main_helper.get_config(config_path) global_settings = config.settings + # Extract relevant settings exit_on_completion = global_settings.exit_on_completion infinite_loop = global_settings.infinite_loop loop_timeout = global_settings.loop_timeout domain = global_settings.auto_site_choice json_sites = config.supported + # Choose modules based on domain and supported sites string, site_names_ = main_helper.module_chooser(domain, json_sites.__dict__) + # Define available site names site_name_literals = Literal["OnlyFans", "Fansly"] site_names: list[site_name_literals] = list(get_args(site_name_literals)) + # # Configure logging (currently commented out) # logging.basicConfig(level=logging.DEBUG, format="%(message)s") + # Initialize UltimaScraper with global settings USR = UltimaScraper(global_settings) + # Asynchronous function to run the scraper async def main(): + # Main loop for scraping while True: + # Create site selection options site_options = await USR.option_manager.create_option( site_names, "sites", domain ) + # Iterate through selected sites for site_name in site_options.final_choices: + # Start the scraper for the current site _api = await USR.start( config, site_name, ) + # Handle post-scraping actions based on settings if exit_on_completion: + # Close API pools if exiting on completion for datascraper in USR.datascraper_manager.datascrapers: await datascraper.api.close_pools() await USR.ui_manager.display("Now exiting") break elif not infinite_loop: + # Wait for user input if not in infinite loop await USR.ui_manager.display("Input anything to continue") input() elif loop_timeout: + # Pause if loop timeout is set await USR.ui_manager.display( f"Pausing scraper for {loop_timeout} seconds" ) await asyncio.sleep(float(loop_timeout)) + # Run the main asynchronous function asyncio.run(main()) diff --git a/ultima_scraper/ultima_scraper.py b/ultima_scraper/ultima_scraper.py index d3c847d5..8640f167 100644 --- a/ultima_scraper/ultima_scraper.py +++ b/ultima_scraper/ultima_scraper.py @@ -25,11 +25,19 @@ user_types = ultima_scraper_api.user_types +# Main class for the UltimaScraper application class UltimaScraper: + # Initializes the UltimaScraper class + # Args: + # settings (Settings, optional): Application settings. Defaults to Settings(). 
 def __init__(self, settings: Settings = Settings()) -> None: + # Manages user interface interactions self.ui_manager = UiManager() + # Manages user options and choices self.option_manager = OptionManager() + # Manages datascrapers for different sites self.datascraper_manager = DataScraperManager() + # Stores application settings self.settings = settings async def start( @@ -38,16 +46,26 @@ async def start( site_name: str, api_: api_types | None = None, ): + # Starts the scraping process for a given site. + # Args: + # config (Config): Configuration for the scraper. + # site_name (str): Name of the site to scrape (e.g., "OnlyFans", "Fansly"). + # api_ (api_types | None, optional): Pre-selected API object. Defaults to None. archive_time = timeit.default_timer() + # Select API if not provided if not api_: api_ = ultima_scraper_api.select_api(site_name, config) + # Select datascraper for the chosen API datascraper = self.datascraper_manager.select_datascraper( api_, self.option_manager ) if datascraper: + # Activate directory manager for the datascraper datascraper.filesystem_manager.activate_directory_manager(api_) + # Start the datascraper await self.start_datascraper(datascraper) + # Calculate and display the total time taken for archiving stop_time = str(int(timeit.default_timer() - archive_time) / 60)[:4] await self.ui_manager.display(f"Archive Completed in {stop_time} Minutes") return api_ @@ -56,23 +74,32 @@ async def start_datascraper( self, datascraper: m_onlyfans.OnlyFansDataScraper | m_fansly.FanslyDataScraper, ): + # Starts the datascraping process for a specific datascraper. + # This method handles authentication, profile processing, and job assignment. + # Args: + # datascraper (m_onlyfans.OnlyFansDataScraper | m_fansly.FanslyDataScraper): The datascraper to use. 
api = datascraper.api webhooks = self.settings.webhooks + # Create directories if a directory manager exists if datascraper.filesystem_manager.directory_manager: datascraper.filesystem_manager.directory_manager.create_directories() global_settings = api.get_global_settings() site_settings = api.get_site_settings() + # Return if global or site settings are missing if not (global_settings and site_settings): return + # Process user profiles await self.process_profiles(api, global_settings) scrapable_users: list[user_types] = [] auth_count = 0 + # Create profile options based on auto_profile_choice setting profile_options = await self.option_manager.create_option( api.auths, "profiles", site_settings.auto_profile_choice ) api.auths = profile_options.final_choices # await dashboard_controller.update_main_table(api) identifiers = [] + # Create subscription options if auto_model_choice is enabled if site_settings.auto_model_choice: subscription_options = await self.option_manager.create_option( scrapable_users, "subscriptions", site_settings.auto_model_choice @@ -80,15 +107,19 @@ async def start_datascraper( if not subscription_options.scrape_all(): identifiers = subscription_options.return_auto_choice() self.option_manager.performer_options = subscription_options + # Iterate through authentications for auth in api.auths: auth: auth_types = auth + # Skip if auth details are missing if not auth.get_auth_details(): continue setup = False + # Perform account setup for the datascraper setup, _subscriptions = await datascraper.account_setup( auth, datascraper, site_settings, identifiers ) if not setup: + # Process webhooks for failed authentication if webhooks: await main_helper.process_webhooks( api, "auth_webhook", "failed", global_settings @@ -102,11 +133,13 @@ async def start_datascraper( # main_helper.export_json(auth_details, user_auth_filepath) continue auth_count += 1 + # Extend scrapable users list scrapable_users.extend(await auth.get_scrapable_users()) + # Process webhooks for successful authentication await main_helper.process_webhooks( api, "auth_webhook", "succeeded", global_settings ) - # Do stuff with authed user + # Handle DRM if not already set if not auth.drm: device_client_id_blob_path = ( datascraper.filesystem_manager.devices_directory.joinpath( @@ -127,20 +160,27 @@ async def start_datascraper( device_private_key_path, auth, ) + # Remove invalid authentications await api.remove_invalid_auths() + # Create subscription options subscription_options = await self.option_manager.create_option( scrapable_users, "subscriptions", site_settings.auto_model_choice ) self.option_manager.subscription_options = subscription_options + # Configure datascraper jobs and get the final list of users for jobs final_job_user_list = await datascraper.configure_datascraper_jobs() + # Assign jobs to users await self.assign_jobs(final_job_user_list) + # Process all assigned jobs await datascraper.datascraper.api.job_manager.process_jobs() + # # Optional: Delete empty directories after processing # if global_settings.helpers.delete_empty_directories: # for job_user in job_user_list: # await main_helper.delete_empty_directories( # job_user.directory_manager.user.download_directory, # datascraper.api.filesystem_manager, # ) + # Process webhooks for download completion if webhooks: await main_helper.process_webhooks( api, "download_webhook", "succeeded", global_settings @@ -151,31 +191,46 @@ async def process_profiles( api: api_types, global_settings: make_settings.Settings, ): + # Processes user profiles 
found in the profile directory. + # It reads authentication details from JSON files, logs in, and updates auth details. + # Args: + # api (api_types): The API object for the site. + # global_settings (make_settings.Settings): Global application settings. from ultima_scraper_collection.managers.filesystem_manager import ( FilesystemManager, ) site_name = api.site_name filesystem_manager = FilesystemManager() + # Construct profile directory path profile_directory = filesystem_manager.profiles_directory.joinpath(site_name) profile_directory.mkdir(parents=True, exist_ok=True) + # Get list of user profile directories temp_users = list(filter(lambda x: x.is_dir(), profile_directory.iterdir())) temp_users = filesystem_manager.remove_mandatory_files(temp_users) + # Iterate through user profiles for user_profile in temp_users: user_auth_filepath = user_profile.joinpath("auth.json") temp_json_auth = main_helper.import_json(user_auth_filepath) json_auth = temp_json_auth.get("auth", {}) + # Skip if profile is not active if not json_auth.get("active", None): continue json_auth["username"] = user_profile.name + # Attempt to login with stored auth details authed = await api.login(json_auth) + # Add proxies to session manager authed.session_manager.add_proxies(global_settings.proxies) datas = {"auth": authed.get_auth_details().export()} + # Export updated auth details if datas: main_helper.export_json(datas, user_auth_filepath) return api async def assign_jobs(self, user_list: set[user_types]): + # Assigns scraping and downloading jobs to a list of users. + # Args: + # user_list (set[user_types]): A set of user objects to assign jobs to. datascraper = self.datascraper_manager.active_datascraper if not datascraper: return @@ -188,23 +243,29 @@ async def assign_jobs(self, user_list: set[user_types]): media_types = datascraper.api.MediaTypes() media_types_keys = media_types.get_keys() + # Iterate through each user to assign jobs for user in user_list: + # Create directory manager and format directories for the user await filesystem_manager.create_directory_manager(datascraper.api, user) await filesystem_manager.format_directories(user) + # Initialize metadata manager and process legacy metadata metadata_manager = MetadataManager(user, filesystem_manager) await metadata_manager.process_legacy_metadata() datascraper.metadata_manager_users[user.id] = metadata_manager local_jobs: list[CustomJob] = [] + # Determine API choices for content scraping auto_api_choice = ( site_settings.auto_api_choice if not user.scrape_whitelist else user.scrape_whitelist ) + # Create content scraping options content_options = await self.option_manager.create_option( content_types_keys, "contents", auto_api_choice ) + # Create scraping jobs jobs = JBM.create_jobs( "Scrape", content_options.final_choices, @@ -212,6 +273,7 @@ async def assign_jobs(self, user_list: set[user_types]): [user, metadata_manager], ) local_jobs.extend(jobs) + # Create downloading jobs jobs = JBM.create_jobs( "Download", content_options.final_choices, @@ -222,11 +284,13 @@ async def assign_jobs(self, user_list: set[user_types]): user.jobs.extend(local_jobs) + # Create media type options for downloads media_options = await self.option_manager.create_option( media_types_keys, "medias", site_settings.auto_media_choice ) JBM.add_media_type_to_jobs(media_options.final_choices) + # Add jobs to the job manager's queue for local_job in local_jobs: JBM.queue.put_nowait(local_job) await asyncio.sleep(0)