From 96b64b6072cfb3463b4a86cbab8c1c298a81dd88 Mon Sep 17 00:00:00 2001 From: Shahm Najeeb Date: Sun, 27 Jul 2025 15:38:56 +0400 Subject: [PATCH 01/12] Added new flag `--usage` that gives a rundown of users flags history Refactor Flag class and improved functionality and accuracy tracking, and took the `Match.generate_summary_and_graph()` out of beta after minor patching Updated plans for the future 1/3 beta's out! --- CODE/Logicytics.py | 8 +- CODE/logicytics/Flag.py | 639 ++++++++++++++++++++-------------------- PLANS.md | 6 +- 3 files changed, 328 insertions(+), 325 deletions(-) diff --git a/CODE/Logicytics.py b/CODE/Logicytics.py index bd770446..26195902 100644 --- a/CODE/Logicytics.py +++ b/CODE/Logicytics.py @@ -402,12 +402,8 @@ def handle_special_actions(): input("Press Enter to exit...") exit(0) - if ACTION == "restore": - log.warning( - "Sorry, this feature is yet to be implemented. You can manually Restore your backups, We will open " - "the location for you" - ) - file_management.open_file("../ACCESS/BACKUP/") + if ACTION == "usage": + flag.Match.generate_summary_and_graph() input("Press Enter to exit...") exit(1) diff --git a/CODE/logicytics/Flag.py b/CODE/logicytics/Flag.py index 9e362160..f862cfcd 100644 --- a/CODE/logicytics/Flag.py +++ b/CODE/logicytics/Flag.py @@ -28,345 +28,345 @@ raise ValueError("accuracy_min must be between 1 and 99") -class _Match: - @staticmethod - def __get_sim(user_input: str, all_descriptions: list[str]) -> list[float]: - """ - Compute cosine similarity between user input and flag descriptions using a Sentence Transformer model. - - This method encodes the user input and historical flag descriptions into embeddings and calculates their cosine similarities. It handles model loading, logging configuration, and error handling for the embedding process. - - Parameters: - user_input (str): The current user input to match against historical descriptions - all_descriptions (list[str]): A list of historical flag descriptions to compare - - Returns: - list[float]: A list of similarity scores between the user input and each historical description - - Raises: - SystemExit: If there is an error loading the specified Sentence Transformer model - - Notes: - - Uses the model specified in the configuration file - - Configures logging based on the global DEBUG_MODE setting - - Converts embeddings to tensors for efficient similarity computation - """ - # Encode the current user input and historical inputs - from sentence_transformers import SentenceTransformer, util - import logging # Suppress logging messages from Sentence Transformer due to verbosity - # Set the logging level based on the debug mode, either DEBUG or ERROR (aka only important messages) - if DEBUG_MODE: - logging.getLogger("sentence_transformers").setLevel(logging.DEBUG) - else: - logging.getLogger("sentence_transformers").setLevel(logging.ERROR) - - try: - MODEL = SentenceTransformer(config.get("Flag Settings", "model_to_use")) - except Exception as e: - log.critical(f"Error: {e}") - log.error("Please check the model name in the config file.") - log.error(f"Model name {config.get('Flag Settings', 'model_to_use')} may not be valid.") - exit(1) +class Flag: + class Match: + @staticmethod + def __get_sim(user_input: str, all_descriptions: list[str]) -> list[float]: + """ + Compute cosine similarity between user input and flag descriptions using a Sentence Transformer model. 
- user_embedding = MODEL.encode(user_input, convert_to_tensor=True, show_progress_bar=DEBUG_MODE) - historical_embeddings = MODEL.encode(all_descriptions, convert_to_tensor=True, show_progress_bar=DEBUG_MODE) + This method encodes the user input and historical flag descriptions into embeddings and calculates their cosine similarities. It handles model loading, logging configuration, and error handling for the embedding process. - # Compute cosine similarities - similarities = util.pytorch_cos_sim(user_embedding, historical_embeddings).squeeze(0).tolist() - return similarities + Parameters: + user_input (str): The current user input to match against historical descriptions + all_descriptions (list[str]): A list of historical flag descriptions to compare - @classmethod - def __suggest_flags_based_on_history(cls, user_input: str) -> list[str]: - """ - Suggests flags based on historical data and similarity to the current input. - - This method analyzes historical user interactions to recommend relevant flags when preferences for saving history are enabled. It uses semantic similarity to find the most contextually related flags from past interactions. - - Parameters: - user_input (str): The current input for which suggestions are needed. - - Returns: - list[str]: A list of suggested flags derived from historical interactions, filtered by similarity threshold. - - Notes: - - Returns an empty list if history saving is disabled or no interaction history exists - - Uses cosine similarity with a minimum threshold of 0.3 to filter suggestions - - Limits suggestions to top 3 most similar historical inputs - - Removes duplicate flag suggestions - """ - if not SAVE_PREFERENCES: - return [] - history_data = cls.load_history() - if not history_data or 'interactions' not in history_data: - return [] + Returns: + list[float]: A list of similarity scores between the user input and each historical description - interactions = history_data['interactions'] - all_descriptions = [] - all_flags = [] + Raises: + SystemExit: If there is an error loading the specified Sentence Transformer model - # Combine all flags and their respective user inputs - for flag, details in interactions.items(): - all_flags.extend([flag] * len(details)) - all_descriptions.extend([detail['user_input'] for detail in details]) + Notes: + - Uses the model specified in the configuration file + - Configures logging based on the global DEBUG_MODE setting + - Converts embeddings to tensors for efficient similarity computation + """ + # Encode the current user input and historical inputs + from sentence_transformers import SentenceTransformer, util + import logging # Suppress logging messages from Sentence Transformer due to verbosity + # Set the logging level based on the debug mode, either DEBUG or ERROR (aka only important messages) + if DEBUG_MODE: + logging.getLogger("sentence_transformers").setLevel(logging.DEBUG) + else: + logging.getLogger("sentence_transformers").setLevel(logging.ERROR) - # Encode the current user input and historical inputs - # Compute cosine similarities - similarities = cls.__get_sim(user_input, all_descriptions) + try: + MODEL = SentenceTransformer(config.get("Flag Settings", "model_to_use")) + except Exception as e: + log.critical(f"Error: {e}") + log.error("Please check the model name in the config file.") + log.error(f"Model name {config.get('Flag Settings', 'model_to_use')} may not be valid.") + exit(1) - # Find the top 3 most similar historical inputs - top_indices = sorted(range(len(similarities)), key=lambda 
i: similarities[i], reverse=True)[:3] - suggested_flags = [all_flags[i] for i in top_indices if similarities[i] > 0.3] + user_embedding = MODEL.encode(user_input, convert_to_tensor=True, show_progress_bar=DEBUG_MODE) + historical_embeddings = MODEL.encode(all_descriptions, convert_to_tensor=True, show_progress_bar=DEBUG_MODE) - # Remove duplicates and return suggestions - return list(dict.fromkeys(suggested_flags)) + # Compute cosine similarities + similarities = util.pytorch_cos_sim(user_embedding, historical_embeddings).squeeze(0).tolist() + return similarities - @classmethod - def _generate_summary_and_graph(cls): - """ - Generates a comprehensive summary and visualization of user interaction history with command-line flags. - - This method processes historical interaction data, computes statistical insights, and creates a bar graph representing flag usage frequency. It performs the following key tasks: - - Loads historical interaction data from a compressed file - - Calculates and prints detailed statistics for each flag - - Generates a horizontal bar graph of flag usage counts - - Saves the graph visualization to a PNG file - - Parameters: - cls (_Match): The class instance containing historical data methods - - Raises: - SystemExit: If no history data file is found - FileNotFoundError: If unable to save the graph in default locations - - Side Effects: - - Prints detailed interaction summary to console - - Saves flag usage graph as a PNG image - - Uses matplotlib to create visualization - - Notes: - - Currently in beta stage of development - - Requires matplotlib for graph generation - - Attempts to save graph in multiple predefined directory paths - """ - # TODO Yet in beta - v3.6.0 - # Load the decompressed history data using the load_history function - import matplotlib.pyplot as plt - - if not os.path.exists(HISTORY_FILE): - exit("No history data found.") - - history_data = cls.load_history() - - # Extract interactions and flag usage count - interactions = history_data['interactions'] - flags_usage = history_data['flags_usage'] - - # Summary of flag usage - total_interactions = sum(flags_usage.values()) - - log.info("User Interaction Summary:-\n-------------------------------------------------") - for flag, details in interactions.items(): - log.info(f"\nFlag: {flag}") - - accuracies = [detail['accuracy'] for detail in details] - device_names = [detail['device_name'] for detail in details] - user_inputs = [detail['user_input'] for detail in details] - - average_accuracy = sum(accuracies) / len(accuracies) - most_common_device = Counter(device_names).most_common(1)[0][0] - average_user_input = Counter(user_inputs).most_common(1)[0][0] - - print(f" Average Accuracy: {average_accuracy:.2f}%") - print(f" Most Common Device Name: {most_common_device}") - print(f" Most Common User Input: {average_user_input}") - - # Print the summary to the console - log.info(f"\n\nTotal Interactions with the match flag feature: {total_interactions}") - log.info("\nFlag Usage Summary:") - for flag, count in flags_usage.items(): - print(f" {flag}: {count} times") - - # Generate the graph for flag usage - flags = list(flags_usage.keys()) - counts = list(flags_usage.values()) - - plt.figure(figsize=(10, 6)) - plt.barh(flags, counts, color='skyblue') - plt.xlabel('Usage Count') - plt.title('Flag Usage Frequency') - plt.gca().invert_yaxis() # Invert y-axis for better readability - plt.subplots_adjust(left=0.2, right=0.8, top=0.9, bottom=0.1) # Adjust layout - - # Save and display the graph - try: - 
plt.savefig('../ACCESS/DATA/Flag_usage_summary.png') - log.info("\nFlag Usage Summary Graph saved to 'ACCESS/DATA/Flag_usage_summary.png'") - except FileNotFoundError: + @classmethod + def __suggest_flags_based_on_history(cls, user_input: str) -> list[str]: + """ + Suggests flags based on historical data and similarity to the current input. + + This method analyzes historical user interactions to recommend relevant flags when preferences for saving history are enabled. It uses semantic similarity to find the most contextually related flags from past interactions. + + Parameters: + user_input (str): The current input for which suggestions are needed. + + Returns: + list[str]: A list of suggested flags derived from historical interactions, filtered by similarity threshold. + + Notes: + - Returns an empty list if history saving is disabled or no interaction history exists + - Uses cosine similarity with a minimum threshold of 0.3 to filter suggestions + - Limits suggestions to top 3 most similar historical inputs + - Removes duplicate flag suggestions + """ + if not SAVE_PREFERENCES: + return [] + history_data = cls.load_history() + if not history_data or 'interactions' not in history_data: + return [] + + interactions = history_data['interactions'] + all_descriptions = [] + all_flags = [] + + # Combine all flags and their respective user inputs + for flag, details in interactions.items(): + all_flags.extend([flag] * len(details)) + all_descriptions.extend([detail['user_input'] for detail in details]) + + # Encode the current user input and historical inputs + # Compute cosine similarities + similarities = cls.__get_sim(user_input, all_descriptions) + + # Find the top 3 most similar historical inputs + top_indices = sorted(range(len(similarities)), key=lambda i: similarities[i], reverse=True)[:3] + suggested_flags = [all_flags[i] for i in top_indices if similarities[i] > 0.3] + + # Remove duplicates and return suggestions + return list(dict.fromkeys(suggested_flags)) + + @classmethod + def generate_summary_and_graph(cls): + """ + Generates a comprehensive summary and visualization of user interaction history with command-line flags. + + This method processes historical interaction data, computes statistical insights, and creates a bar graph representing flag usage frequency. 
It performs the following key tasks: + - Loads historical interaction data from a compressed file + - Calculates and prints detailed statistics for each flag + - Generates a horizontal bar graph of flag usage counts + - Saves the graph visualization to a PNG file + + Parameters: + cls (_Match): The class instance containing historical data methods + + Raises: + SystemExit: If no history data file is found + FileNotFoundError: If unable to save the graph in default locations + + Side Effects: + - Prints detailed interaction summary to console + - Saves flag usage graph as a PNG image + - Uses matplotlib to create visualization + + Notes: + - Currently in beta stage of development + - Requires matplotlib for graph generation + - Attempts to save graph in multiple predefined directory paths + """ + # Load the decompressed history data using the load_history function + import matplotlib.pyplot as plt + + if not os.path.exists(HISTORY_FILE): + exit("No history data found.") + + history_data = cls.load_history() + + # Extract interactions and flag usage count + interactions = history_data['interactions'] + flags_usage = history_data['flags_usage'] + + # Summary of flag usage + total_interactions = sum(flags_usage.values()) + + log.info( + "--------------------------------------------------\n Flag guessing statistics:\n --------------------------------------------------") + + for flag, details in interactions.items(): + accuracies = [detail['accuracy'] for detail in details] + device_names = [detail['device_name'] for detail in details] + user_inputs = [detail['user_input'] for detail in details] + + average_accuracy = sum(accuracies) / len(accuracies) + most_common_device = Counter(device_names).most_common(1)[0][0] + average_user_input = Counter(user_inputs).most_common(1)[0][0] + log.info(f"""Flag: {flag} + Average Accuracy: {average_accuracy:.2f}% + Most Common Device Name: {most_common_device} + Most Common User Input: {average_user_input}""") + + # Print the summary to the console + log.info( + "--------------------------------------------------\n User Interaction Summary:\n --------------------------------------------------") + + log.info(f"Total Interactions with the match flag feature: {total_interactions}") + flag_usage_summary = "\n".join([f" {flag}: {count} times" for flag, count in flags_usage.items()]) + log.info(f"Flag Usage Summary:\n{flag_usage_summary}") + + # Generate the graph for flag usage + flags = list(flags_usage.keys()) + counts = list(flags_usage.values()) + + plt.figure(figsize=(10, 6)) + plt.barh(flags, counts, color='skyblue') + plt.xlabel('Usage Count') + plt.title('Flag Usage Frequency') + plt.gca().invert_yaxis() # Invert y-axis for better readability + plt.subplots_adjust(left=0.2, right=0.8, top=0.9, bottom=0.1) # Adjust layout + + # Save and display the graph try: - plt.savefig('../../ACCESS/DATA/Flag_usage_summary.png') - log.info("\nFlag Usage Summary Graph saved to 'ACCESS/DATA/Flag_usage_summary.png'") + plt.savefig('../ACCESS/DATA/Flag_usage_summary.png') + log.info("Flag Usage Summary Graph saved to 'ACCESS/DATA/Flag_usage_summary.png'") except FileNotFoundError: - plt.savefig('Flag_usage_summary.png') - log.info("\nFlag Usage Summary Graph saved in current working directory as 'Flag_usage_summary.png'") + try: + plt.savefig('../../ACCESS/DATA/Flag_usage_summary.png') + log.info("Flag Usage Summary Graph saved to 'ACCESS/DATA/Flag_usage_summary.png'") + except FileNotFoundError: + plt.savefig('Flag_usage_summary.png') + log.info("Flag Usage Summary Graph 
saved in current working directory as 'Flag_usage_summary.png'") + + @staticmethod + def load_history() -> dict: + """ + Load user interaction history from a gzipped JSON file. - @staticmethod - def load_history() -> dict: - """ - Load user interaction history from a gzipped JSON file. - - This method attempts to read and parse historical interaction data from a compressed JSON file. If the file is not found, it returns an empty history structure with an empty interactions dictionary and a zero-initialized flags usage counter. - - Returns: - dict: A dictionary containing: - - 'interactions': A dictionary of past user interactions - - 'flags_usage': A Counter object tracking flag usage frequencies - - Raises: - json.JSONDecodeError: If the JSON file is malformed - gzip.BadGzipFile: If the gzipped file is corrupted - """ - try: - with gzip.open(HISTORY_FILE, 'rt', encoding='utf-8') as f: # Use 'rt' mode for text read - return json.load(f) - except FileNotFoundError: - return {'interactions': {}, 'flags_usage': Counter()} + This method attempts to read and parse historical interaction data from a compressed JSON file. If the file is not found, it returns an empty history structure with an empty interactions dictionary and a zero-initialized flags usage counter. - @staticmethod - def save_history(history_data: dict): - """ - Save user interaction history to a gzipped JSON file. - - This method writes the user history to a compressed JSON file only if saving preferences are enabled. - The history is saved with an indentation of 4 spaces for readability. - - Parameters: - history_data (dict[str, any]): A dictionary containing user interaction history data to be saved. - - Notes: - - Saves only if SAVE_PREFERENCES is True - - Uses gzip compression to reduce file size - - Writes in UTF-8 encoding - - Indents JSON for human-readable format - """ - if SAVE_PREFERENCES: - with gzip.open(HISTORY_FILE, 'wt', encoding='utf-8') as f: # Use 'wt' mode for text write - json.dump(history_data, f, indent=4) + Returns: + dict: A dictionary containing: + - 'interactions': A dictionary of past user interactions + - 'flags_usage': A Counter object tracking flag usage frequencies - @classmethod - def update_history(cls, user_input: str, matched_flag: str, accuracy: float): - """ - Update the user interaction history with details of a matched flag. - - This method records user interactions with flags, including timestamp, input, match accuracy, - and device information. It only updates history if save preferences are enabled. - - Parameters: - user_input (str): The original input text provided by the user. - matched_flag (str): The flag that was successfully matched to the user input. - accuracy (float): The similarity/match accuracy score for the flag. 
- - Side Effects: - - Modifies the history JSON file by adding a new interaction entry - - Increments the usage count for the matched flag - - Requires write access to the history file - - Notes: - - Skips history update if SAVE_PREFERENCES is False - - Creates new flag entries in history if they do not exist - - Uses current timestamp and logged-in user's device name - """ - if not SAVE_PREFERENCES: - return - history_data = cls.load_history() - matched_flag = matched_flag.lstrip('-') - - # Ensure that interactions is a dictionary (not a list) - if not isinstance(history_data['interactions'], dict): - history_data['interactions'] = {} - - # Create a new interaction dictionary - interaction = { - 'timestamp': datetime.now().strftime('%H:%M:%S - %d/%m/%Y'), - 'user_input': user_input, - 'accuracy': accuracy, - 'device_name': os.getlogin() - } + Raises: + json.JSONDecodeError: If the JSON file is malformed + gzip.BadGzipFile: If the gzipped file is corrupted + """ + try: + with gzip.open(HISTORY_FILE, 'rt', encoding='utf-8') as f: # Use 'rt' mode for text read + return json.load(f) + except FileNotFoundError: + return {'interactions': {}, 'flags_usage': Counter()} - # Ensure the flag exists in the interactions dictionary - if matched_flag not in history_data['interactions']: - history_data['interactions'][matched_flag] = [] + @staticmethod + def save_history(history_data: dict): + """ + Save user interaction history to a gzipped JSON file. - # Append the new interaction to the flag's list of interactions - history_data['interactions'][matched_flag].append(interaction) + This method writes the user history to a compressed JSON file only if saving preferences are enabled. + The history is saved with an indentation of 4 spaces for readability. - # Ensure the flag exists in the flags_usage counter and increment it - if matched_flag not in history_data['flags_usage']: - history_data['flags_usage'][matched_flag] = 0 - history_data['flags_usage'][matched_flag] += 1 + Parameters: + history_data (dict[str, any]): A dictionary containing user interaction history data to be saved. - cls.save_history(history_data) + Notes: + - Saves only if SAVE_PREFERENCES is True + - Uses gzip compression to reduce file size + - Writes in UTF-8 encoding + - Indents JSON for human-readable format + """ + if SAVE_PREFERENCES: + with gzip.open(HISTORY_FILE, 'wt', encoding='utf-8') as f: # Use 'wt' mode for text write + json.dump(history_data, f, indent=4) - @classmethod - def flag(cls, user_input: str, flags: list[str], flag_description: list[str]) -> tuple[str, float]: - """ - Matches user input to flag descriptions using advanced semantic similarity. - - Computes the best matching flag based on cosine similarity between the user input and flag descriptions. - Handles matching with a minimum accuracy threshold and provides flag suggestions from historical data - if no direct match is found. - - Parameters: - user_input (str): The input string to match against available flags. - flags (list[str]): List of available command flags. - flag_description (list[str]): Corresponding descriptions for each flag. - - Returns: - tuple[str, float]: A tuple containing: - - The best matched flag (or 'Nothing matched') - - Accuracy percentage of the match (0.0-100.0) - - Raises: - ValueError: If the number of flags and descriptions do not match. 
- - Side Effects: - - Updates user interaction history - - Prints flag suggestions if no direct match is found - - Requires a global MIN_ACCURACY_THRESHOLD to be defined - - Example: - matched_flag, accuracy = Flag.flag("show help", - ["-h", "--verbose"], - ["Display help", "Enable verbose output"]) - """ - if len(flags) != len(flag_description): - raise ValueError("flags and flag_description lists must be of the same length") + @classmethod + def update_history(cls, user_input: str, matched_flag: str, accuracy: float): + """ + Update the user interaction history with details of a matched flag. - # Combine flags and descriptions for better matching context - combined_descriptions = [f"{flag} {desc}" for flag, desc in zip(flags, flag_description)] + This method records user interactions with flags, including timestamp, input, match accuracy, + and device information. It only updates history if save preferences are enabled. - # Encode user input and all descriptions - # Compute cosine similarities - similarities = cls.__get_sim(user_input, combined_descriptions) + Parameters: + user_input (str): The original input text provided by the user. + matched_flag (str): The flag that was successfully matched to the user input. + accuracy (float): The similarity/match accuracy score for the flag. - # Find the best match - best_index = max(range(len(similarities)), key=lambda i: similarities[i]) - best_accuracy = similarities[best_index] * 100 - best_match = flags[best_index] if best_accuracy > MIN_ACCURACY_THRESHOLD else "Nothing matched" + Side Effects: + - Modifies the history JSON file by adding a new interaction entry + - Increments the usage count for the matched flag + - Requires write access to the history file + + Notes: + - Skips history update if SAVE_PREFERENCES is False + - Creates new flag entries in history if they do not exist + - Uses current timestamp and logged-in user's device name + """ + if not SAVE_PREFERENCES: + return + history_data = cls.load_history() + matched_flag = matched_flag.lstrip('-') + + # Ensure that interactions is a dictionary (not a list) + if not isinstance(history_data['interactions'], dict): + history_data['interactions'] = {} + + # Create a new interaction dictionary + interaction = { + 'timestamp': datetime.now().strftime('%H:%M:%S - %d/%m/%Y'), + 'user_input': user_input, + 'accuracy': accuracy, + 'device_name': os.getlogin() + } + + # Ensure the flag exists in the interactions dictionary + if matched_flag not in history_data['interactions']: + history_data['interactions'][matched_flag] = [] + + # Append the new interaction to the flag's list of interactions + history_data['interactions'][matched_flag].append(interaction) - # Update history - cls.update_history(user_input, best_match, best_accuracy) + # Ensure the flag exists in the flags_usage counter and increment it + if matched_flag not in history_data['flags_usage']: + history_data['flags_usage'][matched_flag] = 0 + history_data['flags_usage'][matched_flag] += 1 - # Suggest flags if accuracy is low - if best_accuracy < MIN_ACCURACY_THRESHOLD: - suggested_flags = cls.__suggest_flags_based_on_history(user_input) - if suggested_flags: - log.warning(f"No Flags matched so suggestions based on historical data: " - f"{', '.join(suggested_flags)}") + cls.save_history(history_data) - return best_match, best_accuracy + @classmethod + def flag(cls, user_input: str, flags: list[str], flag_description: list[str]) -> tuple[str, float]: + """ + Matches user input to flag descriptions using advanced semantic 
similarity. + Computes the best matching flag based on cosine similarity between the user input and flag descriptions. + Handles matching with a minimum accuracy threshold and provides flag suggestions from historical data + if no direct match is found. + + Parameters: + user_input (str): The input string to match against available flags. + flags (list[str]): List of available command flags. + flag_description (list[str]): Corresponding descriptions for each flag. + + Returns: + tuple[str, float]: A tuple containing: + - The best matched flag (or 'Nothing matched') + - Accuracy percentage of the match (0.0-100.0) + + Raises: + ValueError: If the number of flags and descriptions do not match. + + Side Effects: + - Updates user interaction history + - Prints flag suggestions if no direct match is found + - Requires a global MIN_ACCURACY_THRESHOLD to be defined + + Example: + matched_flag, accuracy = Flag.flag("show help", + ["-h", "--verbose"], + ["Display help", "Enable verbose output"]) + """ + if len(flags) != len(flag_description): + raise ValueError("flags and flag_description lists must be of the same length") + + # Combine flags and descriptions for better matching context + combined_descriptions = [f"{flag} {desc}" for flag, desc in zip(flags, flag_description)] + + # Encode user input and all descriptions + # Compute cosine similarities + similarities = cls.__get_sim(user_input, combined_descriptions) + + # Find the best match + best_index = max(range(len(similarities)), key=lambda i: similarities[i]) + best_accuracy = similarities[best_index] * 100 + best_match = flags[best_index] if best_accuracy > MIN_ACCURACY_THRESHOLD else "Nothing matched" + + # Update history + cls.update_history(user_input, best_match, best_accuracy) + + # Suggest flags if accuracy is low + if best_accuracy < MIN_ACCURACY_THRESHOLD: + suggested_flags = cls.__suggest_flags_based_on_history(user_input) + if suggested_flags: + log.warning(f"No Flags matched so suggestions based on historical data: " + f"{', '.join(suggested_flags)}") + + return best_match, best_accuracy -class Flag: @classmethod def __colorify(cls, text: str, color: str) -> str: """ @@ -472,6 +472,12 @@ def __available_arguments(cls) -> tuple[argparse.Namespace, argparse.ArgumentPar f"{cls.__colorify('- Beta Mode -', 'y')}" ) + parser.add_argument( + "--usage", + action="store_true", + help="Run's script that shows and gives your local statistics, on the flags used by you" + ) + # Define Side Flags parser.add_argument( "--debug", @@ -555,7 +561,8 @@ def __exclusivity_logic(args: argparse.Namespace) -> bool: args.minimal, args.nopy, args.depth, - args.performance_check + args.performance_check, + args.usage } exclusive_flags = { args.vulnscan_ai, @@ -639,7 +646,7 @@ def __suggest_flag(cls, user_input: str, valid_flags: list[str]): # Map the user-provided description to the closest valid flag flags_list = [f"--{flag}" for flag in valid_flags] descriptions_list = [f"Run Logicytics with {flag}" for flag in valid_flags] - flag_received, accuracy_received = _Match.flag(user_input_desc, flags_list, descriptions_list) + flag_received, accuracy_received = cls.Match.flag(user_input_desc, flags_list, descriptions_list) if DEBUG_MODE: log.info( f"User input: {user_input_desc}\nMatched flag: {flag_received.replace('_', '-')}\nAccuracy: {accuracy_received:.2f}%\n") @@ -737,12 +744,12 @@ def update_data_history(matched_flag: str): Example: update_data_history('--verbose') # Increments usage count for '--verbose' flag """ - history_data = _Match.load_history() 
+ history_data = cls.Match.load_history() # Ensure the flag exists in the flags_usage counter and increment it if matched_flag.replace("--", "") not in history_data['flags_usage']: history_data['flags_usage'][matched_flag.replace("--", "")] = 0 history_data['flags_usage'][matched_flag.replace("--", "")] += 1 - _Match.save_history(history_data) + cls.Match.save_history(history_data) if len(used_flags) == 2: for flag in used_flags: diff --git a/PLANS.md b/PLANS.md index 4c7e31f6..de71be52 100644 --- a/PLANS.md +++ b/PLANS.md @@ -8,8 +8,8 @@ | Task | Version | Might or Will be done? | |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|------------------------| | Get any BETA features out of BETA | v3.6.0 | ✅ | -| Replace Logger.py with Util that contains (tprint), also implement the ExceptionHandler and UpdateManager from Util | v3.6.0 | ✅ | | Remake VulnScan .pkl and .pth to be more accurate | v3.6.0 | ❌ | -| Encrypted Volume Detection and Analysis, Advanced USB Device History Tracker | v3.7.0 | ❌ | -| Merge `sensitive data miner` with `vulnscan` | v3.7.0 | ✅ | +| Encrypted Volume Detection and Analysis, Advanced USB Device History Tracker | v3.6.0 | ❌ | +| Merge `sensitive data miner` with `vulnscan` to be 1 tool | v4.0.0 | ✅ | | Remake Logicytics End-Execution cycle, where files created must go in `temp/` directory, and zipper takes it from there only, simplifying any code logic with this as well | v4.0.0 | ✅ | +| Replace Logger.py with Util that contains (tprint), also implement the ExceptionHandler and UpdateManager from Util | v4.0.0 | ✅ | From 4245589a0b9917e7ee0092a889ba7e25b5c0998e Mon Sep 17 00:00:00 2001 From: Shahm Najeeb Date: Sun, 27 Jul 2025 15:43:22 +0400 Subject: [PATCH 02/12] Removal of the backup feature git should be the one we focus on and use, all history is preserved, and the --backup feature bloats the code Signed-off-by: Shahm Najeeb --- CODE/Logicytics.py | 42 ----------------------------------------- CODE/logicytics/Flag.py | 7 ------- CODE/vulnscan.py | 1 + 3 files changed, 1 insertion(+), 49 deletions(-) diff --git a/CODE/Logicytics.py b/CODE/Logicytics.py index 26195902..69011ebb 100644 --- a/CODE/Logicytics.py +++ b/CODE/Logicytics.py @@ -2,10 +2,8 @@ import gc import os -import shutil import subprocess import sys -import zipfile from concurrent.futures import ThreadPoolExecutor, as_completed from datetime import datetime @@ -254,36 +252,6 @@ def __performance(self): class SpecialAction: - @staticmethod - def backup(directory: str, name: str): - """ - Creates a backup of a specified directory by zipping its contents and moving it to a designated backup location. - - Args: - directory (str): The path to the directory to be backed up. - name (str): The name of the backup file. 
- - Returns: - None - """ - if not os.path.exists(directory): - log.critical(f"Directory {directory} does not exist!") - return - - # Check if backup exists, delete it if so - if os.path.exists(f"../ACCESS/BACKUP/{name}.zip"): - os.remove(f"../ACCESS/BACKUP/{name}.zip") - - # Zip the directory and move it to the backup location - with zipfile.ZipFile(f"{name}.zip", "w") as zip_file: - for root, dirs, files in os.walk(directory): - for file in files: - file_path = os.path.join(root, file) - relative_path = os.path.relpath(str(file_path), start=os.getcwd()) - zip_file.write(str(file_path), arcname=relative_path) - - shutil.move(f"{name}.zip", "../ACCESS/BACKUP") - @staticmethod def update() -> tuple[str, str]: """ @@ -407,16 +375,6 @@ def handle_special_actions(): input("Press Enter to exit...") exit(1) - if ACTION == "backup": - log.info("Backing up...") - SpecialAction.backup(".", "Default_Backup") - log.debug("Backup complete -> CODE dir") - SpecialAction.backup("../MODS", "Mods_Backup") - log.debug("Backup complete -> MODS dir") - log.info("Backup complete!") - input("Press Enter to exit...") - exit(0) - def check_privileges(): """ diff --git a/CODE/logicytics/Flag.py b/CODE/logicytics/Flag.py index f862cfcd..e8687fc5 100644 --- a/CODE/logicytics/Flag.py +++ b/CODE/logicytics/Flag.py @@ -487,13 +487,6 @@ def __available_arguments(cls) -> tuple[argparse.Namespace, argparse.ArgumentPar f"{cls.__colorify('- Use to get a special log file to report the bug -', 'b')}.", ) - parser.add_argument( - "--backup", - action="store_true", - help="Backup Logicytics files to the ACCESS/BACKUPS directory " - f"{cls.__colorify('- Use on your own device only -', 'y')}.", - ) - parser.add_argument( "--update", action="store_true", diff --git a/CODE/vulnscan.py b/CODE/vulnscan.py index b7c86124..65a4ba23 100644 --- a/CODE/vulnscan.py +++ b/CODE/vulnscan.py @@ -8,6 +8,7 @@ import aiofiles import joblib import numpy as np +# noinspection PyPackageRequirements import torch from pathlib import Path from safetensors import safe_open From 3a3e907fdb1cc29988cea7a08f0576e41fd12804 Mon Sep 17 00:00:00 2001 From: Shahm Najeeb Date: Sun, 27 Jul 2025 16:04:38 +0400 Subject: [PATCH 03/12] Improve --update and --debug flags, fix file comparison and edge cases - Fixed crash when Git is not installed by implementing a proper existence check - Resolved issue where --debug misidentified ignored files as extra - Unified file comparison logic in --debug with _dev.py logic to avoid mismatches - Enhanced debug logging for better clarity and traceability - Fixed dev mode bug where, after max attempts, input was ignored even if valid - Added support for Python 3.13 in debug, thus no error will be thrown in the menu Signed-off-by: Shahm Najeeb --- CODE/Logicytics.py | 12 +++++++++--- CODE/_debug.py | 28 ++++++++++++++++------------ CODE/_dev.py | 7 ++++--- 3 files changed, 29 insertions(+), 18 deletions(-) diff --git a/CODE/Logicytics.py b/CODE/Logicytics.py index 69011ebb..332817ad 100644 --- a/CODE/Logicytics.py +++ b/CODE/Logicytics.py @@ -264,12 +264,18 @@ def update() -> tuple[str, str]: str: The output from the git pull command. 
""" # Check if git command is available - if subprocess.run(["git", "--version"], capture_output=True).returncode != 0: + try: + if subprocess.run(["git", "--version"], capture_output=True).returncode != 0: + return "Git is not installed or not available in the PATH.", "error" + except FileNotFoundError: return "Git is not installed or not available in the PATH.", "error" # Check if the project is a git repository - if not os.path.exists(os.path.join(os.getcwd(), "../.git")): - return "This project is not a git repository. The update flag uses git.", "error" + try: + if not os.path.exists(os.path.join(os.getcwd(), "../.git")): + return "This project is not a git repository. The update flag uses git.", "error" + except Exception as e: + return f"Error checking for git repository: {e}", "error" current_dir = os.getcwd() parent_dir = os.path.dirname(current_dir) diff --git a/CODE/_debug.py b/CODE/_debug.py index 46c395ea..029484d8 100644 --- a/CODE/_debug.py +++ b/CODE/_debug.py @@ -9,7 +9,7 @@ import psutil import requests -from logicytics import Log, DEBUG, VERSION, check, config +from logicytics import Log, DEBUG, VERSION, check, config, get log_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "ACCESS\\LOGS\\DEBUG\\DEBUG.log") log = Log({"log_level": DEBUG, "filename": log_path, "truncate_message": False, "delete_log": True}) @@ -47,14 +47,16 @@ def check_required_files(directory: str, required_files: list[str]): log.error(f"Directory {directory} does not exist.") return - actual_files = [] - for root, _, files in os.walk(directory): - for file in files: - relative_path = os.path.relpath(os.path.join(root, file), start=directory) - actual_files.append(relative_path.replace("\\", "/").replace('"', '')) # Normalize paths + # Use get.list_of_files to retrieve files, excluding specified files, dirs, and extensions + actual_files = get.list_of_files( + directory, + exclude_files=["logicytics/User_History.json.gz", "logicytics/User_History.json"], + exclude_dirs=["SysInternal_Suite"], + exclude_extensions=[".pyc"] + ) + actual_files = [f.replace("\\", "/").replace('"', '') for f in actual_files] # Normalize paths log.debug(f"Actual files found: {actual_files}") - # Strip quotes and normalize paths for comparison normalized_required_files = [ required_file.strip().replace("\\", "/").replace('"', '') # Remove quotes and normalize paths @@ -130,11 +132,14 @@ def python_version(): """ version = sys.version.split()[0] MIN_VERSION = (3, 11) - MAX_VERSION = (3, 13) + MAX_VERSION = (3, 14) try: major, minor = map(int, version.split(".")[:2]) if MIN_VERSION <= (major, minor) < MAX_VERSION: - log.info(f"Python Version: {version} - Perfect") + if (major, minor) == MIN_VERSION: + log.info(f"Python Version: {version} - Perfect (mainly tested on 3.11.x)") + else: + log.info(f"Python Version: {version} - Supported") elif (major, minor) < MIN_VERSION: log.warning(f"Python Version: {version} - Recommended: 3.11.x") else: @@ -142,7 +147,6 @@ def python_version(): except Exception as e: log.error(f"Failed to parse Python Version: {e}") - class ConfigManager: @staticmethod def get_online_config() -> dict | None: @@ -206,8 +210,8 @@ def debug(): SysInternalManager.check_binaries("SysInternal_Suite") # System Checks - log.info("Admin privileges found" if check.admin() else "Admin privileges not found") - log.info("UAC enabled" if check.uac() else "UAC disabled") + log.info("Admin privileges found") if check.admin() else log.warning("Admin privileges not found") + log.info("UAC 
enabled") if check.uac() else log.warning("UAC disabled") log.info(f"Execution path: {psutil.__file__}") log.info(f"Global execution path: {sys.executable}") log.info(f"Local execution path: {sys.prefix}") diff --git a/CODE/_dev.py b/CODE/_dev.py index b2b0e5e0..7add36bf 100644 --- a/CODE/_dev.py +++ b/CODE/_dev.py @@ -150,12 +150,13 @@ def _handle_file_operations() -> None: while True: version = color_print(f"[?] Enter the new version of the project (Old version is {VERSION}): ", "cyan", is_input=True) - if attempts >= max_attempts: - color_print("[x] Maximum attempts reached. Please run the script again.", "red") - exit() + if re.match(r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)$", version): _update_ini_file("config.ini", version, "version") break + elif attempts >= max_attempts: + color_print("[x] Maximum attempts reached. Please run the script again.", "red") + exit() else: color_print("[!] Please enter a valid version number (e.g., 1.2.3)", "yellow") attempts += 1 From 02f68ce2814bd9172e5774aa42c77d744948192d Mon Sep 17 00:00:00 2001 From: Shahm Najeeb Date: Sun, 27 Jul 2025 16:23:44 +0400 Subject: [PATCH 04/12] Basic cleanup of the backup removal and major file bug fix When the init directories for data don't exist, it fails to be made, fixed this by making the first instance create the files (logger module inside the Logicytics module Signed-off-by: Shahm Najeeb --- CODE/_dev.py | 3 +-- CODE/logicytics/FileManagement.py | 4 +--- CODE/logicytics/Logger.py | 2 ++ CODE/logicytics/__init__.py | 16 +++++++++------- PLANS.md | 1 - 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/CODE/_dev.py b/CODE/_dev.py index 7add36bf..e752225b 100644 --- a/CODE/_dev.py +++ b/CODE/_dev.py @@ -6,7 +6,7 @@ import configobj -from logicytics import log, get, file_management, CURRENT_FILES, VERSION +from logicytics import log, get, CURRENT_FILES, VERSION def color_print(text, color="reset", is_input=False) -> None | str: @@ -190,7 +190,6 @@ def dev_checks() -> None: - Updates configuration file with current files and version - Logs warnings or errors during the process """ - file_management.mkdir() if not _perform_checks(): return _handle_file_operations() diff --git a/CODE/logicytics/FileManagement.py b/CODE/logicytics/FileManagement.py index 3305c53d..87cc5381 100644 --- a/CODE/logicytics/FileManagement.py +++ b/CODE/logicytics/FileManagement.py @@ -35,11 +35,10 @@ def open_file(file: str, use_full_path: bool = False) -> str | None: @staticmethod def mkdir(): """ - Creates the necessary directories for storing logs, backups, and data. + Creates the necessary directories for storing logs, and data. This method ensures the existence of specific directory structures used by the application, including: - Log directories for general, debug, and performance logs - - Backup directory - Data directories for storing hashes and zip files The method uses `os.makedirs()` with `exist_ok=True` to create directories without raising an error if they already exist. 
@@ -50,7 +49,6 @@ def mkdir(): os.makedirs("../ACCESS/LOGS/", exist_ok=True) os.makedirs("../ACCESS/LOGS/DEBUG", exist_ok=True) os.makedirs("../ACCESS/LOGS/PERFORMANCE", exist_ok=True) - os.makedirs("../ACCESS/BACKUP/", exist_ok=True) os.makedirs("../ACCESS/DATA/Hashes", exist_ok=True) os.makedirs("../ACCESS/DATA/Zip", exist_ok=True) diff --git a/CODE/logicytics/Logger.py b/CODE/logicytics/Logger.py index b2be07da..171c5870 100644 --- a/CODE/logicytics/Logger.py +++ b/CODE/logicytics/Logger.py @@ -11,6 +11,7 @@ import colorlog from logicytics.Config import DEBUG +from logicytics.FileManagement import FileManagement class Log: @@ -39,6 +40,7 @@ def __init__(self, config: dict = None): :param config: A dictionary containing configuration options. """ + FileManagement.mkdir() # Ensure the necessary directories are created if self._initialized and config is None: return self._initialized = True diff --git a/CODE/logicytics/__init__.py b/CODE/logicytics/__init__.py index c0071958..53c98b35 100644 --- a/CODE/logicytics/__init__.py +++ b/CODE/logicytics/__init__.py @@ -12,15 +12,16 @@ # Check if the script is being run directly, if not, set up the library if __name__ == '__main__': exit("This is a library, Please import rather than directly run.") -execute = Execute() -get = Get() -check = Check() -flag = Flag() -file_management = FileManagement() -__show_trace = DEBUG == "DEBUG" -file_management.mkdir() +execute = Execute() # Initialize the Execute class for executing commands +get = Get() # Initialize the Get class for retrieving data +check = Check() # Initialize the Check class for performing checks +flag = Flag() # Initialize the Flag class for managing cli flags +file_management = FileManagement() # Initialize the FileManagement class for file operations +__show_trace = DEBUG == "DEBUG" # Determine if stack traces should be shown based on the debug level + +# Exception for handling object loading errors class ObjectLoadError(Exception): """Raised when an Object fails to load.""" @@ -38,6 +39,7 @@ def __init__(self, message="Failed to load object", object_name=None): super().__init__(message) +# Decorator for marking functions as deprecated [custom] def deprecated(removal_version: str, reason: str, show_trace: bool = __show_trace) -> callable: """ Decorator function that marks a function as deprecated diff --git a/PLANS.md b/PLANS.md index de71be52..6ca3f620 100644 --- a/PLANS.md +++ b/PLANS.md @@ -7,7 +7,6 @@ | Task | Version | Might or Will be done? 
| |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|------------------------| -| Get any BETA features out of BETA | v3.6.0 | ✅ | | Remake VulnScan .pkl and .pth to be more accurate | v3.6.0 | ❌ | | Encrypted Volume Detection and Analysis, Advanced USB Device History Tracker | v3.6.0 | ❌ | | Merge `sensitive data miner` with `vulnscan` to be 1 tool | v4.0.0 | ✅ | From 1683db05f5b147a6488fc7df2e8cba91515907a4 Mon Sep 17 00:00:00 2001 From: Shahm Najeeb Date: Sun, 27 Jul 2025 16:33:36 +0400 Subject: [PATCH 05/12] Promoted --performance-check flag from beta to official, fixed major bug in zipping - Removed "Beta Mode" label from the --performance-check help text - Flag is now considered stable and ready for general use - Removed the memory usage section as it is unreliable and in very low values and is also influenced by other system apps - Renamed vulnscan folder and items to be bug free, as old name resulted in the files getting zipped - Found bug in vulnscan that causes a critical crash, will fix in next commit Signed-off-by: Shahm Najeeb --- .gitignore | 2 +- .idea/Logicytics.iml | 18 +++---- .idea/webResources.xml | 4 +- CODE/Logicytics.py | 45 ++++-------------- CODE/logicytics/FileManagement.py | 2 +- CODE/logicytics/Flag.py | 2 - CODE/vulnscan.py | 2 +- .../SenseMini.3n3.pth} | Bin .../vectorizer.3n3.pkl} | Bin 9 files changed, 23 insertions(+), 52 deletions(-) rename CODE/{VulnScan/Model SenseMini .3n3.pth => vulnscan/SenseMini.3n3.pth} (100%) rename CODE/{VulnScan/Vectorizer .3n3.pkl => vulnscan/vectorizer.3n3.pkl} (100%) diff --git a/.gitignore b/.gitignore index f0f94ffc..124e1cfa 100644 --- a/.gitignore +++ b/.gitignore @@ -319,7 +319,7 @@ $RECYCLE.BIN/ *.pyc /CODE/SysInternal_Suite/.sys.ignore /ACCESS/ -/CODE/VulnScan/tools/NN features/ +/CODE/vulnscan/tools/NN features/ /CODE/logicytics/User_History.json.gz /CODE/logicytics/User_History.json /CODE/SysInternal_Suite/psfile.exe diff --git a/.idea/Logicytics.iml b/.idea/Logicytics.iml index b6f2739e..4798e7a8 100644 --- a/.idea/Logicytics.iml +++ b/.idea/Logicytics.iml @@ -8,17 +8,17 @@ - - + + - - - - - - - + + + + + + + diff --git a/.idea/webResources.xml b/.idea/webResources.xml index a01ef1d4..769a033f 100644 --- a/.idea/webResources.xml +++ b/.idea/webResources.xml @@ -11,8 +11,8 @@ - - + + diff --git a/CODE/Logicytics.py b/CODE/Logicytics.py index 332817ad..e7de3ff7 100644 --- a/CODE/Logicytics.py +++ b/CODE/Logicytics.py @@ -7,7 +7,6 @@ from concurrent.futures import ThreadPoolExecutor, as_completed from datetime import datetime -import psutil from prettytable import PrettyTable from logicytics import Log, execute, check, get, file_management, flag, DEBUG, DELETE_LOGS, config @@ -198,43 +197,20 @@ def __performance(self): log.warning("Advised to turn on DEBUG logging!!") execution_times = [] - memory_usage = [] - process = psutil.Process(os.getpid()) for file in range(len(self.execution_list)): gc.collect() start_time = datetime.now() - start_memory = process.memory_full_info().uss / 1024 / 1024 # MB log.execution(execute.script(self.execution_list[file])) end_time = datetime.now() - end_memory = process.memory_full_info().uss / 1024 / 1024 # MB elapsed_time = end_time - start_time - memory_delta = max(0, end_memory - start_memory) # Clamps negative delta to 0 - memory_usage.append((self.execution_list[file], f"{memory_delta}")) execution_times.append((self.execution_list[file], 
elapsed_time)) log.info(f"{self.execution_list[file]} executed in {elapsed_time}") - try: - if (end_memory - start_memory) < 0: - log.info( - f"{self.execution_list[file]} used {memory_delta:.3f}MB of memory - \033[33mPossible Affected by outside processes\033[0m") - else: - log.info(f"{self.execution_list[file]} used {memory_delta:.3f}MB of memory") - except Exception as e: - log.warning("Failed to log memory usage delta, reason: " + str(e)) - log.debug(f"Started with {start_memory:.3f}MB of memory and ended with {end_memory:.3f}MB of memory") table = PrettyTable() - table.field_names = ["Script", "Execution Time", "Memory Usage (MB)"] + table.field_names = ["Script", "Execution Time"] for script, elapsed_time in execution_times: - try: - memory = f"{float(next(m[1] for m in memory_usage if m[0] == script)):.3f}" - except StopIteration: - log.warning(f"No memory data found for {script}") - memory = "N/A" - except Exception as e: - log.warning(f"Failed to log memory usage for {script}, reason: " + str(e)) - memory = "N/A" - table.add_row([script, elapsed_time, f"{memory}"]) + table.add_row([script, elapsed_time]) try: with open( @@ -242,10 +218,7 @@ def __performance(self): f"{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt", "w" ) as f: f.write(table.get_string()) - f.write( - "\nSome values may be negative, Reason may be due to external resources playing with memory usage,\n" - "Close background tasks to get more accurate readings\n\n") - f.write("Note: This is not a low-level memory logger, data here isn't 100% accurate!\n") + f.write("\nNote: This test only measures execution time.\n") log.info("Performance check complete! Performance log found in ACCESS/LOGS/PERFORMANCE") except Exception as e: log.error(f"Error writing performance log: {e}") @@ -444,12 +417,12 @@ def handle_sub_action(): """ log.info("Completed successfully!") log.newline() - if ACTION == "performance_check": - return # Do not handle sub actions for performance check - if SUB_ACTION == "shutdown": - subprocess.call("shutdown /s /t 3", shell=False) - elif SUB_ACTION == "reboot": - subprocess.call("shutdown /r /t 3", shell=False) + # Do not handle sub actions for performance check + if ACTION != "performance_check": + if SUB_ACTION == "shutdown": + subprocess.call("shutdown /s /t 3", shell=False) + elif SUB_ACTION == "reboot": + subprocess.call("shutdown /r /t 3", shell=False) @log.function diff --git a/CODE/logicytics/FileManagement.py b/CODE/logicytics/FileManagement.py index 87cc5381..4a68d6b5 100644 --- a/CODE/logicytics/FileManagement.py +++ b/CODE/logicytics/FileManagement.py @@ -92,7 +92,7 @@ def __get_files_to_zip(path: str) -> list: """ excluded_extensions = (".py", ".exe", ".bat", ".ps1", ".pkl", ".pth") excluded_prefixes = ("config.ini", "SysInternal_Suite", - "__pycache__", "logicytics", "VulnScan") + "__pycache__", "logicytics", "vulnscan") return [ f for f in os.listdir(path) diff --git a/CODE/logicytics/Flag.py b/CODE/logicytics/Flag.py index e8687fc5..0ae5974b 100644 --- a/CODE/logicytics/Flag.py +++ b/CODE/logicytics/Flag.py @@ -463,13 +463,11 @@ def __available_arguments(cls) -> tuple[argparse.Namespace, argparse.ArgumentPar help="Run Logicytics in minimal mode. Just bare essential scraping using only quick scripts", ) - # TODO v3.6.0 -> Out of beta parser.add_argument( "--performance-check", action="store_true", help="Run's Logicytics default while testing its performance and time, " "this then shows a table with the file names and time to executed. 
" - f"{cls.__colorify('- Beta Mode -', 'y')}" ) parser.add_argument( diff --git a/CODE/vulnscan.py b/CODE/vulnscan.py index 65a4ba23..da9481e8 100644 --- a/CODE/vulnscan.py +++ b/CODE/vulnscan.py @@ -185,7 +185,7 @@ async def scan_worker(scan_file): "C:\\Program Files", "C:\\Program Files (x86)" ] - vulnscan = VulnScan("vulnscan/Model SenseMini .3n3.pth", "vulnscan/Vectorizer .3n3.pkl") + vulnscan = VulnScan("vulnscan/SenseMini.3n3.pth", "vulnscan/vectorizer.3n3.pkl") vulnscan.scan_directory(base_paths) except KeyboardInterrupt: log.warning("User interrupted. Exiting gracefully.") diff --git a/CODE/VulnScan/Model SenseMini .3n3.pth b/CODE/vulnscan/SenseMini.3n3.pth similarity index 100% rename from CODE/VulnScan/Model SenseMini .3n3.pth rename to CODE/vulnscan/SenseMini.3n3.pth diff --git a/CODE/VulnScan/Vectorizer .3n3.pkl b/CODE/vulnscan/vectorizer.3n3.pkl similarity index 100% rename from CODE/VulnScan/Vectorizer .3n3.pkl rename to CODE/vulnscan/vectorizer.3n3.pkl From a47fa0c0216abe8fa54c197012455a0d6028afa5 Mon Sep 17 00:00:00 2001 From: Shahm Najeeb Date: Sun, 27 Jul 2025 17:04:07 +0400 Subject: [PATCH 06/12] Added automatic device detection for model loading in vulnscan Resolved crash when loading PyTorch model on systems without CUDA by implementing auto-detection of device availability (CPU/GPU). torch.load now uses map_location to ensure compatibility across environments. Signed-off-by: Shahm Najeeb --- CODE/vulnscan.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/CODE/vulnscan.py b/CODE/vulnscan.py index da9481e8..b57952f0 100644 --- a/CODE/vulnscan.py +++ b/CODE/vulnscan.py @@ -55,7 +55,15 @@ def _load_model(self) -> None: elif self.model_path.endswith('.pth'): with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=FutureWarning) - self.model = torch.load(self.model_path, weights_only=False) + self.model = torch.load( + self.model_path, + map_location=torch.device("cuda" if torch.cuda.is_available() else "cpu"), + weights_only=False + ) + if not torch.cuda.is_available() and torch.version.cuda and torch.backends.cudnn.is_available(): + log.warning( + "NVIDIA GPU detected but CUDA is not available. Check your PyTorch and CUDA installation to utilise as much power as possible.") + log.debug(f"Model using device: {torch.device('cuda' if torch.cuda.is_available() else 'cpu')}") else: raise ValueError("Unsupported model file format") From 1f8e0ec7bea479bc64f7dd20614d9ca2765975b4 Mon Sep 17 00:00:00 2001 From: Shahm Najeeb Date: Sun, 27 Jul 2025 17:14:29 +0400 Subject: [PATCH 07/12] Bump version to 3.5.1 and update file paths in config.ini Signed-off-by: Shahm Najeeb --- CODE/config.ini | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CODE/config.ini b/CODE/config.ini index fca15d55..939b4e6a 100644 --- a/CODE/config.ini +++ b/CODE/config.ini @@ -26,8 +26,8 @@ save_preferences = true [System Settings] # Do not play with these settings unless you know what you are doing # Dev Mode allows a safe way to modify these settings!! 
-version = 3.5.0 -files = "bluetooth_details.py, bluetooth_logger.py, browser_miner.ps1, cmd_commands.py, config.ini, dir_list.py, dump_memory.py, event_log.py, Logicytics.py, log_miner.py, media_backup.py, netadapter.ps1, network_psutil.py, packet_sniffer.py, property_scraper.ps1, registry.py, sensitive_data_miner.py, ssh_miner.py, sys_internal.py, tasklist.py, tree.ps1, vulnscan.py, wifi_stealer.py, window_feature_miner.ps1, wmic.py, logicytics\Checks.py, logicytics\Config.py, logicytics\Execute.py, logicytics\FileManagement.py, logicytics\Flag.py, logicytics\Get.py, logicytics\Logger.py, logicytics\User_History.json.gz, vulnscan\Model SenseMini .3n3.pth, vulnscan\Vectorizer .3n3.pkl" +version = 3.5.1 +files = "bluetooth_details.py, bluetooth_logger.py, browser_miner.ps1, cmd_commands.py, config.ini, dir_list.py, dump_memory.py, event_log.py, Logicytics.py, log_miner.py, media_backup.py, netadapter.ps1, network_psutil.py, packet_sniffer.py, property_scraper.ps1, registry.py, sensitive_data_miner.py, ssh_miner.py, sys_internal.py, tasklist.py, tree.ps1, vulnscan.py, wifi_stealer.py, window_feature_miner.ps1, wmic.py, logicytics\Checks.py, logicytics\Config.py, logicytics\Execute.py, logicytics\FileManagement.py, logicytics\Flag.py, logicytics\Get.py, logicytics\Logger.py, logicytics\User_History.json.gz, vulnscan\SenseMini.3n3.pth, vulnscan\vectorizer.3n3.pkl" # If you forked the project, change the USERNAME to your own to use your own fork as update material, # I dont advise doing this however config_url = https://raw.githubusercontent.com/DefinetlyNotAI/Logicytics/main/CODE/config.ini From e77824b376880cc83693a71a0db6e921c222dc70 Mon Sep 17 00:00:00 2001 From: Shahm Najeeb Date: Sun, 27 Jul 2025 17:21:18 +0400 Subject: [PATCH 08/12] Update CODE/vulnscan.py The condition logic is incorrect. If torch.cuda.is_available() returns False, then torch.backends.cudnn.is_available() will also return False, making this condition always False. Consider simplifying to just check if CUDA is not available but the system has CUDA capabilities. Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: Shahm Najeeb --- CODE/vulnscan.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CODE/vulnscan.py b/CODE/vulnscan.py index b57952f0..42b12133 100644 --- a/CODE/vulnscan.py +++ b/CODE/vulnscan.py @@ -60,7 +60,7 @@ def _load_model(self) -> None: map_location=torch.device("cuda" if torch.cuda.is_available() else "cpu"), weights_only=False ) - if not torch.cuda.is_available() and torch.version.cuda and torch.backends.cudnn.is_available(): + if not torch.cuda.is_available() and torch.version.cuda: log.warning( "NVIDIA GPU detected but CUDA is not available. 
Check your PyTorch and CUDA installation to utilise as much power as possible.") log.debug(f"Model using device: {torch.device('cuda' if torch.cuda.is_available() else 'cpu')}") From e740fc2d90aa9ea9f5cf4bbf0642cac687ac5c63 Mon Sep 17 00:00:00 2001 From: Shahm Najeeb Date: Sun, 27 Jul 2025 17:23:11 +0400 Subject: [PATCH 09/12] Update CODE/_dev.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: Shahm Najeeb --- CODE/_dev.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CODE/_dev.py b/CODE/_dev.py index e752225b..8663d47f 100644 --- a/CODE/_dev.py +++ b/CODE/_dev.py @@ -154,12 +154,12 @@ def _handle_file_operations() -> None: if re.match(r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)$", version): _update_ini_file("config.ini", version, "version") break - elif attempts >= max_attempts: + attempts += 1 + if attempts >= max_attempts: color_print("[x] Maximum attempts reached. Please run the script again.", "red") exit() else: color_print("[!] Please enter a valid version number (e.g., 1.2.3)", "yellow") - attempts += 1 color_print(f"[!] {max_attempts - attempts} attempts remaining", "yellow") color_print("\n[-] Great Job! Please tick the box in the GitHub PR request for completing steps in --dev", "green") From c977f68f7ba565fd75b10db289834502bc3ba21a Mon Sep 17 00:00:00 2001 From: Shahm Najeeb Date: Sun, 27 Jul 2025 17:23:47 +0400 Subject: [PATCH 10/12] Update CODE/Logicytics.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: Shahm Najeeb --- CODE/Logicytics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CODE/Logicytics.py b/CODE/Logicytics.py index e7de3ff7..fa1e89d0 100644 --- a/CODE/Logicytics.py +++ b/CODE/Logicytics.py @@ -417,7 +417,7 @@ def handle_sub_action(): """ log.info("Completed successfully!") log.newline() - # Do not handle sub actions for performance check + # Handle sub actions for all actions except performance check if ACTION != "performance_check": if SUB_ACTION == "shutdown": subprocess.call("shutdown /s /t 3", shell=False) From 963073c93d2c80c4563a4ec03592bcb0c48dcf4e Mon Sep 17 00:00:00 2001 From: "qltysh[bot]" <168846912+qltysh[bot]@users.noreply.github.com> Date: Sun, 27 Jul 2025 13:30:22 +0000 Subject: [PATCH 11/12] =?UTF-8?q?=F0=9F=93=9D=20qlty=20fmt?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CODE/Logicytics.py | 138 +++++++++---- CODE/_debug.py | 80 ++++++-- CODE/_dev.py | 64 ++++-- CODE/logicytics/FileManagement.py | 28 ++- CODE/logicytics/Flag.py | 320 ++++++++++++++++++------------ CODE/logicytics/Logger.py | 124 +++++++----- CODE/logicytics/__init__.py | 28 ++- CODE/vulnscan.py | 65 ++++-- PLANS.md | 13 +- 9 files changed, 559 insertions(+), 301 deletions(-) diff --git a/CODE/Logicytics.py b/CODE/Logicytics.py index fa1e89d0..2ef89a10 100644 --- a/CODE/Logicytics.py +++ b/CODE/Logicytics.py @@ -9,12 +9,24 @@ from prettytable import PrettyTable -from logicytics import Log, execute, check, get, file_management, flag, DEBUG, DELETE_LOGS, config +from logicytics import ( + Log, + execute, + check, + get, + file_management, + flag, + DEBUG, + DELETE_LOGS, + config, +) # Initialization log = Log({"log_level": DEBUG, "delete_log": DELETE_LOGS}) ACTION, SUB_ACTION = None, None -MAX_WORKERS = config.getint("Settings", "max_workers", fallback=min(32, (os.cpu_count() or 1) + 4)) +MAX_WORKERS = config.getint( + "Settings", "max_workers", fallback=min(32, (os.cpu_count() or 1) + 4) +) log.debug(f"MAX_WORKERS: 
{MAX_WORKERS}") @@ -28,7 +40,9 @@ def __safe_remove(file_name: str, file_list: list[str] | set[str]) -> list[str]: if file_name in file_set: file_set.remove(file_name) else: - log.critical(f"The file {file_name} should exist in this directory - But was not found!") + log.critical( + f"The file {file_name} should exist in this directory - But was not found!" + ) return list(file_set) @staticmethod @@ -63,9 +77,12 @@ def __generate_execution_list(self) -> list[str]: - Logs the final execution list for debugging purposes - Warns users about potential long execution times for certain actions """ - execution_list = get.list_of_files(".", only_extensions=(".py", ".exe", ".ps1", ".bat"), - exclude_files=["Logicytics.py"], - exclude_dirs=["logicytics", "SysInternal_Suite"]) + execution_list = get.list_of_files( + ".", + only_extensions=(".py", ".exe", ".ps1", ".bat"), + exclude_files=["Logicytics.py"], + exclude_dirs=["logicytics", "SysInternal_Suite"], + ) files_to_remove = { "sensitive_data_miner.py", "dir_list.py", @@ -73,7 +90,9 @@ def __generate_execution_list(self) -> list[str]: "vulnscan.py", "event_log.py", } - execution_list = [file for file in execution_list if file not in files_to_remove] + execution_list = [ + file for file in execution_list if file not in files_to_remove + ] if ACTION == "minimal": execution_list = [ @@ -93,18 +112,23 @@ def __generate_execution_list(self) -> list[str]: "netadapter.ps1", "property_scraper.ps1", "window_feature_miner.ps1", - "tree.ps1" + "tree.ps1", ] elif ACTION == "modded": # Add all files in MODS to execution list - execution_list = get.list_of_files("../MODS", only_extensions=(".py", ".exe", ".ps1", ".bat"), - append_file_list=execution_list, exclude_files=["Logicytics.py"], - exclude_dirs=["logicytics", "SysInternal_Suite"]) + execution_list = get.list_of_files( + "../MODS", + only_extensions=(".py", ".exe", ".ps1", ".bat"), + append_file_list=execution_list, + exclude_files=["Logicytics.py"], + exclude_dirs=["logicytics", "SysInternal_Suite"], + ) elif ACTION == "depth": log.warning( - "This flag will use clunky and huge scripts, and so may take a long time, but reap great rewards.") + "This flag will use clunky and huge scripts, and so may take a long time, but reap great rewards." + ) files_to_append = { "sensitive_data_miner.py", "dir_list.py", @@ -124,7 +148,9 @@ def __generate_execution_list(self) -> list[str]: exit(1) if len(execution_list) == 0: - log.critical("Nothing is in the execution list.. This is due to faulty code or corrupted Logicytics files!") + log.critical( + "Nothing is in the execution list.. This is due to faulty code or corrupted Logicytics files!" 
+ ) exit(1) log.debug(f"Execution list length: {len(execution_list)}") @@ -163,8 +189,10 @@ def __threaded(self): """Executes scripts in parallel using threading.""" log.debug("Using threading") with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor: - futures = {executor.submit(self.__script_handler, script): script - for script in self.execution_list} + futures = { + executor.submit(self.__script_handler, script): script + for script in self.execution_list + } for future in as_completed(futures): script = futures[future] @@ -214,12 +242,15 @@ def __performance(self): try: with open( - f"../ACCESS/LOGS/PERFORMANCE/Performance_Summary_" - f"{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt", "w" + f"../ACCESS/LOGS/PERFORMANCE/Performance_Summary_" + f"{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt", + "w", ) as f: f.write(table.get_string()) f.write("\nNote: This test only measures execution time.\n") - log.info("Performance check complete! Performance log found in ACCESS/LOGS/PERFORMANCE") + log.info( + "Performance check complete! Performance log found in ACCESS/LOGS/PERFORMANCE" + ) except Exception as e: log.error(f"Error writing performance log: {e}") @@ -228,17 +259,20 @@ class SpecialAction: @staticmethod def update() -> tuple[str, str]: """ - Updates the repository by pulling the latest changes from the remote repository. + Updates the repository by pulling the latest changes from the remote repository. - This function navigates to the parent directory, pulls the latest changes using Git, - and then returns to the current working directory. + This function navigates to the parent directory, pulls the latest changes using Git, + and then returns to the current working directory. - Returns: - str: The output from the git pull command. - """ + Returns: + str: The output from the git pull command. + """ # Check if git command is available try: - if subprocess.run(["git", "--version"], capture_output=True).returncode != 0: + if ( + subprocess.run(["git", "--version"], capture_output=True).returncode + != 0 + ): return "Git is not installed or not available in the PATH.", "error" except FileNotFoundError: return "Git is not installed or not available in the PATH.", "error" @@ -246,7 +280,10 @@ def update() -> tuple[str, str]: # Check if the project is a git repository try: if not os.path.exists(os.path.join(os.getcwd(), "../.git")): - return "This project is not a git repository. The update flag uses git.", "error" + return ( + "This project is not a git repository. The update flag uses git.", + "error", + ) except Exception as e: return f"Error checking for git repository: {e}", "error" @@ -279,7 +316,9 @@ def execute_new_window(file_path: str): """ sr_current_dir = os.path.dirname(os.path.abspath(__file__)) sr_script_path = os.path.join(sr_current_dir, file_path) - sr_process = subprocess.Popen(["cmd.exe", "/c", "start", sys.executable, sr_script_path]) + sr_process = subprocess.Popen( + ["cmd.exe", "/c", "start", sys.executable, sr_script_path] + ) sr_process.wait() exit(0) @@ -287,12 +326,12 @@ def execute_new_window(file_path: str): def get_flags(): """ Retrieves action and sub-action flags from the Flag module and sets global variables. - + This function extracts the current action and sub-action from the Flag module, setting global ACTION and SUB_ACTION variables. It logs the retrieved values for debugging and tracing purposes. - + No parameters. 
- + Side effects: - Sets global variables ACTION and SUB_ACTION - Logs debug information about current action and sub-action @@ -307,20 +346,20 @@ def get_flags(): def handle_special_actions(): """ Handles special actions based on the current action flag. - + This function performs specific actions depending on the global `ACTION` variable: - For "debug": Opens the debug menu by executing '_debug.py' - For "dev": Opens the developer menu by executing '_dev.py' - For "update": Updates the repository using Health.update() method - For "restore": Displays a warning and opens the backup location - For "backup": Creates backups of the CODE and MODS directories - + Side Effects: - Logs informational, debug, warning, or error messages - May execute external Python scripts - May open file locations - May terminate the program after completing special actions - + Raises: SystemExit: Exits the program after completing certain special actions """ @@ -358,14 +397,14 @@ def handle_special_actions(): def check_privileges(): """ Checks if the script is running with administrative privileges and handles UAC (User Account Control) settings. - + This function verifies if the script has admin privileges. If not, it either logs a warning (in debug mode) or prompts the user to run the script with admin privileges and exits. It also checks if UAC is enabled and logs warnings accordingly. - + Raises: SystemExit: If the script is not running with admin privileges and not in debug mode. - + Notes: - Requires the `Check` module with `admin()` and `uac()` methods - Depends on global `DEBUG` configuration variable @@ -373,15 +412,20 @@ def check_privileges(): """ if not check.admin(): if DEBUG == "DEBUG": - log.warning("Running in debug mode, continuing without admin privileges - This may cause issues") + log.warning( + "Running in debug mode, continuing without admin privileges - This may cause issues" + ) else: log.critical( - "Please run this script with admin privileges - To ignore this message, run with DEBUG in config") + "Please run this script with admin privileges - To ignore this message, run with DEBUG in config" + ) input("Press Enter to exit...") exit(1) if check.uac(): - log.warning("UAC is enabled, this may cause issues - Please disable UAC if possible") + log.warning( + "UAC is enabled, this may cause issues - Please disable UAC if possible" + ) class ZIP: @@ -394,11 +438,15 @@ def files(cls): @staticmethod def __and_log(directory: str, name: str): - log.debug(f"Zipping directory '{directory}' with name '{name}' under action '{ACTION}'") + log.debug( + f"Zipping directory '{directory}' with name '{name}' under action '{ACTION}'" + ) zip_values = file_management.Zip.and_hash( directory, name, - ACTION if ACTION is not None else f"ERROR_NO_ACTION_SPECIFIED_{datetime.now().isoformat()}" + ACTION + if ACTION is not None + else f"ERROR_NO_ACTION_SPECIFIED_{datetime.now().isoformat()}", ) if isinstance(zip_values, str): log.error(zip_values) @@ -429,7 +477,7 @@ def handle_sub_action(): def Logicytics(): """ Orchestrates the complete Logicytics workflow, managing script execution, system actions, and user interactions. 
- + This function serves as the primary entry point for the Logicytics utility, coordinating a series of system-level operations: - Retrieves command-line configuration flags - Processes special actions @@ -438,7 +486,7 @@ def Logicytics(): - Compresses generated output files - Handles final system sub-actions - Provides a graceful exit mechanism - + Performs actions sequentially without returning a value, designed to be the main execution flow of the Logicytics utility. """ # Get flags_list and configs @@ -461,7 +509,9 @@ def Logicytics(): try: Logicytics() except KeyboardInterrupt: - log.warning("Force shutdown detected! Some temporary files might be left behind.") + log.warning( + "Force shutdown detected! Some temporary files might be left behind." + ) log.warning("Next time, let the program finish naturally for complete cleanup.") # Emergency cleanup - zip generated files ZIP.files() diff --git a/CODE/_debug.py b/CODE/_debug.py index 029484d8..f3653693 100644 --- a/CODE/_debug.py +++ b/CODE/_debug.py @@ -11,8 +11,18 @@ from logicytics import Log, DEBUG, VERSION, check, config, get -log_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "ACCESS\\LOGS\\DEBUG\\DEBUG.log") -log = Log({"log_level": DEBUG, "filename": log_path, "truncate_message": False, "delete_log": True}) +log_path = os.path.join( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))), + "ACCESS\\LOGS\\DEBUG\\DEBUG.log", +) +log = Log( + { + "log_level": DEBUG, + "filename": log_path, + "truncate_message": False, + "delete_log": True, + } +) url = config.get("System Settings", "config_url") @@ -24,12 +34,12 @@ def parse_version(version: str) -> tuple[int, int, int | str, str]: """ try: if version.startswith("snapshot-"): - parts = version.split('-')[1].split('.') + parts = version.split("-")[1].split(".") major, minor = map(int, parts[:2]) patch = parts[2] if len(parts) > 2 else "0" return major, minor, patch, "snapshot" else: - return tuple(map(int, version.split('.'))) + ("release",) + return tuple(map(int, version.split("."))) + ("release",) except Exception as e: log.error(f"Failed to parse version: {e}") return 0, 0, 0, "error" @@ -50,21 +60,30 @@ def check_required_files(directory: str, required_files: list[str]): # Use get.list_of_files to retrieve files, excluding specified files, dirs, and extensions actual_files = get.list_of_files( directory, - exclude_files=["logicytics/User_History.json.gz", "logicytics/User_History.json"], + exclude_files=[ + "logicytics/User_History.json.gz", + "logicytics/User_History.json", + ], exclude_dirs=["SysInternal_Suite"], - exclude_extensions=[".pyc"] + exclude_extensions=[".pyc"], ) - actual_files = [f.replace("\\", "/").replace('"', '') for f in actual_files] # Normalize paths + actual_files = [ + f.replace("\\", "/").replace('"', "") for f in actual_files + ] # Normalize paths log.debug(f"Actual files found: {actual_files}") # Strip quotes and normalize paths for comparison normalized_required_files = [ - required_file.strip().replace("\\", "/").replace('"', '') # Remove quotes and normalize paths + required_file.strip() + .replace("\\", "/") + .replace('"', "") # Remove quotes and normalize paths for required_file in required_files ] # Compare files - missing_files, extra_files = FileManager.compare_files(actual_files, normalized_required_files) + missing_files, extra_files = FileManager.compare_files( + actual_files, normalized_required_files + ) if missing_files: log.error(f"Missing files: {', '.join(missing_files)}") @@ -76,7 +95,9 @@ 
def check_required_files(directory: str, required_files: list[str]): log.error(f"Unexpected error during file check: {e}") @staticmethod - def compare_files(actual_files: list[str], required_files: list[str]) -> tuple[list[str], list[str]]: + def compare_files( + actual_files: list[str], required_files: list[str] + ) -> tuple[list[str], list[str]]: """ Compares actual and required files, returning missing and extra files. """ @@ -108,7 +129,9 @@ def check_binaries(path: str): elif has_zip and has_exe: log.info("Both zip and exe files - All good") else: - log.error("SysInternal Binaries Not Found: Missing Files - Corruption detected") + log.error( + "SysInternal Binaries Not Found: Missing Files - Corruption detected" + ) except Exception as e: log.error(f"Unexpected error: {e}") @@ -122,7 +145,7 @@ def cpu_info() -> tuple[str, str, str]: return ( f"CPU Architecture: {platform.machine()}", f"CPU Vendor ID: {platform.system()}", - f"CPU Model: {platform.release()} {platform.version()}" + f"CPU Model: {platform.release()} {platform.version()}", ) @staticmethod @@ -137,7 +160,9 @@ def python_version(): major, minor = map(int, version.split(".")[:2]) if MIN_VERSION <= (major, minor) < MAX_VERSION: if (major, minor) == MIN_VERSION: - log.info(f"Python Version: {version} - Perfect (mainly tested on 3.11.x)") + log.info( + f"Python Version: {version} - Perfect (mainly tested on 3.11.x)" + ) else: log.info(f"Python Version: {version} - Supported") elif (major, minor) < MIN_VERSION: @@ -147,6 +172,7 @@ def python_version(): except Exception as e: log.error(f"Failed to parse Python Version: {e}") + class ConfigManager: @staticmethod def get_online_config() -> dict | None: @@ -182,12 +208,16 @@ def check_versions(local_version: str, remote_version: str): if local_version_tuple == remote_version_tuple: log.info(f"Version is up to date. Your Version: {local_version}") elif local_version_tuple > remote_version_tuple: - log.warning("Version is ahead of the repository. " - f"Your Version: {local_version}, " - f"Repository Version: {remote_version}") + log.warning( + "Version is ahead of the repository. " + f"Your Version: {local_version}, " + f"Repository Version: {remote_version}" + ) else: - log.error("Version is behind the repository. " - f"Your Version: {local_version}, Repository Version: {remote_version}") + log.error( + "Version is behind the repository. 
" + f"Your Version: {local_version}, Repository Version: {remote_version}" + ) except Exception as e: log.error(f"Version comparison error: {e}") @@ -210,15 +240,23 @@ def debug(): SysInternalManager.check_binaries("SysInternal_Suite") # System Checks - log.info("Admin privileges found") if check.admin() else log.warning("Admin privileges not found") + log.info("Admin privileges found") if check.admin() else log.warning( + "Admin privileges not found" + ) log.info("UAC enabled") if check.uac() else log.warning("UAC disabled") log.info(f"Execution path: {psutil.__file__}") log.info(f"Global execution path: {sys.executable}") log.info(f"Local execution path: {sys.prefix}") log.info( - "Running in a virtual environment" if sys.prefix != sys.base_prefix else "Not running in a virtual environment") + "Running in a virtual environment" + if sys.prefix != sys.base_prefix + else "Not running in a virtual environment" + ) log.info( - "Execution policy is unrestricted" if check.execution_policy() else "Execution policy is restricted") + "Execution policy is unrestricted" + if check.execution_policy() + else "Execution policy is restricted" + ) # Python Version Check SystemInfoManager.python_version() diff --git a/CODE/_dev.py b/CODE/_dev.py index 8663d47f..2f32cfd7 100644 --- a/CODE/_dev.py +++ b/CODE/_dev.py @@ -36,7 +36,9 @@ def _update_ini_file(filename: str, new_data: list | str, key: str) -> None: None """ try: - config = configobj.ConfigObj(filename, encoding="utf-8", write_empty_values=True) + config = configobj.ConfigObj( + filename, encoding="utf-8", write_empty_values=True + ) if key == "files": config["System Settings"][key] = ", ".join(new_data) @@ -55,21 +57,23 @@ def _update_ini_file(filename: str, new_data: list | str, key: str) -> None: color_print(f"[x] {e}", "red") -def _prompt_user(question: str, file_to_open: str = None, special: bool = False) -> bool: +def _prompt_user( + question: str, file_to_open: str = None, special: bool = False +) -> bool: """ Prompts the user with a yes/no question and optionally opens a file. - + Parameters: question (str): The question to be presented to the user. file_to_open (str, optional): Path to a file that will be opened if the user does not respond affirmatively. special (bool, optional): Flag to suppress the default reminder message when the user responds negatively. - + Returns: bool: True if the user responds with 'yes' or 'Y', False otherwise. - + Raises: Exception: Logs any unexpected errors during user interaction. - + Notes: - Uses subprocess to open files on Windows systems - Case-insensitive input handling for 'yes' responses @@ -82,7 +86,8 @@ def _prompt_user(question: str, file_to_open: str = None, special: bool = False) subprocess.run(["start", file_to_open], shell=True) if not special: color_print( - "[x] Please ensure you fix the issues/problem and try again with the checklist.", "red" + "[x] Please ensure you fix the issues/problem and try again with the checklist.", + "red", ) return False return True @@ -94,7 +99,7 @@ def _prompt_user(question: str, file_to_open: str = None, special: bool = False) def _perform_checks() -> bool: """ Performs a series of user prompts for various checks. - + Returns: bool: True if all checks are confirmed by the user, False otherwise. """ @@ -116,11 +121,18 @@ def _handle_file_operations() -> None: """ Handles file operations and logging for added, removed, and normal files. 
""" - EXCLUDE_FILES = ["logicytics\\User_History.json.gz", "logicytics\\User_History.json"] - files = get.list_of_files(".", exclude_files=EXCLUDE_FILES, exclude_dirs=["SysInternal_Suite"], - exclude_extensions=[".pyc"]) + EXCLUDE_FILES = [ + "logicytics\\User_History.json.gz", + "logicytics\\User_History.json", + ] + files = get.list_of_files( + ".", + exclude_files=EXCLUDE_FILES, + exclude_dirs=["SysInternal_Suite"], + exclude_extensions=[".pyc"], + ) added_files, removed_files, normal_files = [], [], [] - clean_files_list = [file.replace('"', '') for file in CURRENT_FILES] + clean_files_list = [file.replace('"', "") for file in CURRENT_FILES] files_set = set(os.path.abspath(f) for f in files) clean_files_set = set(os.path.abspath(f) for f in clean_files_list) @@ -148,29 +160,39 @@ def _handle_file_operations() -> None: _update_ini_file("config.ini", files, "files") while True: - version = color_print(f"[?] Enter the new version of the project (Old version is {VERSION}): ", "cyan", - is_input=True) + version = color_print( + f"[?] Enter the new version of the project (Old version is {VERSION}): ", + "cyan", + is_input=True, + ) if re.match(r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)$", version): _update_ini_file("config.ini", version, "version") break attempts += 1 if attempts >= max_attempts: - color_print("[x] Maximum attempts reached. Please run the script again.", "red") + color_print( + "[x] Maximum attempts reached. Please run the script again.", "red" + ) exit() else: - color_print("[!] Please enter a valid version number (e.g., 1.2.3)", "yellow") + color_print( + "[!] Please enter a valid version number (e.g., 1.2.3)", "yellow" + ) color_print(f"[!] {max_attempts - attempts} attempts remaining", "yellow") - color_print("\n[-] Great Job! Please tick the box in the GitHub PR request for completing steps in --dev", "green") + color_print( + "\n[-] Great Job! Please tick the box in the GitHub PR request for completing steps in --dev", + "green", + ) @log.function def dev_checks() -> None: """ Performs comprehensive developer checks to ensure code quality and project guidelines compliance. - - This function guides developers through a series of predefined checks, validates file additions, + + This function guides developers through a series of predefined checks, validates file additions, and updates project configuration. It performs the following key steps: - Verify adherence to contributing guidelines - Check file naming conventions @@ -179,10 +201,10 @@ def dev_checks() -> None: - Assess feature modularity - Categorize and display file changes - Update project configuration file - + Raises: None: Returns None if any check fails or an error occurs during the process. - + Side Effects: - Creates necessary directories - Prompts user for multiple confirmations diff --git a/CODE/logicytics/FileManagement.py b/CODE/logicytics/FileManagement.py index 4a68d6b5..b82b7f26 100644 --- a/CODE/logicytics/FileManagement.py +++ b/CODE/logicytics/FileManagement.py @@ -36,13 +36,13 @@ def open_file(file: str, use_full_path: bool = False) -> str | None: def mkdir(): """ Creates the necessary directories for storing logs, and data. - + This method ensures the existence of specific directory structures used by the application, including: - Log directories for general, debug, and performance logs - Data directories for storing hashes and zip files - + The method uses `os.makedirs()` with `exist_ok=True` to create directories without raising an error if they already exist. 
- + Returns: None: No return value. Directories are created as a side effect. """ @@ -91,12 +91,19 @@ def __get_files_to_zip(path: str) -> list: list: A list of file and directory names to be zipped. """ excluded_extensions = (".py", ".exe", ".bat", ".ps1", ".pkl", ".pth") - excluded_prefixes = ("config.ini", "SysInternal_Suite", - "__pycache__", "logicytics", "vulnscan") + excluded_prefixes = ( + "config.ini", + "SysInternal_Suite", + "__pycache__", + "logicytics", + "vulnscan", + ) return [ - f for f in os.listdir(path) - if not f.endswith(excluded_extensions) and not f.startswith(excluded_prefixes) + f + for f in os.listdir(path) + if not f.endswith(excluded_extensions) + and not f.startswith(excluded_prefixes) ] @staticmethod @@ -116,7 +123,10 @@ def __create_zip_file(path: str, files: list, filename: str): def ignore_files(files_func): for root, _, file_func in os.walk(os.path.join(path, files_func)): for f in file_func: - zip_file.write(os.path.join(root, f), os.path.relpath(os.path.join(root, f), path)) + zip_file.write( + os.path.join(root, f), + os.path.relpath(os.path.join(root, f), path), + ) with zipfile.ZipFile(f"{filename}.zip", "w") as zip_file: for file in files: @@ -203,7 +213,7 @@ def and_hash(cls, path: str, name: str, flag: str) -> tuple | str: Returns: tuple or str: A tuple containing success messages or an error message. """ - time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + time = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") filename = f"Logicytics_{name}_{flag}_{time}" files_to_zip = cls.__get_files_to_zip(path) cls.__create_zip_file(path, files_to_zip, filename) diff --git a/CODE/logicytics/Flag.py b/CODE/logicytics/Flag.py index 0ae5974b..3cc50783 100644 --- a/CODE/logicytics/Flag.py +++ b/CODE/logicytics/Flag.py @@ -12,18 +12,23 @@ from logicytics.Logger import log # Check if the script is being run directly, if not, set up the library -if __name__ == '__main__': +if __name__ == "__main__": exit("This is a library, Please import rather than directly run.") else: # Save user preferences? 
SAVE_PREFERENCES = config.getboolean("Settings", "save_preferences") # Debug mode for Sentence Transformer - DEBUG_MODE = config.getboolean("Flag Settings", "model_debug") # Debug mode for Sentence Transformer + DEBUG_MODE = config.getboolean( + "Flag Settings", "model_debug" + ) # Debug mode for Sentence Transformer # File for storing user history data - HISTORY_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'User_History.json.gz') # User history file + HISTORY_FILE = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "User_History.json.gz" + ) # User history file # Minimum accuracy threshold for flag suggestions MIN_ACCURACY_THRESHOLD = float( - config.get("Flag Settings", "accuracy_min")) # Minimum accuracy threshold for flag suggestions + config.get("Flag Settings", "accuracy_min") + ) # Minimum accuracy threshold for flag suggestions if not 1 <= MIN_ACCURACY_THRESHOLD <= 99: raise ValueError("accuracy_min must be between 1 and 99") @@ -55,6 +60,7 @@ def __get_sim(user_input: str, all_descriptions: list[str]) -> list[float]: # Encode the current user input and historical inputs from sentence_transformers import SentenceTransformer, util import logging # Suppress logging messages from Sentence Transformer due to verbosity + # Set the logging level based on the debug mode, either DEBUG or ERROR (aka only important messages) if DEBUG_MODE: logging.getLogger("sentence_transformers").setLevel(logging.DEBUG) @@ -66,14 +72,24 @@ def __get_sim(user_input: str, all_descriptions: list[str]) -> list[float]: except Exception as e: log.critical(f"Error: {e}") log.error("Please check the model name in the config file.") - log.error(f"Model name {config.get('Flag Settings', 'model_to_use')} may not be valid.") + log.error( + f"Model name {config.get('Flag Settings', 'model_to_use')} may not be valid." 
+ ) exit(1) - user_embedding = MODEL.encode(user_input, convert_to_tensor=True, show_progress_bar=DEBUG_MODE) - historical_embeddings = MODEL.encode(all_descriptions, convert_to_tensor=True, show_progress_bar=DEBUG_MODE) + user_embedding = MODEL.encode( + user_input, convert_to_tensor=True, show_progress_bar=DEBUG_MODE + ) + historical_embeddings = MODEL.encode( + all_descriptions, convert_to_tensor=True, show_progress_bar=DEBUG_MODE + ) # Compute cosine similarities - similarities = util.pytorch_cos_sim(user_embedding, historical_embeddings).squeeze(0).tolist() + similarities = ( + util.pytorch_cos_sim(user_embedding, historical_embeddings) + .squeeze(0) + .tolist() + ) return similarities @classmethod @@ -98,25 +114,29 @@ def __suggest_flags_based_on_history(cls, user_input: str) -> list[str]: if not SAVE_PREFERENCES: return [] history_data = cls.load_history() - if not history_data or 'interactions' not in history_data: + if not history_data or "interactions" not in history_data: return [] - interactions = history_data['interactions'] + interactions = history_data["interactions"] all_descriptions = [] all_flags = [] # Combine all flags and their respective user inputs for flag, details in interactions.items(): all_flags.extend([flag] * len(details)) - all_descriptions.extend([detail['user_input'] for detail in details]) + all_descriptions.extend([detail["user_input"] for detail in details]) # Encode the current user input and historical inputs # Compute cosine similarities similarities = cls.__get_sim(user_input, all_descriptions) # Find the top 3 most similar historical inputs - top_indices = sorted(range(len(similarities)), key=lambda i: similarities[i], reverse=True)[:3] - suggested_flags = [all_flags[i] for i in top_indices if similarities[i] > 0.3] + top_indices = sorted( + range(len(similarities)), key=lambda i: similarities[i], reverse=True + )[:3] + suggested_flags = [ + all_flags[i] for i in top_indices if similarities[i] > 0.3 + ] # Remove duplicates and return suggestions return list(dict.fromkeys(suggested_flags)) @@ -158,19 +178,20 @@ def generate_summary_and_graph(cls): history_data = cls.load_history() # Extract interactions and flag usage count - interactions = history_data['interactions'] - flags_usage = history_data['flags_usage'] + interactions = history_data["interactions"] + flags_usage = history_data["flags_usage"] # Summary of flag usage total_interactions = sum(flags_usage.values()) log.info( - "--------------------------------------------------\n Flag guessing statistics:\n --------------------------------------------------") + "--------------------------------------------------\n Flag guessing statistics:\n --------------------------------------------------" + ) for flag, details in interactions.items(): - accuracies = [detail['accuracy'] for detail in details] - device_names = [detail['device_name'] for detail in details] - user_inputs = [detail['user_input'] for detail in details] + accuracies = [detail["accuracy"] for detail in details] + device_names = [detail["device_name"] for detail in details] + user_inputs = [detail["user_input"] for detail in details] average_accuracy = sum(accuracies) / len(accuracies) most_common_device = Counter(device_names).most_common(1)[0][0] @@ -182,10 +203,18 @@ def generate_summary_and_graph(cls): # Print the summary to the console log.info( - "--------------------------------------------------\n User Interaction Summary:\n --------------------------------------------------") + 
"--------------------------------------------------\n User Interaction Summary:\n --------------------------------------------------" + ) - log.info(f"Total Interactions with the match flag feature: {total_interactions}") - flag_usage_summary = "\n".join([f" {flag}: {count} times" for flag, count in flags_usage.items()]) + log.info( + f"Total Interactions with the match flag feature: {total_interactions}" + ) + flag_usage_summary = "\n".join( + [ + f" {flag}: {count} times" + for flag, count in flags_usage.items() + ] + ) log.info(f"Flag Usage Summary:\n{flag_usage_summary}") # Generate the graph for flag usage @@ -193,23 +222,31 @@ def generate_summary_and_graph(cls): counts = list(flags_usage.values()) plt.figure(figsize=(10, 6)) - plt.barh(flags, counts, color='skyblue') - plt.xlabel('Usage Count') - plt.title('Flag Usage Frequency') + plt.barh(flags, counts, color="skyblue") + plt.xlabel("Usage Count") + plt.title("Flag Usage Frequency") plt.gca().invert_yaxis() # Invert y-axis for better readability - plt.subplots_adjust(left=0.2, right=0.8, top=0.9, bottom=0.1) # Adjust layout + plt.subplots_adjust( + left=0.2, right=0.8, top=0.9, bottom=0.1 + ) # Adjust layout # Save and display the graph try: - plt.savefig('../ACCESS/DATA/Flag_usage_summary.png') - log.info("Flag Usage Summary Graph saved to 'ACCESS/DATA/Flag_usage_summary.png'") + plt.savefig("../ACCESS/DATA/Flag_usage_summary.png") + log.info( + "Flag Usage Summary Graph saved to 'ACCESS/DATA/Flag_usage_summary.png'" + ) except FileNotFoundError: try: - plt.savefig('../../ACCESS/DATA/Flag_usage_summary.png') - log.info("Flag Usage Summary Graph saved to 'ACCESS/DATA/Flag_usage_summary.png'") + plt.savefig("../../ACCESS/DATA/Flag_usage_summary.png") + log.info( + "Flag Usage Summary Graph saved to 'ACCESS/DATA/Flag_usage_summary.png'" + ) except FileNotFoundError: - plt.savefig('Flag_usage_summary.png') - log.info("Flag Usage Summary Graph saved in current working directory as 'Flag_usage_summary.png'") + plt.savefig("Flag_usage_summary.png") + log.info( + "Flag Usage Summary Graph saved in current working directory as 'Flag_usage_summary.png'" + ) @staticmethod def load_history() -> dict: @@ -228,10 +265,12 @@ def load_history() -> dict: gzip.BadGzipFile: If the gzipped file is corrupted """ try: - with gzip.open(HISTORY_FILE, 'rt', encoding='utf-8') as f: # Use 'rt' mode for text read + with gzip.open( + HISTORY_FILE, "rt", encoding="utf-8" + ) as f: # Use 'rt' mode for text read return json.load(f) except FileNotFoundError: - return {'interactions': {}, 'flags_usage': Counter()} + return {"interactions": {}, "flags_usage": Counter()} @staticmethod def save_history(history_data: dict): @@ -251,7 +290,9 @@ def save_history(history_data: dict): - Indents JSON for human-readable format """ if SAVE_PREFERENCES: - with gzip.open(HISTORY_FILE, 'wt', encoding='utf-8') as f: # Use 'wt' mode for text write + with gzip.open( + HISTORY_FILE, "wt", encoding="utf-8" + ) as f: # Use 'wt' mode for text write json.dump(history_data, f, indent=4) @classmethod @@ -280,36 +321,38 @@ def update_history(cls, user_input: str, matched_flag: str, accuracy: float): if not SAVE_PREFERENCES: return history_data = cls.load_history() - matched_flag = matched_flag.lstrip('-') + matched_flag = matched_flag.lstrip("-") # Ensure that interactions is a dictionary (not a list) - if not isinstance(history_data['interactions'], dict): - history_data['interactions'] = {} + if not isinstance(history_data["interactions"], dict): + history_data["interactions"] = {} # 
Create a new interaction dictionary interaction = { - 'timestamp': datetime.now().strftime('%H:%M:%S - %d/%m/%Y'), - 'user_input': user_input, - 'accuracy': accuracy, - 'device_name': os.getlogin() + "timestamp": datetime.now().strftime("%H:%M:%S - %d/%m/%Y"), + "user_input": user_input, + "accuracy": accuracy, + "device_name": os.getlogin(), } # Ensure the flag exists in the interactions dictionary - if matched_flag not in history_data['interactions']: - history_data['interactions'][matched_flag] = [] + if matched_flag not in history_data["interactions"]: + history_data["interactions"][matched_flag] = [] # Append the new interaction to the flag's list of interactions - history_data['interactions'][matched_flag].append(interaction) + history_data["interactions"][matched_flag].append(interaction) # Ensure the flag exists in the flags_usage counter and increment it - if matched_flag not in history_data['flags_usage']: - history_data['flags_usage'][matched_flag] = 0 - history_data['flags_usage'][matched_flag] += 1 + if matched_flag not in history_data["flags_usage"]: + history_data["flags_usage"][matched_flag] = 0 + history_data["flags_usage"][matched_flag] += 1 cls.save_history(history_data) @classmethod - def flag(cls, user_input: str, flags: list[str], flag_description: list[str]) -> tuple[str, float]: + def flag( + cls, user_input: str, flags: list[str], flag_description: list[str] + ) -> tuple[str, float]: """ Matches user input to flag descriptions using advanced semantic similarity. @@ -341,10 +384,14 @@ def flag(cls, user_input: str, flags: list[str], flag_description: list[str]) -> ["Display help", "Enable verbose output"]) """ if len(flags) != len(flag_description): - raise ValueError("flags and flag_description lists must be of the same length") + raise ValueError( + "flags and flag_description lists must be of the same length" + ) # Combine flags and descriptions for better matching context - combined_descriptions = [f"{flag} {desc}" for flag, desc in zip(flags, flag_description)] + combined_descriptions = [ + f"{flag} {desc}" for flag, desc in zip(flags, flag_description) + ] # Encode user input and all descriptions # Compute cosine similarities @@ -353,7 +400,11 @@ def flag(cls, user_input: str, flags: list[str], flag_description: list[str]) -> # Find the best match best_index = max(range(len(similarities)), key=lambda i: similarities[i]) best_accuracy = similarities[best_index] * 100 - best_match = flags[best_index] if best_accuracy > MIN_ACCURACY_THRESHOLD else "Nothing matched" + best_match = ( + flags[best_index] + if best_accuracy > MIN_ACCURACY_THRESHOLD + else "Nothing matched" + ) # Update history cls.update_history(user_input, best_match, best_accuracy) @@ -362,8 +413,10 @@ def flag(cls, user_input: str, flags: list[str], flag_description: list[str]) -> if best_accuracy < MIN_ACCURACY_THRESHOLD: suggested_flags = cls.__suggest_flags_based_on_history(user_input) if suggested_flags: - log.warning(f"No Flags matched so suggestions based on historical data: " - f"{', '.join(suggested_flags)}") + log.warning( + f"No Flags matched so suggestions based on historical data: " + f"{', '.join(suggested_flags)}" + ) return best_match, best_accuracy @@ -379,21 +432,19 @@ def __colorify(cls, text: str, color: str) -> str: Returns: str: The colorized text with ANSI escape codes """ - colors = { - "y": "\033[93m", - "r": "\033[91m", - "b": "\033[94m" - } + colors = {"y": "\033[93m", "r": "\033[91m", "b": "\033[94m"} RESET = "\033[0m" return f"{colors.get(color, '')}{text}{RESET}" if 
color in colors else text @classmethod - def __available_arguments(cls) -> tuple[argparse.Namespace, argparse.ArgumentParser]: + def __available_arguments( + cls, + ) -> tuple[argparse.Namespace, argparse.ArgumentParser]: """ Defines and parses command-line arguments for the Logicytics application. - + This method creates an ArgumentParser with a comprehensive set of flags for customizing the application's behavior. It supports various execution modes, debugging options, system management flags, and post-execution actions. - + The method handles argument parsing, provides helpful descriptions for each flag, and includes color-coded hints for user guidance. It also supports suggesting valid flags if an unknown flag is provided. Returns: @@ -404,9 +455,9 @@ def __available_arguments(cls) -> tuple[argparse.Namespace, argparse.ArgumentPar # Define the argument parser parser = argparse.ArgumentParser( description="Logicytics, The most powerful tool for system data analysis. " - "This tool provides a comprehensive suite of features for analyzing system data, " - "including various modes for different levels of detail and customization.", - allow_abbrev=False + "This tool provides a comprehensive suite of features for analyzing system data, " + "including various modes for different levels of detail and customization.", + allow_abbrev=False, ) # Define Actions Flags @@ -414,37 +465,37 @@ def __available_arguments(cls) -> tuple[argparse.Namespace, argparse.ArgumentPar "--default", action="store_true", help="Runs Logicytics with its default settings and scripts. " - f"{cls.__colorify('- Recommended for most users -', 'b')}", + f"{cls.__colorify('- Recommended for most users -', 'b')}", ) parser.add_argument( "--threaded", action="store_true", help="Runs Logicytics using threads, where it runs in parallel, default settings though" - f"{cls.__colorify('- Recommended for some users -', 'b')}", + f"{cls.__colorify('- Recommended for some users -', 'b')}", ) parser.add_argument( "--modded", action="store_true", help="Runs the normal Logicytics, as well as any File in the MODS directory, " - "Used for custom scripts as well as default ones.", + "Used for custom scripts as well as default ones.", ) parser.add_argument( "--depth", action="store_true", help="This flag will run all default script's in threading mode, " - "as well as any clunky and huge code, which produces a lot of data " - f"{cls.__colorify('- Will take a long time -', 'y')}", + "as well as any clunky and huge code, which produces a lot of data " + f"{cls.__colorify('- Will take a long time -', 'y')}", ) parser.add_argument( "--nopy", action="store_true", help="Run Logicytics using all non-python scripts, " - f"These may be {cls.__colorify('outdated', 'y')} " - "and not the best, use only if the device doesnt have python installed.", + f"These may be {cls.__colorify('outdated', 'y')} " + "and not the best, use only if the device doesnt have python installed.", ) # TODO v3.6.0 -> Out of beta @@ -452,9 +503,9 @@ def __available_arguments(cls) -> tuple[argparse.Namespace, argparse.ArgumentPar "--vulnscan-ai", action="store_true", help="Run's Logicytics new Sensitive data Detection AI, its a new feature that will " - "detect any files that are out of the ordinary, and logs their path. Runs threaded." - f"{cls.__colorify('- Beta Mode -', 'y')} " - f"{cls.__colorify('- Will take a long time -', 'y')}", + "detect any files that are out of the ordinary, and logs their path. Runs threaded." 
+ f"{cls.__colorify('- Beta Mode -', 'y')} " + f"{cls.__colorify('- Will take a long time -', 'y')}", ) parser.add_argument( @@ -467,13 +518,13 @@ def __available_arguments(cls) -> tuple[argparse.Namespace, argparse.ArgumentPar "--performance-check", action="store_true", help="Run's Logicytics default while testing its performance and time, " - "this then shows a table with the file names and time to executed. " + "this then shows a table with the file names and time to executed. ", ) parser.add_argument( "--usage", action="store_true", - help="Run's script that shows and gives your local statistics, on the flags used by you" + help="Run's script that shows and gives your local statistics, on the flags used by you", ) # Define Side Flags @@ -481,24 +532,24 @@ def __available_arguments(cls) -> tuple[argparse.Namespace, argparse.ArgumentPar "--debug", action="store_true", help="Runs the Debugger, Will check for any issues, " - "warning etc, useful for debugging and issue reporting " - f"{cls.__colorify('- Use to get a special log file to report the bug -', 'b')}.", + "warning etc, useful for debugging and issue reporting " + f"{cls.__colorify('- Use to get a special log file to report the bug -', 'b')}.", ) parser.add_argument( "--update", action="store_true", help="Update Logicytics from GitHub, only if you have git properly installed " - "and the project was downloaded via git " - f"{cls.__colorify('- Use on your own device only -', 'y')}.", + "and the project was downloaded via git " + f"{cls.__colorify('- Use on your own device only -', 'y')}.", ) parser.add_argument( "--dev", action="store_true", help="Run Logicytics developer mod, this is only for people who want to " - "register their contributions properly. " - f"{cls.__colorify('- Use on your own device only -', 'y')}.", + "register their contributions properly. " + f"{cls.__colorify('- Use on your own device only -', 'y')}.", ) # Define After-Execution Flags @@ -516,7 +567,9 @@ def __available_arguments(cls) -> tuple[argparse.Namespace, argparse.ArgumentPar # Parse the arguments args, unknown = parser.parse_known_args() - valid_flags = [action.dest for action in parser._actions if action.dest != 'help'] + valid_flags = [ + action.dest for action in parser._actions if action.dest != "help" + ] if unknown: cls.__suggest_flag(unknown[0], valid_flags) exit(1) @@ -526,18 +579,18 @@ def __available_arguments(cls) -> tuple[argparse.Namespace, argparse.ArgumentPar def __exclusivity_logic(args: argparse.Namespace) -> bool: """ Validates the mutual exclusivity of command-line flags to prevent invalid flag combinations. - + This method checks for conflicting or mutually exclusive flags across three flag categories: - Special flags (reboot, shutdown, webhook) - Action flags (default, threaded, modded, minimal, nopy, depth, performance_check) - Exclusive flags (vulnscan_ai) - + Parameters: args (argparse.Namespace): Parsed command-line arguments to validate. - + Returns: bool: True if any special flags are set, False otherwise. - + Raises: SystemExit: If incompatible flag combinations are detected, with an error message describing the conflict. 
""" @@ -553,22 +606,28 @@ def __exclusivity_logic(args: argparse.Namespace) -> bool: args.nopy, args.depth, args.performance_check, - args.usage + args.usage, } exclusive_flags = { args.vulnscan_ai, } if any(special_flags) and not any(action_flags): - log.error("Invalid combination of flags_list: Special and Action flag exclusivity issue.") + log.error( + "Invalid combination of flags_list: Special and Action flag exclusivity issue." + ) exit(1) if any(exclusive_flags) and any(action_flags): - log.error("Invalid combination of flags_list: Exclusive and Action flag exclusivity issue.") + log.error( + "Invalid combination of flags_list: Exclusive and Action flag exclusivity issue." + ) exit(1) if any(exclusive_flags) and any(special_flags): - log.error("Invalid combination of flags_list: Exclusive and Special flag exclusivity issue.") + log.error( + "Invalid combination of flags_list: Exclusive and Special flag exclusivity issue." + ) exit(1) return any(special_flags) @@ -577,18 +636,18 @@ def __exclusivity_logic(args: argparse.Namespace) -> bool: def __used_flags_logic(args: argparse.Namespace) -> tuple[str, ...]: """ Determines the flags that are set to True in the provided command-line arguments. - - This method examines the arguments namespace and returns a tuple of flag names - that have been activated. It limits the returned flags to a maximum of two to + + This method examines the arguments namespace and returns a tuple of flag names + that have been activated. It limits the returned flags to a maximum of two to prevent excessive flag usage. - + Parameters: args (argparse.Namespace): Parsed command-line arguments to be analyzed. - + Returns: - tuple[str, ...]: A tuple containing the names of flags set to True, + tuple[str, ...]: A tuple containing the names of flags set to True, with a maximum of two flags. - + Notes: - If no flags are set, returns an empty tuple. - Stops collecting flags after finding two True flags to limit complexity. @@ -606,63 +665,74 @@ def __used_flags_logic(args: argparse.Namespace) -> tuple[str, ...]: def __suggest_flag(cls, user_input: str, valid_flags: list[str]): """ Suggests the closest valid flag based on the user's input and provides interactive flag matching. - + This method handles flag suggestion through two mechanisms: 1. Using difflib to find close flag matches 2. Prompting user for a description to find the most relevant flag - + Args: user_input (str): The flag input by the user. valid_flags (list[str]): The list of valid flags. - + Behavior: - If a close flag match exists, suggests the closest match - If no close match, prompts user for a description - Uses the _Match.flag method to find the most accurate flag based on description - Prints matching results, with optional detailed output in debug mode - + Side Effects: - Prints suggestions and matched flags to console - Prompts user for additional input if no direct match is found """ # Get the closest valid flag match based on the user's input - closest_matches = difflib.get_close_matches(user_input, valid_flags, n=1, cutoff=0.6) + closest_matches = difflib.get_close_matches( + user_input, valid_flags, n=1, cutoff=0.6 + ) if closest_matches: - log.warning(f"Invalid flag '{user_input}', Did you mean '--{closest_matches[0].replace('_', '-')}'?") + log.warning( + f"Invalid flag '{user_input}', Did you mean '--{closest_matches[0].replace('_', '-')}'?" 
+ ) exit(1) # Prompt the user for a description if no close match is found - user_input_desc = input("We can't find a match, Please provide a description: ").lower() + user_input_desc = input( + "We can't find a match, Please provide a description: " + ).lower() # Map the user-provided description to the closest valid flag flags_list = [f"--{flag}" for flag in valid_flags] descriptions_list = [f"Run Logicytics with {flag}" for flag in valid_flags] - flag_received, accuracy_received = cls.Match.flag(user_input_desc, flags_list, descriptions_list) + flag_received, accuracy_received = cls.Match.flag( + user_input_desc, flags_list, descriptions_list + ) if DEBUG_MODE: log.info( - f"User input: {user_input_desc}\nMatched flag: {flag_received.replace('_', '-')}\nAccuracy: {accuracy_received:.2f}%\n") + f"User input: {user_input_desc}\nMatched flag: {flag_received.replace('_', '-')}\nAccuracy: {accuracy_received:.2f}%\n" + ) else: - log.info(f"Matched flag: {flag_received.replace('_', '-')} (Accuracy: {accuracy_received:.2f}%)\n") + log.info( + f"Matched flag: {flag_received.replace('_', '-')} (Accuracy: {accuracy_received:.2f}%)\n" + ) @staticmethod def show_help_menu(return_output: bool = False) -> str | None: """ Display the help menu for the Logicytics application. - + This method retrieves the argument parser from the Flag class and either prints or returns the help text based on the input parameter. - + Args: - return_output (bool, optional): Controls the method's behavior. - - If True, returns the formatted help text as a string. + return_output (bool, optional): Controls the method's behavior. + - If True, returns the formatted help text as a string. - If False (default), prints the help text directly to the console. - + Returns: str or None: Help text as a string if return_output is True, otherwise None. - + Example: # Print help menu to console Flag.show_help_menu() - + # Get help menu as a string help_text = Flag.show_help_menu(return_output=True) print(help_text) @@ -677,7 +747,7 @@ def show_help_menu(return_output: bool = False) -> str | None: def data(cls) -> tuple[str, str | None]: """ Handles the parsing and validation of command-line flags. - + This method processes command-line arguments, validates their usage, and manages flag interactions. It ensures that: - Only one primary action flag is used at a time - Special flags are handled with specific logic @@ -689,7 +759,7 @@ def data(cls) -> tuple[str, str | None]: - The primary matched flag - An optional secondary flag (None if not applicable) - Exits the program if no flags are used or invalid combinations are detected - + Raises: SystemExit: Terminates the program with an error message for: - Invalid flag combinations @@ -721,25 +791,25 @@ def data(cls) -> tuple[str, str | None]: def update_data_history(matched_flag: str): """ Update the usage count for a specific flag in the user's interaction history. - + This method increments the usage count for a given flag in the historical data. If the flag does not exist in the history, it initializes its count to 0 before incrementing. - + Parameters: matched_flag (str): The flag whose usage count needs to be updated. 
- + Side Effects: - Modifies the 'flags_usage' dictionary in the user's history file - Saves the updated history data to a persistent storage - + Example: update_data_history('--verbose') # Increments usage count for '--verbose' flag """ history_data = cls.Match.load_history() # Ensure the flag exists in the flags_usage counter and increment it - if matched_flag.replace("--", "") not in history_data['flags_usage']: - history_data['flags_usage'][matched_flag.replace("--", "")] = 0 - history_data['flags_usage'][matched_flag.replace("--", "")] += 1 + if matched_flag.replace("--", "") not in history_data["flags_usage"]: + history_data["flags_usage"][matched_flag.replace("--", "")] = 0 + history_data["flags_usage"][matched_flag.replace("--", "")] += 1 cls.Match.save_history(history_data) if len(used_flags) == 2: diff --git a/CODE/logicytics/Logger.py b/CODE/logicytics/Logger.py index 171c5870..d4078f33 100644 --- a/CODE/logicytics/Logger.py +++ b/CODE/logicytics/Logger.py @@ -18,6 +18,7 @@ class Log: """ A logging class that supports colored output using the colorlog library. """ + _instance = None def __new__(cls, *args, **kwargs): @@ -48,8 +49,12 @@ def __init__(self, config: dict = None): self.reset() # log_path_relative variable takes Logger.py full path, # goes up twice then joins with ACCESS\\LOGS\\Logicytics.log - log_path_relative = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), - "ACCESS\\LOGS\\Logicytics.log") + log_path_relative = os.path.join( + os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + ), + "ACCESS\\LOGS\\Logicytics.log", + ) config = config or { "filename": log_path_relative, "use_colorlog": True, @@ -105,11 +110,13 @@ def __init__(self, config: dict = None): if not os.path.exists(self.filename): self.newline() - self._raw("| Timestamp | LOG Level |" - + " " * 71 - + "LOG Messages" - + " " * 71 - + "|") + self._raw( + "| Timestamp | LOG Level |" + + " " * 71 + + "LOG Messages" + + " " * 71 + + "|" + ) elif os.path.exists(self.filename) and config.get("delete_log", False): with open(self.filename, "w") as f: f.write( @@ -158,15 +165,15 @@ def __trunc_message(self, message: str) -> str: def __internal(self, message): """ Log an internal message exclusively to the console. - + Internal messages are used for logging system states or debug information that should not be written to log files. These messages are only displayed in the console when color logging is enabled. - + Parameters: message (str): The internal message to be logged. If the message is "None" or None, no logging occurs. - + Notes: - Requires color logging to be enabled - Uses a custom internal log level @@ -187,7 +194,7 @@ def debug(self, message): def _raw(self, message): """ Log a raw message directly to the log file. - + This method writes a message directly to the log file without any additional formatting or logging levels. @@ -197,14 +204,14 @@ def _raw(self, message): Parameters: message (str): The raw message to be written to the log file. 
- + Notes: - Checks the calling context to warn about non-function calls - Handles potential Unicode encoding errors - Skips logging if message is None or "None" - Writes message with a newline character - Logs internal errors if file writing fails - + Raises: Logs internal errors for Unicode or file writing issues without stopping execution """ @@ -215,27 +222,25 @@ def _raw(self, message): ) # Precompiled regex for ANSI escape codes # Remove all ANSI escape sequences in one pass - message = re.compile(r'\033\[\d+(;\d+)*m').sub('', message) + message = re.compile(r"\033\[\d+(;\d+)*m").sub("", message) if message and message != "None": try: with open(self.filename, "a", encoding="utf-8") as f: f.write(f"{str(message)}\n") except (UnicodeDecodeError, UnicodeEncodeError) as UDE: - self.__internal( - f"UnicodeDecodeError: {UDE} - Message: {str(message)}" - ) + self.__internal(f"UnicodeDecodeError: {UDE} - Message: {str(message)}") except Exception as E: self.__internal(f"Error: {E} - Message: {str(message)}") def newline(self): """ Write a newline separator to the log file, creating a visual divider between log entries. - + This method writes a formatted horizontal line to the log file using ASCII characters, which helps visually separate different sections or log entries. The line consists of vertical bars and dashes creating a structured tabular-like separator. - + Side Effects: Appends a newline separator to the log file specified by `self.filename`. """ @@ -250,7 +255,9 @@ def info(self, message): """ if self.color and message != "None" and message is not None: colorlog.info(str(message)) - self._raw(f"[{self.__timestamp()}] > INFO: | {self.__trunc_message(str(message))}") + self._raw( + f"[{self.__timestamp()}] > INFO: | {self.__trunc_message(str(message))}" + ) def warning(self, message): """ @@ -260,7 +267,9 @@ def warning(self, message): """ if self.color and message != "None" and message is not None: colorlog.warning(str(message)) - self._raw(f"[{self.__timestamp()}] > WARNING: | {self.__trunc_message(str(message))}") + self._raw( + f"[{self.__timestamp()}] > WARNING: | {self.__trunc_message(str(message))}" + ) def error(self, message): """ @@ -270,7 +279,9 @@ def error(self, message): """ if self.color and message != "None" and message is not None: colorlog.error(str(message)) - self._raw(f"[{self.__timestamp()}] > ERROR: | {self.__trunc_message(str(message))}") + self._raw( + f"[{self.__timestamp()}] > ERROR: | {self.__trunc_message(str(message))}" + ) def critical(self, message): """ @@ -280,53 +291,62 @@ def critical(self, message): """ if self.color and message != "None" and message is not None: colorlog.critical(str(message)) - self._raw(f"[{self.__timestamp()}] > CRITICAL: | {self.__trunc_message(str(message))}") + self._raw( + f"[{self.__timestamp()}] > CRITICAL: | {self.__trunc_message(str(message))}" + ) def string(self, message, type: str): """ Logs a message with a specified log type, supporting multiple type aliases. - + This method allows logging messages with flexible type specifications, mapping aliases to standard log types and handling potential errors in type selection. It supports logging with color if enabled. - + Parameters: message (str): The message to be logged. Skipped if "None" or None. 
type (str): The log type, which can be one of: - Standard types: 'debug', 'info', 'warning', 'error', 'critical' - Aliases: 'err' (error), 'warn' (warning), 'crit' (critical), 'except' (exception) - + Behavior: - Converts type to lowercase and maps aliases to standard log types - Logs message using the corresponding log method - Falls back to debug logging if an invalid type is provided - Only logs if color is enabled and message is not "None" - + Raises: AttributeError: If no matching log method is found (internally handled) """ if self.color and message != "None" and message is not None: - type_map = {"err": "error", "warn": "warning", "crit": "critical", "except": "exception"} + type_map = { + "err": "error", + "warn": "warning", + "crit": "critical", + "except": "exception", + } type = type_map.get(type.lower(), type) try: getattr(self, type.lower())(str(message)) except AttributeError as AE: - self.__internal(f"A wrong Log Type was called: {type} not found. -> {AE}") + self.__internal( + f"A wrong Log Type was called: {type} not found. -> {AE}" + ) getattr(self, "Debug".lower())(str(message)) def exception(self, message, exception_type: Type = Exception): """ Log an exception message and raise the specified exception. - + Warning: Not recommended for production use. Prefer Log().error() for logging exceptions. - + Args: message (str): The exception message to be logged. exception_type (Type, optional): The type of exception to raise. Defaults to Exception. - + Raises: The specified exception type with the provided message. - + Note: - Only logs the exception if color logging is enabled and message is not None - Logs the exception with a timestamp and truncated message @@ -334,26 +354,27 @@ def exception(self, message, exception_type: Type = Exception): """ if self.color and message != "None" and message is not None: self._raw( - f"[{self.__timestamp()}] > EXCEPTION:| {self.__trunc_message(f'{message} -> Exception provoked: {str(exception_type)}')}") + f"[{self.__timestamp()}] > EXCEPTION:| {self.__trunc_message(f'{message} -> Exception provoked: {str(exception_type)}')}" + ) raise exception_type(message) def execution(self, message_log: list[tuple[str, str]]): """ Parse and log multiple messages with their corresponding log types. - + This method processes a list of messages, where each message is associated with a specific log type. It is designed for scenarios where multiple log entries need to be processed simultaneously, such as logging script execution results. - + Parameters: message_log (list[tuple[str, str]]): A list of message entries. Each entry is a list containing two elements: - First element: The log message (str) - Second element: The log type (str) - + Behavior: - Iterates through the provided message log - Logs each message using the specified log type via `self.string()` - Logs an internal warning if a message list does not contain exactly two elements - + Example: log = Log() log.parse_execution([ @@ -374,16 +395,16 @@ def execution(self, message_log: list[tuple[str, str]]): def function(self, func: callable): """ A decorator that logs the execution details of a function, tracking its performance and providing runtime insights. - + Parameters: func (callable): The function to be decorated and monitored. - + Returns: callable: A wrapper function that logs execution metrics. - + Raises: TypeError: If the provided function is not callable. 
- + Example: @log.function def example_function(): @@ -396,32 +417,35 @@ def example_function(): def wrapper(*args, **kwargs): """ Wrapper function that logs the execution of the decorated function. - + Tracks and logs the start, execution, and completion of a function with performance timing. - + Parameters: *args (tuple): Positional arguments passed to the decorated function. **kwargs (dict): Keyword arguments passed to the decorated function. - + Returns: Any: The original result of the decorated function. - + Raises: TypeError: If the decorated function is not callable. - + Notes: - Logs debug messages before and after function execution - Measures and logs the total execution time with microsecond precision - Preserves the original function's return value """ start_time = time.perf_counter() - func_args = ", ".join([str(arg) for arg in args] + - [f"{k}={v}" for k, v in kwargs.items()]) + func_args = ", ".join( + [str(arg) for arg in args] + [f"{k}={v}" for k, v in kwargs.items()] + ) self.debug(f"Running the function {func.__name__}({func_args}).") result = func(*args, **kwargs) end_time = time.perf_counter() elapsed_time = end_time - start_time - self.debug(f"{func.__name__}({func_args}) executed in {elapsed_time} -> returned {type(result).__name__}") + self.debug( + f"{func.__name__}({func_args}) executed in {elapsed_time} -> returned {type(result).__name__}" + ) return result return wrapper diff --git a/CODE/logicytics/__init__.py b/CODE/logicytics/__init__.py index 53c98b35..fb07ac86 100644 --- a/CODE/logicytics/__init__.py +++ b/CODE/logicytics/__init__.py @@ -10,15 +10,19 @@ from logicytics.Logger import log, Log # Check if the script is being run directly, if not, set up the library -if __name__ == '__main__': +if __name__ == "__main__": exit("This is a library, Please import rather than directly run.") execute = Execute() # Initialize the Execute class for executing commands get = Get() # Initialize the Get class for retrieving data check = Check() # Initialize the Check class for performing checks flag = Flag() # Initialize the Flag class for managing cli flags -file_management = FileManagement() # Initialize the FileManagement class for file operations -__show_trace = DEBUG == "DEBUG" # Determine if stack traces should be shown based on the debug level +file_management = ( + FileManagement() +) # Initialize the FileManagement class for file operations +__show_trace = ( + DEBUG == "DEBUG" +) # Determine if stack traces should be shown based on the debug level # Exception for handling object loading errors @@ -40,7 +44,9 @@ def __init__(self, message="Failed to load object", object_name=None): # Decorator for marking functions as deprecated [custom] -def deprecated(removal_version: str, reason: str, show_trace: bool = __show_trace) -> callable: +def deprecated( + removal_version: str, reason: str, show_trace: bool = __show_trace +) -> callable: """ Decorator function that marks a function as deprecated and provides a warning when the function is called. @@ -94,7 +100,7 @@ def wrapper(*args, **kwargs) -> callable: """ message = f"\033[91mDeprecationWarning: A call to the deprecated function {func.__name__}() has been called, {reason}. 
Function will be removed at version {removal_version}\n" if show_trace: - stack = ''.join(traceback.format_stack()[:-1]) + stack = "".join(traceback.format_stack()[:-1]) message += f"Called from:\n{stack}\033[0m" else: message += "\033[0m" @@ -106,4 +112,14 @@ def wrapper(*args, **kwargs) -> callable: return decorator -__all__ = ['execute', 'get', 'check', 'flag', 'file_management', 'deprecated', 'ObjectLoadError', 'log', 'Log'] +__all__ = [ + "execute", + "get", + "check", + "flag", + "file_management", + "deprecated", + "ObjectLoadError", + "log", + "Log", +] diff --git a/CODE/vulnscan.py b/CODE/vulnscan.py index 42b12133..4459cdb6 100644 --- a/CODE/vulnscan.py +++ b/CODE/vulnscan.py @@ -8,6 +8,7 @@ import aiofiles import joblib import numpy as np + # noinspection PyPackageRequirements import torch from pathlib import Path @@ -18,7 +19,9 @@ warnings.filterwarnings("ignore") -UNREADABLE_EXTENSIONS = config.get("VulnScan Settings", "unreadable_extensions").split(",") +UNREADABLE_EXTENSIONS = config.get("VulnScan Settings", "unreadable_extensions").split( + "," +) MAX_FILE_SIZE_MB = config.get("VulnScan Settings", "max_file_size_mb", fallback="None") raw_workers = config.get("VulnScan Settings", "max_workers", fallback="auto") max_workers = min(32, os.cpu_count() * 2) if raw_workers == "auto" else int(raw_workers) @@ -48,22 +51,27 @@ def _load_model(self) -> None: self.model = self.model_cache[self.model_path] return - if self.model_path.endswith('.pkl'): + if self.model_path.endswith(".pkl"): self.model = joblib.load(self.model_path) - elif self.model_path.endswith('.safetensors'): - self.model = safe_open(self.model_path, framework='torch') - elif self.model_path.endswith('.pth'): + elif self.model_path.endswith(".safetensors"): + self.model = safe_open(self.model_path, framework="torch") + elif self.model_path.endswith(".pth"): with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=FutureWarning) self.model = torch.load( self.model_path, - map_location=torch.device("cuda" if torch.cuda.is_available() else "cpu"), - weights_only=False + map_location=torch.device( + "cuda" if torch.cuda.is_available() else "cpu" + ), + weights_only=False, ) if not torch.cuda.is_available() and torch.version.cuda: log.warning( - "NVIDIA GPU detected but CUDA is not available. Check your PyTorch and CUDA installation to utilise as much power as possible.") - log.debug(f"Model using device: {torch.device('cuda' if torch.cuda.is_available() else 'cpu')}") + "NVIDIA GPU detected but CUDA is not available. Check your PyTorch and CUDA installation to utilise as much power as possible." 
+ ) + log.debug( + f"Model using device: {torch.device('cuda' if torch.cuda.is_available() else 'cpu')}" + ) else: raise ValueError("Unsupported model file format") @@ -94,22 +102,31 @@ def _is_sensitive(self, content: str) -> tuple[bool, float, str]: self.model.eval() indices = torch.LongTensor(np.vstack(features.nonzero())) values = torch.FloatTensor(features.data) - tensor = torch.sparse_coo_tensor(indices, values, size=features.shape).to(device) + tensor = torch.sparse_coo_tensor(indices, values, size=features.shape).to( + device + ) with torch.no_grad(): pred = self.model(tensor) prob = torch.softmax(pred, dim=1).max().item() - reason = ", ".join(self.vectorizer.get_feature_names_out()[i] for i in np.argsort(features.data)[-5:]) + reason = ", ".join( + self.vectorizer.get_feature_names_out()[i] + for i in np.argsort(features.data)[-5:] + ) return pred.argmax(dim=1).item() == 1, prob, reason else: probs = self.model.predict_proba(features) top_indices = np.argsort(features.toarray()[0])[-5:] - reason = ", ".join(self.vectorizer.get_feature_names_out()[i] for i in top_indices) + reason = ", ".join( + self.vectorizer.get_feature_names_out()[i] for i in top_indices + ) return self.model.predict(features)[0] == 1, probs.max(), reason async def scan_file_async(self, file_path: str) -> tuple[bool, float, str]: try: - async with aiofiles.open(file_path, 'r', encoding='utf-8', errors='ignore') as f: + async with aiofiles.open( + file_path, "r", encoding="utf-8", errors="ignore" + ) as f: content = await f.read() return self._is_sensitive(content) except Exception as e: @@ -135,7 +152,7 @@ def scan_directory(self, scan_paths: list[str]) -> None: for path in scan_paths: try: - all_files.extend(str(f) for f in Path(path).rglob('*') if f.is_file()) + all_files.extend(str(f) for f in Path(path).rglob("*") if f.is_file()) log.debug(f"Found {len(all_files)} files in {path}") except Exception as e: log.warning(f"Skipping path {path} due to error: {e}") @@ -168,19 +185,29 @@ async def scan_worker(scan_file): async with semaphore: result, prob, reason = await self.scanner.scan_file_async(scan_file) if result: - log.debug(f"SENSITIVE: {scan_file} | Confidence: {prob:.2f} | Reason: {reason}") + log.debug( + f"SENSITIVE: {scan_file} | Confidence: {prob:.2f} | Reason: {reason}" + ) sensitive_files.append(scan_file) tasks = [scan_worker(f) for f in valid_files] - with tqdm(total=len(valid_files), desc="\033[32mSCAN\033[0m \033[94mScanning Files\033[0m", - unit="file", bar_format="{l_bar} {bar} {n_fmt}/{total_fmt}\n") as pbar: + with tqdm( + total=len(valid_files), + desc="\033[32mSCAN\033[0m \033[94mScanning Files\033[0m", + unit="file", + bar_format="{l_bar} {bar} {n_fmt}/{total_fmt}\n", + ) as pbar: for f in asyncio.as_completed(tasks): await f pbar.update(1) with open("Sensitive_File_Paths.txt", "a") as out: - out.write("\n".join(sensitive_files) + "\n" if sensitive_files else "No sensitive files detected.\n") + out.write( + "\n".join(sensitive_files) + "\n" + if sensitive_files + else "No sensitive files detected.\n" + ) self.scanner.cleanup() @@ -191,7 +218,7 @@ async def scan_worker(scan_file): "C:\\Users\\", "C:\\Windows\\Logs", "C:\\Program Files", - "C:\\Program Files (x86)" + "C:\\Program Files (x86)", ] vulnscan = VulnScan("vulnscan/SenseMini.3n3.pth", "vulnscan/vectorizer.3n3.pkl") vulnscan.scan_directory(base_paths) diff --git a/PLANS.md b/PLANS.md index 6ca3f620..2a9bd992 100644 --- a/PLANS.md +++ b/PLANS.md @@ -2,13 +2,14 @@ > [!TIP] > Here is a key for the table above: +> > - ❌ ➡️ Might be 
done, Not sure yet > - ✅ ➡️ Will be done, 100% sure | Task | Version | Might or Will be done? | -|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|------------------------| -| Remake VulnScan .pkl and .pth to be more accurate | v3.6.0 | ❌ | -| Encrypted Volume Detection and Analysis, Advanced USB Device History Tracker | v3.6.0 | ❌ | -| Merge `sensitive data miner` with `vulnscan` to be 1 tool | v4.0.0 | ✅ | -| Remake Logicytics End-Execution cycle, where files created must go in `temp/` directory, and zipper takes it from there only, simplifying any code logic with this as well | v4.0.0 | ✅ | -| Replace Logger.py with Util that contains (tprint), also implement the ExceptionHandler and UpdateManager from Util | v4.0.0 | ✅ | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | ---------------------- | +| Remake VulnScan .pkl and .pth to be more accurate | v3.6.0 | ❌ | +| Encrypted Volume Detection and Analysis, Advanced USB Device History Tracker | v3.6.0 | ❌ | +| Merge `sensitive data miner` with `vulnscan` to be 1 tool | v4.0.0 | ✅ | +| Remake Logicytics End-Execution cycle, where files created must go in `temp/` directory, and zipper takes it from there only, simplifying any code logic with this as well | v4.0.0 | ✅ | +| Replace Logger.py with Util that contains (tprint), also implement the ExceptionHandler and UpdateManager from Util | v4.0.0 | ✅ | From a53eb72ab9602ec06a80bb41beddcbbccd892f0a Mon Sep 17 00:00:00 2001 From: Shahm Najeeb Date: Sun, 27 Jul 2025 17:59:45 +0400 Subject: [PATCH 12/12] Formatting Signed-off-by: Shahm Najeeb --- .github/ISSUE_TEMPLATE/bug_report.yml | 2 +- .github/ISSUE_TEMPLATE/dev_bug_report.yml | 2 +- .github/ISSUE_TEMPLATE/feature_request.yml | 2 +- .github/PULL_REQUEST_TEMPLATE.md | 3 +- .github/workflows/codeql.yml | 90 ++++++++-------- .github/workflows/dependency-review.yml | 2 +- .github/workflows/greetings.yml | 20 ++-- .github/workflows/stale.yml | 24 ++--- .pre-commit-config.yaml | 26 ++--- CODE/Logicytics.py | 10 +- CODE/_debug.py | 2 +- CODE/_dev.py | 2 +- CODE/browser_miner.ps1 | 112 ++++++++++---------- CODE/logicytics/FileManagement.py | 2 +- CODE/logicytics/Flag.py | 46 ++++----- CODE/logicytics/__init__.py | 4 +- CODE/property_scraper.ps1 | 4 +- CODE/sensitive_data_miner.py | 1 + CODE/sys_internal.py | 6 +- CODE/vulnscan.py | 11 +- CONTRIBUTING.md | 6 +- CREDITS.md | 5 +- MODS/_MOD_SKELETON.py | 1 + PLANS.md | 12 +-- README.md | 115 ++++++++++++--------- SECURITY.md | 1 - 26 files changed, 270 insertions(+), 241 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 7029f67c..f813bca4 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -1,7 +1,7 @@ name: Report a bug description: Tell us about a bug or issue you may have identified in Logicytics. 
title: "Provide a general summary of the issue" -labels: ["progress/Unreviewed"] +labels: [ "progress/Unreviewed" ] assignees: "DefinetlyNotAI" body: - type: checkboxes diff --git a/.github/ISSUE_TEMPLATE/dev_bug_report.yml b/.github/ISSUE_TEMPLATE/dev_bug_report.yml index ab39b3eb..18287a04 100644 --- a/.github/ISSUE_TEMPLATE/dev_bug_report.yml +++ b/.github/ISSUE_TEMPLATE/dev_bug_report.yml @@ -1,7 +1,7 @@ name: Report a development bug description: Tell us about a bug or issue you may have identified while developing Logicytics. title: "Provide a general summary of the development issue" -labels: ["progress/Unreviewed", "type/Development"] +labels: [ "progress/Unreviewed", "type/Development" ] assignees: "DefinetlyNotAI" body: - type: checkboxes diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index c4579666..27e51ef3 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -1,7 +1,7 @@ name: Feature request description: Suggest new or updated features to include in Logicytics. title: "Suggest a new feature" -labels: ["progress/Unreviewed"] +labels: [ "progress/Unreviewed" ] assignees: "DefinetlyNotAI" body: - type: checkboxes diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 8583fa4f..e62326c4 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -7,7 +7,8 @@ - [ ] I have [searched](https://github.com/DefinetlyNotAI/Logicytics/pulls) for duplicate or closed issues. - [ ] I have read the [contributing guidelines](https://github.com/DefinetlyNotAI/Logicytics/blob/main/CONTRIBUTING.md). -- [ ] I have followed the instructions in the [wiki](https://github.com/DefinetlyNotAI/Logicytics/wiki) about contributions. +- [ ] I have followed the instructions in the [wiki](https://github.com/DefinetlyNotAI/Logicytics/wiki) about + contributions. - [ ] I have updated the documentation accordingly, if required. - [ ] I have tested my code with the `--dev` flag, if required. 
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 868a3d4f..c1e01ac9 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,10 +46,10 @@ jobs: fail-fast: false matrix: include: - - language: actions - build-mode: none - - language: python - build-mode: none + - language: actions + build-mode: none + - language: python + build-mode: none # CodeQL supports the following values keywords for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'rust', 'swift' # Use `c-cpp` to analyze code written in C, C++ or both # Use 'java-kotlin' to analyze code written in Java, Kotlin or both @@ -59,50 +59,50 @@ jobs: # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages steps: - - name: Harden the runner (Audit all outbound calls) - uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0 - with: - egress-policy: audit + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0 + with: + egress-policy: audit - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - # Add any setup steps before running the `github/codeql-action/init` action. - # This includes steps like installing compilers or runtimes (`actions/setup-node` - # or others). This is typically only required for manual builds. - # - name: Setup runtime (example) - # uses: actions/setup-example@v1 + # Add any setup steps before running the `github/codeql-action/init` action. + # This includes steps like installing compilers or runtimes (`actions/setup-node` + # or others). This is typically only required for manual builds. + # - name: Setup runtime (example) + # uses: actions/setup-example@v1 - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 - with: - languages: ${{ matrix.language }} - build-mode: ${{ matrix.build-mode }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. - # Prefix the list here with "+" to use these queries and those in the config file. + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 + with: + languages: ${{ matrix.language }} + build-mode: ${{ matrix.build-mode }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. 
- # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs - # queries: security-extended,security-and-quality + # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality - # If the analyze step fails for one of the languages you are analyzing with - # "We were unable to automatically build your code", modify the matrix above - # to set the build mode to "manual" for that language. Then modify this step - # to build your code. - # ℹ️ Command-line programs to run using the OS shell. - # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun - - if: matrix.build-mode == 'manual' - shell: bash - run: | - echo 'If you are using a "manual" build mode for one or more of the' \ - 'languages you are analyzing, replace this with the commands to build' \ - 'your code, for example:' - echo ' make bootstrap' - echo ' make release' - exit 1 + # If the analyze step fails for one of the languages you are analyzing with + # "We were unable to automatically build your code", modify the matrix above + # to set the build mode to "manual" for that language. Then modify this step + # to build your code. + # ℹ️ Command-line programs to run using the OS shell. + # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + - if: matrix.build-mode == 'manual' + shell: bash + run: | + echo 'If you are using a "manual" build mode for one or more of the' \ + 'languages you are analyzing, replace this with the commands to build' \ + 'your code, for example:' + echo ' make bootstrap' + echo ' make release' + exit 1 - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 - with: - category: "/language:${{matrix.language}}" + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 71ddc655..08cf6a47 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -7,7 +7,7 @@ # # Source repository: https://github.com/actions/dependency-review-action name: 'Dependency Review' -on: [pull_request] +on: [ pull_request ] permissions: contents: read diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 794f83e4..3d572343 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -1,6 +1,6 @@ name: Greetings -on: [pull_request_target, issues] +on: [ pull_request_target, issues ] permissions: contents: read @@ -12,13 +12,13 @@ jobs: issues: write pull-requests: write steps: - - name: Harden Runner - uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0 - with: - egress-policy: audit + - name: Harden Runner + uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0 + with: + egress-policy: audit - - uses: actions/first-interaction@3c71ce730280171fd1cfb57c00c774f8998586f7 # v1 - with: - repo-token: ${{ 
secrets.GITHUB_TOKEN }} - issue-message: "Hi! Thanks for pointing out an issue/suggestion for the first time to Logicytics 🤗 We hope it goes as smoothly as possible." - pr-message: "Hi! Thanks for contributing for the first time to Logicytics 🤗 We hope it goes as smoothly as possible and appreciate your valuable contribution." + - uses: actions/first-interaction@3c71ce730280171fd1cfb57c00c774f8998586f7 # v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + issue-message: "Hi! Thanks for pointing out an issue/suggestion for the first time to Logicytics 🤗 We hope it goes as smoothly as possible." + pr-message: "Hi! Thanks for contributing for the first time to Logicytics 🤗 We hope it goes as smoothly as possible and appreciate your valuable contribution." diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 1d56287e..0ead14fe 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -7,7 +7,7 @@ name: Mark stale issues and pull requests on: schedule: - - cron: '00 00 * * *' + - cron: '00 00 * * *' permissions: contents: read @@ -21,15 +21,15 @@ jobs: pull-requests: write steps: - - name: Harden Runner - uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0 - with: - egress-policy: audit + - name: Harden Runner + uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0 + with: + egress-policy: audit - - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - stale-issue-message: 'Stale issue message' - stale-pr-message: 'Stale pull request message' - stale-issue-label: 'progress/Stale' - stale-pr-label: 'progress/Stale' + - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: 'Stale issue message' + stale-pr-message: 'Stale pull request message' + stale-issue-label: 'progress/Stale' + stale-pr-label: 'progress/Stale' diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b09832e4..5c46e656 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,14 +1,14 @@ repos: -- repo: https://github.com/gitleaks/gitleaks - rev: v8.16.3 - hooks: - - id: gitleaks -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 - hooks: - - id: end-of-file-fixer - - id: trailing-whitespace -- repo: https://github.com/pylint-dev/pylint - rev: v2.17.2 - hooks: - - id: pylint + - repo: https://github.com/gitleaks/gitleaks + rev: v8.16.3 + hooks: + - id: gitleaks + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: end-of-file-fixer + - id: trailing-whitespace + - repo: https://github.com/pylint-dev/pylint + rev: v2.17.2 + hooks: + - id: pylint diff --git a/CODE/Logicytics.py b/CODE/Logicytics.py index 2ef89a10..80c59791 100644 --- a/CODE/Logicytics.py +++ b/CODE/Logicytics.py @@ -242,9 +242,9 @@ def __performance(self): try: with open( - f"../ACCESS/LOGS/PERFORMANCE/Performance_Summary_" - f"{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt", - "w", + f"../ACCESS/LOGS/PERFORMANCE/Performance_Summary_" + f"{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt", + "w", ) as f: f.write(table.get_string()) f.write("\nNote: This test only measures execution time.\n") @@ -270,8 +270,8 @@ def update() -> tuple[str, str]: # Check if git command is available try: if ( - subprocess.run(["git", "--version"], capture_output=True).returncode - != 0 + subprocess.run(["git", "--version"], 
capture_output=True).returncode + != 0 ): return "Git is not installed or not available in the PATH.", "error" except FileNotFoundError: diff --git a/CODE/_debug.py b/CODE/_debug.py index f3653693..87cc8a14 100644 --- a/CODE/_debug.py +++ b/CODE/_debug.py @@ -96,7 +96,7 @@ def check_required_files(directory: str, required_files: list[str]): @staticmethod def compare_files( - actual_files: list[str], required_files: list[str] + actual_files: list[str], required_files: list[str] ) -> tuple[list[str], list[str]]: """ Compares actual and required files, returning missing and extra files. diff --git a/CODE/_dev.py b/CODE/_dev.py index 2f32cfd7..b9863125 100644 --- a/CODE/_dev.py +++ b/CODE/_dev.py @@ -58,7 +58,7 @@ def _update_ini_file(filename: str, new_data: list | str, key: str) -> None: def _prompt_user( - question: str, file_to_open: str = None, special: bool = False + question: str, file_to_open: str = None, special: bool = False ) -> bool: """ Prompts the user with a yes/no question and optionally opens a file. diff --git a/CODE/browser_miner.ps1 b/CODE/browser_miner.ps1 index b689cbcc..f0be76b0 100644 --- a/CODE/browser_miner.ps1 +++ b/CODE/browser_miner.ps1 @@ -1,28 +1,28 @@ # Define the list of source paths with placeholders $sourcePaths = @( - "C:\Users\{}\AppData\Local\Microsoft\Edge\User Data\Default\Network", - "C:\Users\{}\AppData\Local\Google\Chrome\User Data\Default\Network", - "C:\Users\{}\AppData\Roaming\Mozilla\Firefox\Profiles", - "C:\Users\{}\AppData\Roaming\Opera Software\Opera Stable\Network", - "C:\Users\{}\AppData\Roaming\Opera Software\Opera GX Stable\Network", - 'C:\\Windows\\System32\\config', - 'C:\\Windows\\System32\\GroupPolicy', - 'C:\\Windows\\System32\\GroupPolicyUsers', - 'C:\\Windows\\System32\\winevt\\Logs' + "C:\Users\{}\AppData\Local\Microsoft\Edge\User Data\Default\Network", + "C:\Users\{}\AppData\Local\Google\Chrome\User Data\Default\Network", + "C:\Users\{}\AppData\Roaming\Mozilla\Firefox\Profiles", + "C:\Users\{}\AppData\Roaming\Opera Software\Opera Stable\Network", + "C:\Users\{}\AppData\Roaming\Opera Software\Opera GX Stable\Network", + 'C:\\Windows\\System32\\config', + 'C:\\Windows\\System32\\GroupPolicy', + 'C:\\Windows\\System32\\GroupPolicyUsers', + 'C:\\Windows\\System32\\winevt\\Logs' ) # Define the list of identifiers for renaming $identifiers = @( - "Edge", - "Chrome", - "Firefox", - "OperaStable", - "OperaGXStable", - "SAM", - "SystemConfig", - "GroupPolicy", - "GroupPolicyUsers", - "WindowsEventLogs" + "Edge", + "Chrome", + "Firefox", + "OperaStable", + "OperaGXStable", + "SAM", + "SystemConfig", + "GroupPolicy", + "GroupPolicyUsers", + "WindowsEventLogs" ) # Get the current user's name @@ -34,56 +34,56 @@ $baseDirectory = "Browser_Data" # Function to check if a path exists and is accessible function Test-PathAndAccess($path) { -return Test-Path $path -PathType Container -ErrorAction SilentlyContinue + return Test-Path $path -PathType Container -ErrorAction SilentlyContinue } # Loop through each source path foreach ($sourcePath in $sourcePaths) { -# Replace the placeholder with the current user's name -$fullSourcePath = $sourcePath -replace '\{\}', $currentUser + # Replace the placeholder with the current user's name + $fullSourcePath = $sourcePath -replace '\{\}', $currentUser -# Enhanced error checking for source path existence and accessibility -if (-not (Test-PathAndAccess $fullSourcePath)) -{ - Write-Host "WARNING: Source path $fullSourcePath does not exist or cannot be accessed." 
-continue -} + # Enhanced error checking for source path existence and accessibility + if (-not (Test-PathAndAccess $fullSourcePath)) + { + Write-Host "WARNING: Source path $fullSourcePath does not exist or cannot be accessed." + continue + } -# Extract the identifier from the source path using the corresponding Index from the $identifiers array -try + # Extract the identifier from the source path using the corresponding Index from the $identifiers array + try { -$index = [Array]::IndexOf($identifiers, $sourcePath.Split('\')[-1].Split('\\')[-1]) -$identifier = $identifiers[$index] -} -catch -{ - Write-Host "ERROR: Failed to extract identifier from source path $fullSourcePath." -continue -} + $index = [Array]::IndexOf($identifiers, $sourcePath.Split('\')[-1].Split('\\')[-1]) + $identifier = $identifiers[$index] + } + catch + { + Write-Host "ERROR: Failed to extract identifier from source path $fullSourcePath." + continue + } -# Define the destination path -$destinationPath = Join-Path -Path $baseDirectory -ChildPath "USER_$identifier" + # Define the destination path + $destinationPath = Join-Path -Path $baseDirectory -ChildPath "USER_$identifier" -# Enhanced error checking for destination directory existence -if (-not (Test-PathAndAccess $destinationPath)) -{ -New-Item -ItemType Directory -Path $destinationPath -Force | Out-Null -} + # Enhanced error checking for destination directory existence + if (-not (Test-PathAndAccess $destinationPath)) + { + New-Item -ItemType Directory -Path $destinationPath -Force | Out-Null + } -# Attempt to copy the folder to the DATA directory and rename it -try + # Attempt to copy the folder to the DATA directory and rename it + try { Copy-Item -Path $fullSourcePath -Destination $destinationPath -Recurse -Force -ErrorAction SilentlyContinue -# Print the success message to the console -Write-Host "INFO: Successfully copied $fullSourcePath to $destinationPath" -} -catch -{ - # Detailed error handling - Write-Host "ERROR: An error occurred while copying $fullSourcePath to $destinationPath : $_" -exit -} + # Print the success message to the console + Write-Host "INFO: Successfully copied $fullSourcePath to $destinationPath" + } + catch + { + # Detailed error handling + Write-Host "ERROR: An error occurred while copying $fullSourcePath to $destinationPath : $_" + exit + } } diff --git a/CODE/logicytics/FileManagement.py b/CODE/logicytics/FileManagement.py index b82b7f26..a2633feb 100644 --- a/CODE/logicytics/FileManagement.py +++ b/CODE/logicytics/FileManagement.py @@ -103,7 +103,7 @@ def __get_files_to_zip(path: str) -> list: f for f in os.listdir(path) if not f.endswith(excluded_extensions) - and not f.startswith(excluded_prefixes) + and not f.startswith(excluded_prefixes) ] @staticmethod diff --git a/CODE/logicytics/Flag.py b/CODE/logicytics/Flag.py index 3cc50783..b93d98b4 100644 --- a/CODE/logicytics/Flag.py +++ b/CODE/logicytics/Flag.py @@ -266,7 +266,7 @@ def load_history() -> dict: """ try: with gzip.open( - HISTORY_FILE, "rt", encoding="utf-8" + HISTORY_FILE, "rt", encoding="utf-8" ) as f: # Use 'rt' mode for text read return json.load(f) except FileNotFoundError: @@ -291,7 +291,7 @@ def save_history(history_data: dict): """ if SAVE_PREFERENCES: with gzip.open( - HISTORY_FILE, "wt", encoding="utf-8" + HISTORY_FILE, "wt", encoding="utf-8" ) as f: # Use 'wt' mode for text write json.dump(history_data, f, indent=4) @@ -351,7 +351,7 @@ def update_history(cls, user_input: str, matched_flag: str, accuracy: float): @classmethod def flag( - cls, user_input: str, 
flags: list[str], flag_description: list[str] + cls, user_input: str, flags: list[str], flag_description: list[str] ) -> tuple[str, float]: """ Matches user input to flag descriptions using advanced semantic similarity. @@ -438,7 +438,7 @@ def __colorify(cls, text: str, color: str) -> str: @classmethod def __available_arguments( - cls, + cls, ) -> tuple[argparse.Namespace, argparse.ArgumentParser]: """ Defines and parses command-line arguments for the Logicytics application. @@ -455,8 +455,8 @@ def __available_arguments( # Define the argument parser parser = argparse.ArgumentParser( description="Logicytics, The most powerful tool for system data analysis. " - "This tool provides a comprehensive suite of features for analyzing system data, " - "including various modes for different levels of detail and customization.", + "This tool provides a comprehensive suite of features for analyzing system data, " + "including various modes for different levels of detail and customization.", allow_abbrev=False, ) @@ -465,37 +465,37 @@ def __available_arguments( "--default", action="store_true", help="Runs Logicytics with its default settings and scripts. " - f"{cls.__colorify('- Recommended for most users -', 'b')}", + f"{cls.__colorify('- Recommended for most users -', 'b')}", ) parser.add_argument( "--threaded", action="store_true", help="Runs Logicytics using threads, where it runs in parallel, default settings though" - f"{cls.__colorify('- Recommended for some users -', 'b')}", + f"{cls.__colorify('- Recommended for some users -', 'b')}", ) parser.add_argument( "--modded", action="store_true", help="Runs the normal Logicytics, as well as any File in the MODS directory, " - "Used for custom scripts as well as default ones.", + "Used for custom scripts as well as default ones.", ) parser.add_argument( "--depth", action="store_true", help="This flag will run all default script's in threading mode, " - "as well as any clunky and huge code, which produces a lot of data " - f"{cls.__colorify('- Will take a long time -', 'y')}", + "as well as any clunky and huge code, which produces a lot of data " + f"{cls.__colorify('- Will take a long time -', 'y')}", ) parser.add_argument( "--nopy", action="store_true", help="Run Logicytics using all non-python scripts, " - f"These may be {cls.__colorify('outdated', 'y')} " - "and not the best, use only if the device doesnt have python installed.", + f"These may be {cls.__colorify('outdated', 'y')} " + "and not the best, use only if the device doesnt have python installed.", ) # TODO v3.6.0 -> Out of beta @@ -503,9 +503,9 @@ def __available_arguments( "--vulnscan-ai", action="store_true", help="Run's Logicytics new Sensitive data Detection AI, its a new feature that will " - "detect any files that are out of the ordinary, and logs their path. Runs threaded." - f"{cls.__colorify('- Beta Mode -', 'y')} " - f"{cls.__colorify('- Will take a long time -', 'y')}", + "detect any files that are out of the ordinary, and logs their path. Runs threaded." + f"{cls.__colorify('- Beta Mode -', 'y')} " + f"{cls.__colorify('- Will take a long time -', 'y')}", ) parser.add_argument( @@ -518,7 +518,7 @@ def __available_arguments( "--performance-check", action="store_true", help="Run's Logicytics default while testing its performance and time, " - "this then shows a table with the file names and time to executed. ", + "this then shows a table with the file names and time to executed. 
", ) parser.add_argument( @@ -532,24 +532,24 @@ def __available_arguments( "--debug", action="store_true", help="Runs the Debugger, Will check for any issues, " - "warning etc, useful for debugging and issue reporting " - f"{cls.__colorify('- Use to get a special log file to report the bug -', 'b')}.", + "warning etc, useful for debugging and issue reporting " + f"{cls.__colorify('- Use to get a special log file to report the bug -', 'b')}.", ) parser.add_argument( "--update", action="store_true", help="Update Logicytics from GitHub, only if you have git properly installed " - "and the project was downloaded via git " - f"{cls.__colorify('- Use on your own device only -', 'y')}.", + "and the project was downloaded via git " + f"{cls.__colorify('- Use on your own device only -', 'y')}.", ) parser.add_argument( "--dev", action="store_true", help="Run Logicytics developer mod, this is only for people who want to " - "register their contributions properly. " - f"{cls.__colorify('- Use on your own device only -', 'y')}.", + "register their contributions properly. " + f"{cls.__colorify('- Use on your own device only -', 'y')}.", ) # Define After-Execution Flags diff --git a/CODE/logicytics/__init__.py b/CODE/logicytics/__init__.py index fb07ac86..e9637974 100644 --- a/CODE/logicytics/__init__.py +++ b/CODE/logicytics/__init__.py @@ -21,7 +21,7 @@ FileManagement() ) # Initialize the FileManagement class for file operations __show_trace = ( - DEBUG == "DEBUG" + DEBUG == "DEBUG" ) # Determine if stack traces should be shown based on the debug level @@ -45,7 +45,7 @@ def __init__(self, message="Failed to load object", object_name=None): # Decorator for marking functions as deprecated [custom] def deprecated( - removal_version: str, reason: str, show_trace: bool = __show_trace + removal_version: str, reason: str, show_trace: bool = __show_trace ) -> callable: """ Decorator function that marks a function as deprecated diff --git a/CODE/property_scraper.ps1 b/CODE/property_scraper.ps1 index ce8ab6fd..93ef3524 100644 --- a/CODE/property_scraper.ps1 +++ b/CODE/property_scraper.ps1 @@ -14,8 +14,8 @@ $rootDrive = $env:SystemDrive # Prepare the data to be written to the File $data = @" Property(C): Windows Build = $buildNumber -Property(C): Physical Memory = $($physicalMemory -as [int]) -Property(C): Virtual Memory = $($virtualMemory -as [int]) +Property(C): Physical Memory = $( $physicalMemory -as [int] ) +Property(C): Virtual Memory = $( $virtualMemory -as [int] ) Property(C): Log on User = $userName Property(C): User SID = $userSid Property(C): User Language ID = $userLanguageId diff --git a/CODE/sensitive_data_miner.py b/CODE/sensitive_data_miner.py index e884a6f4..89b92324 100644 --- a/CODE/sensitive_data_miner.py +++ b/CODE/sensitive_data_miner.py @@ -1,6 +1,7 @@ import os import shutil from concurrent.futures import ThreadPoolExecutor + from pathlib import Path from logicytics import log diff --git a/CODE/sys_internal.py b/CODE/sys_internal.py index 81ac6685..da71bea3 100644 --- a/CODE/sys_internal.py +++ b/CODE/sys_internal.py @@ -49,9 +49,9 @@ def sys_internal(): # Optionally, handle errors if any if ( - result.stderr.decode() != "" - and result.returncode != 0 - and result.stderr.decode() is not None + result.stderr.decode() != "" + and result.returncode != 0 + and result.stderr.decode() is not None ): log.warning(f"{executable}: {result.stderr.decode()}") outfile.write(f"{executable}:\n{result.stderr.decode()}") diff --git a/CODE/vulnscan.py b/CODE/vulnscan.py index 4459cdb6..72c6c0ea 100644 --- 
a/CODE/vulnscan.py +++ b/CODE/vulnscan.py @@ -8,7 +8,6 @@ import aiofiles import joblib import numpy as np - # noinspection PyPackageRequirements import torch from pathlib import Path @@ -125,7 +124,7 @@ def _is_sensitive(self, content: str) -> tuple[bool, float, str]: async def scan_file_async(self, file_path: str) -> tuple[bool, float, str]: try: async with aiofiles.open( - file_path, "r", encoding="utf-8", errors="ignore" + file_path, "r", encoding="utf-8", errors="ignore" ) as f: content = await f.read() return self._is_sensitive(content) @@ -193,10 +192,10 @@ async def scan_worker(scan_file): tasks = [scan_worker(f) for f in valid_files] with tqdm( - total=len(valid_files), - desc="\033[32mSCAN\033[0m \033[94mScanning Files\033[0m", - unit="file", - bar_format="{l_bar} {bar} {n_fmt}/{total_fmt}\n", + total=len(valid_files), + desc="\033[32mSCAN\033[0m \033[94mScanning Files\033[0m", + unit="file", + bar_format="{l_bar} {bar} {n_fmt}/{total_fmt}\n", ) as pbar: for f in asyncio.as_completed(tasks): await f diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 267635fc..f9be04e7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -42,7 +42,8 @@ If you don't get a response immediately, it doesn't mean that we are ignoring you or that we don't care about your issue or PR. We will get back to you as soon as we can. -If you decide to pull a PR or fork the project, keep in mind that you should only add/edit the scripts you need to, leave core files alone. +If you decide to pull a PR or fork the project, keep in mind that you should only add/edit the scripts you need to, +leave core files alone. ## Guidelines for Modifications 📃 @@ -118,6 +119,7 @@ You also agree to the [Developer Certificate of Origin](DCO.md). - **Issues**: Use GitHub issues for bug reports and feature requests. Keep the discussion focused and relevant. - **Pull Requests**: Use pull requests to propose changes. Be prepared to discuss your changes and address any feedback. -If you have any questions or need further clarification, please feel free to [contact](mailto:Nirt_12023@outlook.com) me. +If you have any questions or need further clarification, please feel free to [contact](mailto:Nirt_12023@outlook.com) +me. Thank you for your contributions! diff --git a/CREDITS.md b/CREDITS.md index 35dc7568..c66fcfec 100644 --- a/CREDITS.md +++ b/CREDITS.md @@ -16,11 +16,13 @@ Until Now, no one. Please think of sparing a dollar ❤️ ## 👨‍💻 Coders Credits 👨‍💻 ### Wifi-Stealer.py, bluetooth_details.py and bluetooth_logger.py by ski-sketch + Created Wi-Fi Password Stealer using python The sole creator of the code of wifi-stealer Also created bluetooth_details.py and bluetooth_logger.py which are used to get the details of the bluetooth devices and log the details of the bluetooth devices respectively + - [ski-sketch](https://github.com/ski-sketch) ## 🛠️ Refactorers Credits 🛠️ @@ -34,10 +36,11 @@ Until Now, no one. Become a contributor and help us spread the word. 
## 🐛 Bug bounty credits 🐛 ### Found development bug + Found and attempted fix of 2 bugs: Zipping name error - `--dev` flag loop + - [ski-sketch](https://github.com/ski-sketch) - # Acknowledgments This project would not be possible without the contributions and inspirations from the above-mentioned individuals and diff --git a/MODS/_MOD_SKELETON.py b/MODS/_MOD_SKELETON.py index 28f1e44b..c77a6ecd 100644 --- a/MODS/_MOD_SKELETON.py +++ b/MODS/_MOD_SKELETON.py @@ -6,6 +6,7 @@ # To know more check the WiKi from logicytics import log # And more if needed + # Your actual code, must be able to run without any interference by outside actions # USE log.debug, log.info, log.error, log.warning and log.critical and log.string as well # You can choose to use any other of the code without issues diff --git a/PLANS.md b/PLANS.md index 2a9bd992..099bae94 100644 --- a/PLANS.md +++ b/PLANS.md @@ -7,9 +7,9 @@ > - ✅ ➡️ Will be done, 100% sure | Task | Version | Might or Will be done? | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | ---------------------- | -| Remake VulnScan .pkl and .pth to be more accurate | v3.6.0 | ❌ | -| Encrypted Volume Detection and Analysis, Advanced USB Device History Tracker | v3.6.0 | ❌ | -| Merge `sensitive data miner` with `vulnscan` to be 1 tool | v4.0.0 | ✅ | -| Remake Logicytics End-Execution cycle, where files created must go in `temp/` directory, and zipper takes it from there only, simplifying any code logic with this as well | v4.0.0 | ✅ | -| Replace Logger.py with Util that contains (tprint), also implement the ExceptionHandler and UpdateManager from Util | v4.0.0 | ✅ | +|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|------------------------| +| Remake VulnScan .pkl and .pth to be more accurate | v3.6.0 | ❌ | +| Encrypted Volume Detection and Analysis, Advanced USB Device History Tracker | v3.6.0 | ❌ | +| Merge `sensitive data miner` with `vulnscan` to be 1 tool | v4.0.0 | ✅ | +| Remake Logicytics End-Execution cycle, where files created must go in `temp/` directory, and zipper takes it from there only, simplifying any code logic with this as well | v4.0.0 | ✅ | +| Replace Logger.py with Util that contains (tprint), also implement the ExceptionHandler and UpdateManager from Util | v4.0.0 | ✅ | diff --git a/README.md b/README.md index f0345e5f..6d27f0c9 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Logicytics: System Data Harvester 📎 -Logicytics is a cutting-edge tool designed to +Logicytics is a cutting-edge tool designed to meticulously harvest and collect a vast array of Windows system data for forensic analysis. Crafted with Python 🐍, it's an actively developed project that is dedicated to gathering as much sensitive data as possible and packaging it neatly into a ZIP file 📦. @@ -22,17 +22,21 @@ This comprehensive guide is here to equip you with everything you need to use Lo > [!CAUTION] -> By using this software, you agree to the license, and agree that you hold responsibility of how you use and modify the code. +> By using this software, you agree to the license, and agree that you hold responsibility of how you use and modify the +> code. ## 🛠️ Installation and Setup 🛠️ To install and setup Logicytics, follow these steps: -1. 
**Install Python**: If you don't have Python installed, you can download it from the [official website](https://www.python.org/downloads/). +1. **Install Python**: If you don't have Python installed, you can download it from + the [official website](https://www.python.org/downloads/). -2. **Install Dependencies**: Logicytics requires Python modules. You can install all the required modules by running the following command in your terminal: `pip install -r requirements.txt` +2. **Install Dependencies**: Logicytics requires Python modules. You can install all the required modules by running the + following command in your terminal: `pip install -r requirements.txt` -3. **Run Logicytics**: To run Logicytics, simply run the following command in your terminal: `python Logicytics.py -h` - This opens a help menu. +3. **Run Logicytics**: To run Logicytics, simply run the following command in your terminal: `python Logicytics.py -h` - + This opens a help menu. > [!IMPORTANT] > We recommend Python Version `3.11` or higher, as the project is developed and tested on this version. @@ -46,61 +50,76 @@ To install and setup Logicytics, follow these steps: ### Prerequisites -- **Python**: The project requires Python 3.8 or higher. You can download Python from the [official website](https://www.python.org/downloads/). +- **Python**: The project requires Python 3.8 or higher. You can download Python from + the [official website](https://www.python.org/downloads/). -- **Dependencies**: The project requires certain Python modules to be installed. You can install all the required modules by running the following command in your terminal: `pip install -r requirements.txt`. +- **Dependencies**: The project requires certain Python modules to be installed. You can install all the required + modules by running the following command in your terminal: `pip install -r requirements.txt`. -- **Administrative Privileges**: To be able to run the program using certain features of the project, like registry modification, you must run the program with administrative privileges. +- **Administrative Privileges**: To be able to run the program using certain features of the project, like registry + modification, you must run the program with administrative privileges. -- **System Requirements**: The project has been tested on Windows 10 and 11. It will not work on other operating systems. +- **System Requirements**: The project has been tested on Windows 10 and 11. It will not work on other operating + systems. -- **Knowledge of Command Line**: The project uses command line options for the user to interact with the program. It is recommended to have a basic understanding of command line options. +- **Knowledge of Command Line**: The project uses command line options for the user to interact with the program. It is + recommended to have a basic understanding of command line options. > [!IMPORTANT] -> You may create a `.sys.ignore` file in the `CODE/SysInternal_Suite` directory to not extract the exe binaries from the ZIP file (This is done for the OpenSSF score and to discourage binaries being used without source code), if the `.sys.ignore` file is not found, it will auto extract the binaries and run them using `Logicytics`. 
+> You may create a `.sys.ignore` file in the `CODE/SysInternal_Suite` directory to not extract the exe binaries from the +> ZIP file (This is done for the OpenSSF score and to discourage binaries being used without source code), if the +`.sys.ignore` file is not found, it will auto extract the binaries and run them using `Logicytics`. > -> For more details on these binaries, go [here](https://learn.microsoft.com/en-us/sysinternals/downloads/sysinternals-suite) - For you weary cautious internet crusaders, you can view the [source code here](https://github.com/MicrosoftDocs/sysinternals) and compare hashes and perform your audits. +> For more details on these binaries, +> go [here](https://learn.microsoft.com/en-us/sysinternals/downloads/sysinternals-suite) - For you weary cautious +> internet +> crusaders, you can view the [source code here](https://github.com/MicrosoftDocs/sysinternals) and compare hashes and +> perform your audits. ## Step-by-Step Installation and Usage 1) Install Python 🐍 -If you don't have Python installed, you can download it from the official website. -Make sure to select the option to "Add Python to PATH" during installation. + If you don't have Python installed, you can download it from the official + website. + Make sure to select the option to "Add Python to PATH" during installation. 2) Install Dependencies 📦 -Logicytics requires Python modules. You can install all the required modules by running the following command in your terminal: -`pip install -r requirements.txt` + Logicytics requires Python modules. You can install all the required modules by running the following command in your + terminal: + `pip install -r requirements.txt` 3) Run Logicytics 🚀 -To run Logicytics, simply run the following command in your terminal: -python Logicytics.py -h -This opens a help menu. + To run Logicytics, simply run the following command in your terminal: + python Logicytics.py -h + This opens a help menu. 4) Run the Program 👾 -Once you have run the program, you can run the program with the following command: -`python Logicytics.py -h` -Replace the flags with the ones you want to use. -you must have admin privileges while running! + Once you have run the program, you can run the program with the following command: + `python Logicytics.py -h` + Replace the flags with the ones you want to use. + you must have admin privileges while running! > [!TIP] -> Although it's really recommended to use admin, by setting debug in the config.json to true, you can bypass this requirement +> Although it's really recommended to use admin, by setting debug in the config.json to true, you can bypass this +> requirement 5) Wait for magic to happen 🧙‍♀️ -Logicytics will now run and gather data according to the flags you used. + Logicytics will now run and gather data according to the flags you used. 6) Enjoy the gathered data 🎉 -Once the program has finished running, you can find the gathered data in the "ACCESS/DATA" folder. Both Zip and Hash will be found there. + Once the program has finished running, you can find the gathered data in the "ACCESS/DATA" folder. Both Zip and Hash + will be found there. > [!NOTE] > All Zips and Hashes follow a conventional naming mechanism that goes as follows > `Logicytics_{CODE-or-MODS}_{Flag-Used}_{Date-And-Time}.zip` 7) Share the love ❤️ -If you like Logicytics, please consider sharing it with others or spreading the word about it. + If you like Logicytics, please consider sharing it with others or spreading the word about it. 
8) Contribute to the project 👥 -If you have an idea or want to contribute to the project, you can submit an issue or PR on the GitHub repository. - + If you have an idea or want to contribute to the project, you can submit an issue or PR on + the GitHub repository. ### Basic Usage @@ -108,7 +127,8 @@ After running and successfully collecting data, you may traverse the ACCESS dire Remove add and delete files, it's the safe directory where your backups, hashes, data zips and logs are found. > [!TIP] -> Watch this [video](https://www.youtube.com/watch?v=XVTBmdTQqOs) to see a real life demo of Logicytics (Although the tools and interface may be changed as it's an older version `2.1.1` - `2.3.3`) +> Watch this [video](https://www.youtube.com/watch?v=XVTBmdTQqOs) to see a real life demo of Logicytics (Although the +> tools and interface may be changed as it's an older version `2.1.1` - `2.3.3`) ## 🔧 Configuration 🔧 @@ -120,7 +140,7 @@ The config.ini file is used to store the DEBUG flag bool, the VERSION, and the C It is also used to store and save settings for other programs. > [!TIP] -> CURRENT_FILES is an array of strings that contains the names of the files you have, +> CURRENT_FILES is an array of strings that contains the names of the files you have, > this is used to later check for corruption or bugs. > VERSION is the version of the project, used to check and pull for updates. @@ -130,23 +150,23 @@ Mods are special files that are run with the `--modded` flag. These files are essentially scripts that are run after the main Logicytics.py script is run and the verified scripts are run. -They are used to add extra functionality to the script. -They are located in the `MODS` directory. In order to make a mod, +They are used to add extra functionality to the script. +They are located in the `MODS` directory. In order to make a mod, you need to create a python file with the `.py` extension or any of the supported extensions `.exe .ps1 .bat` -in the `MODS` directory. +in the `MODS` directory. -These file will be run after the main script is run. -When making a mod, you should avoid acting based on other files directly, -as this can cause conflicts with the data harvesting. -Instead, you should use the `Logicytics.py` file and other scripts as a reference +These file will be run after the main script is run. +When making a mod, you should avoid acting based on other files directly, +as this can cause conflicts with the data harvesting. +Instead, you should use the `Logicytics.py` file and other scripts as a reference for how to add features to the script. -The `--modded` flag is used to run all files in the `MODS` directory. +The `--modded` flag is used to run all files in the `MODS` directory. This flag is not needed for other files in the `CODE` directory to run, -but it is needed for mods to run. +but it is needed for mods to run. The `--modded` flag can also be used to run custom scripts. -If you want to run a custom script with the `--modded` flag, +If you want to run a custom script with the `--modded` flag, you can add the script to the `MODS` directory, and it will be run with the `--modded` flag. To check all the mods and how to make your own, you can check the `Logicytics.py` file and the Wiki. 
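For context on the Mods section above, a minimal mod might look like the sketch below. It follows the `MODS/_MOD_SKELETON.py` file shown earlier in this patch; the file name `example_mod.py` and the logged messages are illustrative assumptions, while the `from logicytics import log` import and the available log methods come from the skeleton itself.

```python
# Hypothetical MODS/example_mod.py, modelled on MODS/_MOD_SKELETON.py above.
from logicytics import log  # the skeleton exposes log.debug/info/warning/error/critical

# A mod should do its own work without interfering with the default scripts.
log.info("Example mod started.")
# ... gather whatever extra data the mod is for ...
log.debug("Example mod finished without errors.")
```

Dropping such a file into the `MODS` directory and running Logicytics with the `--modded` flag would execute it after the default scripts, as the README text above describes.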
@@ -156,20 +176,22 @@ Also refer to the contributing.md for more info If you are having issues, here are some troubleshooting tips: -Some errors may not necessarily mean the script is at fault, +Some errors may not necessarily mean the script is at fault, but other OS related faults like files not existing, or files not being modified, or files not being created. Some tips are: + - Check if the script is running as admin and not in a VM - Check if the script has the correct permissions and correct dependencies to run - Check if the script is not being blocked by a firewall or antivirus or by a VPN or proxy - Check if the script is not being blocked by any other software or service If those don't work attempt: + - Try running the script with powershell instead of cmd, or vice versa - Try running the script in a different directory, computer or python version above 3.8 - - Note: The version used to develop, test and run the script is 3.11 + - Note: The version used to develop, test and run the script is 3.11 - Try running the `--debug` flag and check the logs ### Support Resources @@ -215,7 +237,7 @@ Here are some of the data points that Logicytics extracts: | bluetooth_logger.py | Collect, log, and analyze Bluetooth-related data, by accessing the Windows registry and Event Viewer. | | | network_psutil.py | The `network_psutil.py` file collects and logs various network-related information. | | -This is not an exhaustive list, +This is not an exhaustive list, but it should give you a good idea of what data Logicytics is capable of extracting. > [!NOTE] @@ -239,13 +261,14 @@ Tips and tricks of the given modules/APIs can be found [here](https://github.com/DefinetlyNotAI/Logicytics/wiki/6-Code-tips-and-tricks) too! > [!IMPORTANT] -> Always adhere to the [coding standards](https://github.com/DefinetlyNotAI/Logicytics/wiki/7-Advanced-Coding-Standards) of Logicytics! +> Always adhere to the [coding standards](https://github.com/DefinetlyNotAI/Logicytics/wiki/7-Advanced-Coding-Standards) +> of Logicytics! ## 🌟 Conclusion 🌟 Logicytics is a powerful tool that can extract a wide variety of data from a Windows system. With its ability to extract data from various sources, Logicytics can be used for a variety of purposes, -from forensics to system information gathering. +from forensics to system information gathering. Its ability to extract data from various sources makes it a valuable tool for any Windows system administrator or forensic investigator. diff --git a/SECURITY.md b/SECURITY.md index b9fb057b..596a1fa9 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -26,7 +26,6 @@ This section outlines the versions of our project that are currently supported w | 1.1.x | ❌ | May 10, 2024 | | 1.0.x | ❌ | May 4, 2024 | - ### Key: | Key | Desc |