diff --git a/.github/auto-assign-config.yml b/.github/auto-assign-config.yml
deleted file mode 100644
index b3d46a4..0000000
--- a/.github/auto-assign-config.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-# Set to true to add reviewers to pull requests
-addReviewers: true
-
-# Set to true to add assignees to pull requests
-addAssignees: author
-
-# A list of reviewers to be added to pull requests (GitHub user name)
-reviewers:
- - iamwatchdogs
-
-# A number of reviewers added to the pull request
-# Set 0 to add all the reviewers (default: 0)
-numberOfReviewers: 1
-
-# A list of assignees, overrides reviewers if set
-# assignees:
-# - assigneeA
-
-# A number of assignees to add to the pull request
-# Set to 0 to add all of the assignees.
-# Uses numberOfReviewers if unset.
-# numberOfAssignees: 2
-
-# A list of keywords to be skipped the process that add reviewers if pull requests include it
-# skipKeywords:
-# - wip
\ No newline at end of file
diff --git a/.github/scripts/convert_to_html_tables.py b/.github/scripts/convert_to_html_tables.py
index 80a0b1a..e1976c0 100644
--- a/.github/scripts/convert_to_html_tables.py
+++ b/.github/scripts/convert_to_html_tables.py
@@ -1,7 +1,9 @@
#!/usr/bin/env python
import os
+import sys
import json
+from collections import OrderedDict
'''
This script requires following environment variables:
@@ -11,150 +13,268 @@
> GitHub action variable: ${{ github.repository }}
'''
+class UpdateFileContent:
+ """Class that updates `index.md` based on contributors-log."""
-def find_table_points(lines):
- """
- Find table points within a given list of lines.
+ # Setting static variables
+ DATA = None
+ REPO_NAME = None
- The table points are determined by the presence of the markers:
-
-
+ def __init__(self, FILE_PATH, condition=None):
- Args:
- lines (list): List of lines to search in.
+ # Displaying starting Message
+ print(f'\n--- Updating {FILE_PATH} ---\n')
- Returns:
- tuple: A tuple of two integers containing the start and end indices of
- the table points.
+ # Setting Constant values
+ self.FILE_PATH = FILE_PATH
- Raises:
- SystemExit: If the table markers are not found or if the table end
- marker appears before the table start marker.
- """
+ # Retrieving data as modifiable lines
+ self.lines = self.get_lines()
- # Setting default return values
- table_start = None
- table_end = None
+ # Updates lines based on the data
+ self.update_table_of_contributors(condition)
+ self.update_table_of_content(condition)
- # Setting the markers
- table_start_marker = ''
- table_end_marker = ''
+ # Updating target file content
+ self.write_lines_into_file()
- # Iterating over lines to find the markers
- for index, line in enumerate(lines):
- if table_start is None and table_start_marker in line:
- table_start = index
- elif table_end is None and table_end_marker in line:
- table_end = index
- if table_start is not None and table_end is not None:
- break
- # Checking for possible errors
- if table_start is None or table_end is None:
- print('Table not found in the file.')
- exit(1)
- elif table_start >= table_end:
- print('Invaild use of table markers.')
- exit(2)
+ def get_lines(self):
+
+ # Reading lines from the file
+ with open(self.FILE_PATH, 'r') as file:
+ lines = file.readlines()
+
+ return lines
+
+ def write_lines_into_file(self):
+
+ # Updating the target file
+ with open(self.FILE_PATH, 'w') as file:
+ file.writelines(self.lines)
+
+ # Printing Success Message
+ print(f"Updated '{self.FILE_PATH}' Successfully")
+
+ def find_table_points(self, search_type):
+
+ # Setting default return values
+ table_starting_point = None
+ table_ending_point = None
+
+ # Setting default markers
+ table_start_marker = None
+ table_end_marker = None
+
+ # Selecting respective markers based on `search_type`
+ if search_type == 'contributors':
+ table_start_marker = ''
+ table_end_marker= ''
+ elif search_type == 'table-of-content':
+ table_start_marker = ''
+ table_end_marker= ''
+ else:
+ print('Invalid Argument', file=sys.stderr)
+ exit(1)
+
+ # Iterating over lines to find the markers
+ for index, line in enumerate(self.lines):
+ if table_starting_point is None and table_start_marker in line:
+ table_starting_point = index
+ elif table_ending_point is None and table_end_marker in line:
+ table_ending_point = index
+ if table_starting_point is not None and table_ending_point is not None:
+ break
+
+ # Checking for possible errors
+ if table_starting_point is None or table_ending_point is None:
+ print('Table not found in the file.', file=sys.stderr)
+ exit(2)
+ elif table_starting_point >= table_ending_point:
+ print('Invaild use of table markers.', file=sys.stderr)
+ exit(3)
+
+ return (table_starting_point, table_ending_point)
+
+ def update_table_of_contributors(self, condition):
+
+ # Calculating starting and ending points of the targeted table
+ table_of_contributors_start, table_of_contributors_end = self.find_table_points('contributors')
+
+ # Creating HTML table header to replace md table
+ table_header = list()
+ table_header.append('
\n')
+ table_header.append('\t\n')
+ table_header.append('\t\t| Contribution Title | \n')
+ if condition is None:
+ table_header.append('\t\tCore Contribution | \n')
+ table_header.append('\t\tContributor Names | \n')
+ table_header.append('\t\tPull Requests | \n')
+ table_header.append('\t\tDemo | \n')
+ table_header.append('\t
\n')
+
+ # Initializing empty list for lines
+ updated_lines = list()
+
+ # checking for entries
+ has_at_least_one_entry = False
+
+
+ # Iterating over log to update target file
+ for title, details in self.DATA.items():
+
+ # Modifying based on condition
+ if condition is not None and not condition(details['core']):
+ continue
+
+ # Processing contributors-names
+ contributors_names = details['contributor-name']
+ contributors_names_list = [f'{name}' for name in contributors_names]
+ contributors_names_output = ', '.join(contributors_names_list)
+
+ # Processing core contribution
+ core_contribution = details['core']
+ if condition is None:
+ core_contribution_output = f'{core_contribution}'
+
+ # Processing pull-requests
+ pull_requests = details['pull-request-number']
+ pull_requests_list = [f'#{pr}' for pr in pull_requests]
+ pull_requests_output = ', '.join(pull_requests_list)
+
+ # Processing demo-path
+ demo_path = details['demo-path']
+ specificity = details['specificity']
+ if ' ' in demo_path:
+ demo_path = '%20'.join(demo_path.split())
+ demo_path_output = f'./{core_contribution}/{specificity}'
+ if title == 'root' or title == '{init}':
+ demo_path_output = f'/{self.REPO_NAME}/'
+
+ # Appending all data together
+ updated_lines.append('\t\n')
+ updated_lines.append(f'\t\t| {title} | \n')
+ if condition is None:
+ updated_lines.append(f'\t\t{core_contribution_output} | \n')
+ updated_lines.append(f'\t\t{contributors_names_output} | \n')
+ updated_lines.append(f'\t\t{pull_requests_output} | \n')
+ updated_lines.append(f'\t\t{demo_path_output} | \n')
+ updated_lines.append(f'\t
\n')
+
+ has_at_least_one_entry = True
+
+ # Adding null values if table is completely empty
+ if not has_at_least_one_entry:
+ updated_lines.append('\t\n')
+ updated_lines.append(f'\t\t| - | \n')
+ if condition is None:
+ updated_lines.append(f'\t\t- | \n')
+ updated_lines.append(f'\t\t- | \n')
+ updated_lines.append(f'\t\t- | \n')
+ updated_lines.append(f'\t\t- | \n')
+ updated_lines.append(f'\t
\n')
+
+ # Table footer
+ table_footer = ['
\n']
+
+ # Updating the lines with updated data
+ self.lines[table_of_contributors_start+1:table_of_contributors_end] = table_header + updated_lines + table_footer
+
+ # Printing Success Message
+ print('Successfully updated the contributor details !!!...')
+
+ def update_table_of_content(self, condition):
+
+ # Calculating starting and ending points of the targeted table
+ table_of_content_start, table_of_content_end = self.find_table_points('table-of-content')
+
+ # Initializing required variables
+ updated_lines = list()
+ table_of_content = { 'Theory': {}, 'Solved-Problems': {}, 'Repo': {} }
+
+ # Extracting data into required format
+ for title, data in self.DATA.items():
+
+ # Setting values for ease of use and more readability
+ core = data['core']
+ specificity = data['specificity']
+
+ # Sorting out required data
+ if specificity not in table_of_content[core]:
+ table_of_content[core][specificity] = None if specificity == title else [title]
+ elif title != specificity and title not in table_of_content[core][specificity]:
+ if table_of_content[core][specificity] is None:
+ table_of_content[core][specificity] = [title]
+ else:
+ table_of_content[core][specificity].append(title)
+
+ # Sorting extracted data
+ for key, value in table_of_content.items():
+ for sub_value in value.values():
+ if type(sub_value) == list:
+ sub_value.sort()
+ table_of_content[key] = OrderedDict(sorted(value.items()))
+
+ # Updating lines based on the extracted data
+ for core, data in table_of_content.items():
+
+ # Modifying based on condition
+ if condition is not None and not condition(core) or core == 'Repo':
+ continue
+
+ # Setting Main Heading (Only for Root)
+ if condition is None:
+ updated_lines.append(f'- [__{core}__]({core} "goto {core}")\n')
+
+ # Adding all headings
+ for heading, sub_heading_list in data.items():
+ if condition is None:
+ updated_lines.append(f'\t- [{heading}]({core}/{heading} "goto {heading}")\n')
+ else:
+ updated_lines.append(f'- [__{heading}__]({heading} "goto {heading}")\n')
+ if sub_heading_list is not None:
+ for sub_heading in sub_heading_list:
+ if condition is None:
+ updated_lines.append(f'\t\t- [{sub_heading}]({core}/{heading}/{sub_heading} "goto {sub_heading}")\n')
+ else:
+ updated_lines.append(f'\t- [{sub_heading}]({heading}/{sub_heading} "goto {sub_heading}")\n')
+
+ # Updating the lines with updated data
+ self.lines[table_of_content_start+1:table_of_content_end] = updated_lines
+
+ # Printing Success Message
+ print('Successfully updated the table of content !!!...')
- return (table_start, table_end)
def main():
- """
- Update the index.md file with the latest contributors data.
-
- This function retrieves the REPO_NAME environment variable and the
- CONTRIBUTORS_LOG file path. It then reads the log file and extracts the
- data from it. The function then reads the index.md file and calculates
- the table points. If the table does not exist, it creates the table
- header. The function then iterates over the log data and updates the
- table with the latest data. Finally, it updates the index.md file with
- the updated data and prints a success message.
-
- """
-
- # Retrieving Environmental variables
- REPO_NAME = os.environ.get('REPO_NAME')
-
- # Setting path for the log JSON file
- TARGET_FILE = 'index.md'
- CONTRIBUTORS_LOG = '.github/data/contributors-log.json'
-
- # Retrieving data from log file
- with open(CONTRIBUTORS_LOG, 'r') as json_file:
- data = json.load(json_file)
-
- # Reading lines from the file
- with open(TARGET_FILE, 'r') as file:
- lines = file.readlines()
-
- # Calculating Stating and ending points of the targeted table
- table_start, table_end = find_table_points(lines)
-
- # Creating HTML table header to replace md table
- table_header = list()
- table_header.append('\n')
- table_header.append('\t\n')
- table_header.append('\t\t| Project Title | \n')
- table_header.append('\t\tContributor Names | \n')
- table_header.append('\t\tPull Requests | \n')
- table_header.append('\t\tDemo | \n')
- table_header.append('\t
\n')
-
- # Initializing empty list for lines
- updated_lines = list()
-
- # Iterating over log to update target file
- for title, details in data.items():
-
- # Processing contributors-names
- contributors_names = details['contributor-name']
- contributors_names_list = [
- f'{name}' for name in contributors_names]
- contributors_names_output = ', '.join(contributors_names_list)
-
- # Processing pull-requests
- pull_requests = details['pull-request-number']
- pull_requests_list = [
- f'{pr}' for pr in pull_requests]
- pull_requests_output = ', '.join(pull_requests_list)
-
- # Processing demo-path
- demo_path = details['demo-path']
- if ' ' in demo_path:
- demo_path = '%20'.join(demo_path.split())
- demo_path_output = f'/{REPO_NAME}/{title}/'
- if title == 'root' or title == '{init}':
- demo_path_output = f'/{REPO_NAME}/'
- elif title == '{workflows}':
- demo_path_output = f'/{REPO_NAME}/.github/workflows'
- elif title == '{scripts}':
- demo_path_output = f'/{REPO_NAME}/.github/scripts'
- elif title == '{others}':
- demo_path_output = f'/{REPO_NAME}/.github'
-
- # Appending all data together
- updated_lines.append('\t\n')
- updated_lines.append(f'\t\t| {title} | \n')
- updated_lines.append(f'\t\t{contributors_names_output} | \n')
- updated_lines.append(f'\t\t{pull_requests_output} | \n')
- updated_lines.append(f'\t\t{demo_path_output} | \n')
- updated_lines.append(f'\t
\n')
-
- # Table footer
- table_footer = ['
\n']
-
- # Updating the lines with updated data
- lines[table_start+1:table_end] = table_header+updated_lines+table_footer
-
- # Updating the target file
- with open(TARGET_FILE, 'w') as file:
- file.writelines(lines)
-
- # Printing Success Message
- print(f"Updated '{TARGET_FILE}' Successfully")
+
+ # Retrieving Environmental variables
+ REPO_NAME = os.environ.get('REPO_NAME')
+
+ # Setting path for the log JSON file
+ ROOT_INDEX_FILE_PATH = 'index.md'
+ THEORY_INDEX_FILE_PATH = 'Theory/index.md'
+ THEORY_README_FILE_PATH = 'Theory/README.md'
+ SOLVED_PROBLEM_INDEX_FILE_PATH = 'Solved-Problems/index.md'
+ SOLVED_PROBLEM_README_FILE_PATH = 'Solved-Problems/README.md'
+ CONTRIBUTORS_LOG = '.github/data/contributors-log.json'
+
+ # Retrieving data from log file
+ with open(CONTRIBUTORS_LOG, 'r') as json_file:
+ DATA = json.load(json_file)
+
+ # Assigning values to static members for class `UpdateFileContent`
+ UpdateFileContent.DATA = DATA
+ UpdateFileContent.REPO_NAME = REPO_NAME
+
+ # Updating All required files
+ UpdateFileContent(ROOT_INDEX_FILE_PATH)
+ UpdateFileContent(THEORY_INDEX_FILE_PATH, lambda core: core == 'Theory')
+ UpdateFileContent(THEORY_README_FILE_PATH, lambda core: core == 'Theory')
+ UpdateFileContent(SOLVED_PROBLEM_INDEX_FILE_PATH, lambda core: core == 'Solved-Problems')
+ UpdateFileContent(SOLVED_PROBLEM_README_FILE_PATH, lambda core: core == 'Solved-Problems')
if __name__ == '__main__':
- main()
+ main()
diff --git a/.github/scripts/update_contributors_log.py b/.github/scripts/update_contributors_log.py
index 01bba0a..2fb45f8 100644
--- a/.github/scripts/update_contributors_log.py
+++ b/.github/scripts/update_contributors_log.py
@@ -15,100 +15,56 @@
> GitHub action variable: ${{ github.event.pull_request.number }}
'''
-
-def get_project_title(pr_data):
- """
- Determines the project title based on the file paths in the pull request data.
-
- Args:
- pr_data (dict): The pull request data containing file paths.
-
- Returns:
- str: The project title derived from the directory name in the file path.
- Returns 'root' if changes are made in the root of the repository.
- Special cases include '{workflows}', '{scripts}', and '{others}'
- for certain paths within the '.github' directory.
-
- """
-
+def get_contribution_title(CURRENT_PR):
+
# Setting default value
- project_title = 'root'
+ contribution_title = 'root'
+ path = contribution_title
# Iterating through the "files" list
- for i in pr_data["files"]:
- if '/' in i["path"]:
- project_title = i["path"]
+ for files in CURRENT_PR["files"]:
+ if '/' in files["path"]:
+ contribution_title = files["path"]
+ path = contribution_title
break
- # changes are made in the root of repo
- if project_title == 'root':
- return project_title
-
- if '.github/workflows' in project_title:
- project_title = '{workflows}'
- elif '.github/scripts' in project_title:
- project_title = '{scripts}'
- elif '.github' in project_title:
- project_title = '{others}'
- else:
- project_title = project_title.split('/')[0] # directory name
-
- return project_title
-
-
-def get_contributor_name(pr_data):
- """
- Retrieves the username of the contributor who made the pull request.
-
- Args:
- pr_data (dict): The pull request data containing the author's username.
+ # If we find a directory
+ if contribution_title != 'root':
+ splitted_title = contribution_title.split('/')
+ contribution_title = splitted_title[-2] if '.' in contribution_title else splitted_title[-1]
- Returns:
- str: The username of the contributor.
- """
- return pr_data["author"]["login"]
+ return (contribution_title, path)
+def get_contributor_name(CURRENT_PR):
+ return CURRENT_PR["author"]["login"]
-def get_demo_path(pr_data):
- """
- Retrieves the demo path for the pull request.
+def get_core_type(CONTRIBUTION_TITLE, USED_PATH):
+ return USED_PATH.split('/')[0] if CONTRIBUTION_TITLE != 'root' else 'Repo'
- Args:
- pr_data (dict): The pull request data containing information about the pull request.
+def get_specificity(CONTRIBUTION_TITLE, USED_PATH):
+ return USED_PATH.split('/')[1] if CONTRIBUTION_TITLE != 'root' else 'Maintenance'
- Returns:
- str: The demo path of the pull request.
- """
+def get_demo_path(CURRENT_PR, CONTRIBUTION_TITLE, CORE_TYPE, SPECIFICITY):
# Getting required values
REPO_NAME = os.environ.get('REPO_NAME')
- PROJECT_NAME = get_project_title(pr_data)
# Handling a base case
- if PROJECT_NAME == 'root':
+ if CONTRIBUTION_TITLE == 'root':
return f'https://github.com/{REPO_NAME}/'
- url_path = PROJECT_NAME
-
- # Setting custom path for workflow maintance
- SPECIAL_CASES = ['{workflows}', '{scripts}', '{others}']
- if PROJECT_NAME in SPECIAL_CASES:
- url_path = '.github'
- if PROJECT_NAME in SPECIAL_CASES[:2]:
- url_path += f'/{PROJECT_NAME[1:-1]}'
-
# Setting default value
- demo_path = f'https://github.com/{REPO_NAME}/tree/main/{url_path}'
+ demo_path = f'https://github.com/{REPO_NAME}/tree/main/{CORE_TYPE}/{SPECIFICITY}'
found_required_path = False
# Iterating through the "files" list
- for file_data in pr_data["files"]:
- path = file_data["path"]
+ for files in CURRENT_PR["files"]:
+ path = files["path"]
if "index.html" in path:
demo_path = path
found_required_path = True
break
- elif path.lower().endswith('index.md') or path.lower().endswith('readme.md'):
+ elif path.lower().endswith('index.md') or path.lower().endswith('readme.md'):
demo_path = path
found_required_path = True
@@ -122,46 +78,31 @@ def get_demo_path(pr_data):
return demo_path
-
def main():
- """
- Updates the contributors log file after a pull request has been merged.
-
- This function is to be called in a GitHub Actions workflow after a pull request has been merged.
- It reads the details of the current pull request from a JSON file, extracts the required information,
- and updates the contributors log file accordingly.
-
- The contributors log file is a JSON file that contains information about each contributor, including
- their name, the number of the pull request they contributed to, and the path to their project.
-
- The function dumps the data into the log file and outputs a success message upon completion.
-
- Args:
- None
-
- Returns:
- None
- """
-
- # Setting required file paths
- CURRENT_PR_DETAILS_PATH = 'pr.json'
- CONTRIBUTORS_LOG_PATH = '.github/data/contributors-log.json'
+
+ # Setting file paths
+ PR_DETAILS_FILE_PATH = 'pr.json'
+ CONTRIBUTION_LOG_FILE_PATH = '.github/data/contributors-log.json'
# Reading contents from the current pr
- with open(CURRENT_PR_DETAILS_PATH, 'r') as json_file:
- current_pr = json.load(json_file)
-
+ with open(PR_DETAILS_FILE_PATH, 'r') as json_file:
+ CURRENT_PR = json.load(json_file)
+
# Getting required value for update
- PROJECT_TITLE = get_project_title(current_pr)
- CONTRIBUTOR_NAME = get_contributor_name(current_pr)
+ CONTRIBUTION_TITLE, USED_PATH = get_contribution_title(CURRENT_PR)
+ CONTRIBUTOR_NAME = get_contributor_name(CURRENT_PR)
+ CORE_TYPE = get_core_type(CONTRIBUTION_TITLE, USED_PATH)
+ SPECIFICITY = get_specificity(CONTRIBUTION_TITLE, USED_PATH)
PR_NUMBER = os.environ.get('PR_NUMBER')
- DEMO_PATH = get_demo_path(current_pr)
+ DEMO_PATH = get_demo_path(CURRENT_PR, CONTRIBUTION_TITLE, CORE_TYPE, SPECIFICITY)
# Creating a new dict objects for JSON conversion
existing_data = None
new_data = {
- PROJECT_TITLE: {
+ CONTRIBUTION_TITLE: {
"contributor-name": [CONTRIBUTOR_NAME],
+ "core": CORE_TYPE,
+ "specificity": SPECIFICITY,
"pull-request-number": [PR_NUMBER],
"demo-path": DEMO_PATH
}
@@ -169,18 +110,18 @@ def main():
# Processing the data dumps
operation_name = None
- if os.path.exists(CONTRIBUTORS_LOG_PATH):
+ if os.path.exists(CONTRIBUTION_LOG_FILE_PATH):
# Reading existing Log file
- with open(CONTRIBUTORS_LOG_PATH, 'r') as json_file:
+ with open(CONTRIBUTION_LOG_FILE_PATH, 'r') as json_file:
existing_data = json.load(json_file)
# performing updation or addition based on `PROJECT_TITLE`
- if PROJECT_TITLE in existing_data:
- if CONTRIBUTOR_NAME not in existing_data[PROJECT_TITLE]["contributor-name"]:
- existing_data[PROJECT_TITLE]["contributor-name"].append(CONTRIBUTOR_NAME)
- if PR_NUMBER not in existing_data[PROJECT_TITLE]["pull-request-number"]:
- existing_data[PROJECT_TITLE]["pull-request-number"].append(PR_NUMBER)
+ if CONTRIBUTION_TITLE in existing_data:
+ if CONTRIBUTOR_NAME not in existing_data[CONTRIBUTION_TITLE]["contributor-name"]:
+ existing_data[CONTRIBUTION_TITLE]["contributor-name"].append(CONTRIBUTOR_NAME)
+ if PR_NUMBER not in existing_data[CONTRIBUTION_TITLE]["pull-request-number"]:
+ existing_data[CONTRIBUTION_TITLE]["pull-request-number"].append(PR_NUMBER)
operation_name = 'Updated'
else:
existing_data.update(new_data)
@@ -190,12 +131,11 @@ def main():
operation_name = 'Created'
# Dumping the data into log file
- with open(CONTRIBUTORS_LOG_PATH, 'w') as json_file:
+ with open(CONTRIBUTION_LOG_FILE_PATH, 'w') as json_file:
json.dump(existing_data, json_file, indent=2)
# Output message
print(f'Successfully {operation_name} the log file')
-
if __name__ == '__main__':
- main()
+ main()
\ No newline at end of file
diff --git a/.github/scripts/update_index_md.py b/.github/scripts/update_index_md.py
index 7095743..10b2f20 100644
--- a/.github/scripts/update_index_md.py
+++ b/.github/scripts/update_index_md.py
@@ -1,7 +1,9 @@
#!/usr/bin/env python
import os
+import sys
import json
+from collections import OrderedDict
'''
This script requires following environment variables:
@@ -11,141 +13,253 @@
> GitHub action variable: ${{ github.repository }}
'''
+class UpdateFileContent:
+ """Class that updates `index.md` based on contributors-log."""
-def find_table_points(lines):
- """
- Find table points within a given list of lines.
+ # Setting static variables
+ DATA = None
+ REPO_NAME = None
- The table points are determined by the presence of the markers:
-
-
+ def __init__(self, FILE_PATH, condition=None):
- Args:
- lines (list): List of lines to search in.
+ # Displaying starting Message
+ print(f'\n--- Updating {FILE_PATH} ---\n')
- Returns:
- tuple: A tuple of two integers containing the start and end indices of
- the table points.
+ # Setting Constant values
+ self.FILE_PATH = FILE_PATH
- Raises:
- SystemExit: If the table markers are not found or if the table end
- marker appears before the table start marker.
- """
+ # Retrieving data as modifiable lines
+ self.lines = self.get_lines()
- # Setting default return values
- table_start = None
- table_end = None
+ # Updates lines based on the data
+ self.update_table_of_contributors(condition)
+ self.update_table_of_content(condition)
- # Setting the markers
- table_start_marker = ''
- table_end_marker = ''
+ # Updating target file content
+ self.write_lines_into_file()
- # Iterating over lines to find the markers
- for index, line in enumerate(lines):
- if table_start is None and table_start_marker in line:
- table_start = index
- elif table_end is None and table_end_marker in line:
- table_end = index
- if table_start is not None and table_end is not None:
- break
- # Checking for possible errors
- if table_start is None or table_end is None:
- print('Table not found in the file.')
- exit(1)
- elif table_start >= table_end:
- print('Invaild use of table markers.')
- exit(2)
+ def get_lines(self):
- return (table_start, table_end)
+ # Reading lines from the file
+ with open(self.FILE_PATH, 'r') as file:
+ lines = file.readlines()
+ return lines
-def main():
- """
- Update the index.md file with the latest contributors data.
+ def write_lines_into_file(self):
+
+ # Updating the target file
+ with open(self.FILE_PATH, 'w') as file:
+ file.writelines(self.lines)
+
+ # Printing Success Message
+ print(f"Updated '{self.FILE_PATH}' Successfully")
+
+ def find_table_points(self, search_type):
+
+ # Setting default return values
+ table_starting_point = None
+ table_ending_point = None
+
+ # Setting default markers
+ table_start_marker = None
+ table_end_marker = None
+
+ # Selecting respective markers based on `search_type`
+ if search_type == 'contributors':
+ table_start_marker = ''
+ table_end_marker= ''
+ elif search_type == 'table-of-content':
+ table_start_marker = ''
+ table_end_marker= ''
+ else:
+ print('Invalid Argument', file=sys.stderr)
+ exit(1)
+
+ # Iterating over lines to find the markers
+ for index, line in enumerate(self.lines):
+ if table_starting_point is None and table_start_marker in line:
+ table_starting_point = index
+ elif table_ending_point is None and table_end_marker in line:
+ table_ending_point = index
+ if table_starting_point is not None and table_ending_point is not None:
+ break
- This function retrieves the REPO_NAME environment variable and the
- CONTRIBUTORS_LOG file path. It then reads the log file and extracts the
- data from it. The function then reads the index.md file and calculates
- the table points. If the table does not exist, it creates the table
- header. The function then iterates over the log data and updates the
- table with the latest data. Finally, it updates the index.md file with
- the updated data and prints a success message.
+ # Checking for possible errors
+ if table_starting_point is None or table_ending_point is None:
+ print('Table not found in the file.', file=sys.stderr)
+ exit(2)
+ elif table_starting_point >= table_ending_point:
+ print('Invaild use of table markers.', file=sys.stderr)
+ exit(3)
+
+ return (table_starting_point, table_ending_point)
+
+ def update_table_of_contributors(self, condition):
+
+ # Calculating starting and ending points of the targeted table
+ table_of_contributors_start, table_of_contributors_end = self.find_table_points('contributors')
+
+ # Creating table header if doesn't exist
+ if table_of_contributors_end - table_of_contributors_start == 1:
+ table_header = list()
+ if condition is None:
+ table_header.append('| Contribution Title | Core Contribution | Contributor Names | Pull Requests | Demo |\n')
+ table_header.append('| --- | --- | --- | --- | --- |\n')
+ else:
+ table_header.append('| Contribution Title | Contributor Names | Pull Requests | Demo |\n')
+ table_header.append('| --- | --- | --- | --- |\n')
+ self.lines[table_of_contributors_start+1:table_of_contributors_end] = table_header
+
+ # Initializing empty list for lines
+ updated_lines = list()
+
+ # Checking for min entries
+ has_at_least_one_entry = False
+
+ # Iterating over log to update target file
+ for title, details in self.DATA.items():
+
+ # Modifying based on condition
+ if condition is not None and not condition(details['core']):
+ continue
+
+ # Processing contributors-names
+ contributors_names = details['contributor-name']
+ contributors_names_list = [f'[{name}](https://github.com/{name} "goto {name} profile")' for name in contributors_names]
+ contributors_names_output = ', '.join(contributors_names_list)
+
+ # Processing core contribution
+ core_contribution = details['core']
+ if condition is None:
+ core_contribution_output = f'[{core_contribution}]({core_contribution} "goto {core_contribution}")'
+
+ # Processing pull-requests
+ pull_requests = details['pull-request-number']
+ pull_requests_list = [f'[#{pr}](https://github.com/{self.REPO_NAME}/pull/{pr} "visit pr \#{pr}")' for pr in pull_requests]
+ pull_requests_output = ', '.join(pull_requests_list)
+
+ # Processing demo-path
+ demo_path = details['demo-path']
+ specificity = details['specificity']
+ if ' ' in demo_path:
+ demo_path = '%20'.join(demo_path.split())
+ demo_path_output = f'[./{core_contribution}/{specificity}/]({demo_path} "view the result of {title}")'
+ if title == 'root' or title == '{init}':
+ demo_path_output = f'[/{self.REPO_NAME}/]({demo_path} "view the result of {title}")'
+
+ # Appending all data together
+ if condition is None:
+ updated_lines.append(f'| {title} | {core_contribution_output} | {contributors_names_output} | {pull_requests_output} | {demo_path_output} |\n')
+ else:
+ updated_lines.append(f'| {title} | {contributors_names_output} | {pull_requests_output} | {demo_path_output} |\n')
+
+ has_at_least_one_entry = True
+
+ # Adding null entries for completely empty table
+ if not has_at_least_one_entry:
+ if condition is None:
+ updated_lines.append('| - | - | - | - | - |\n')
+ else:
+ updated_lines.append('| - | - | - | - |\n')
+
+ # Updating the lines with updated data
+ self.lines[table_of_contributors_start+3:table_of_contributors_end] = updated_lines
+
+ # Printing Success Message
+ print('Successfully updated the contributor details !!!...')
+
+ def update_table_of_content(self, condition):
+
+ # Calculating starting and ending points of the targeted table
+ table_of_content_start, table_of_content_end = self.find_table_points('table-of-content')
+
+ # Initializing required variables
+ updated_lines = list()
+ table_of_content = { 'Theory': {}, 'Solved-Problems': {}, 'Repo': {} }
+
+ # Extracting data into required format
+ for title, data in self.DATA.items():
+
+ # Setting values for ease of use and more readability
+ core = data['core']
+ specificity = data['specificity']
+
+ # Sorting out required data
+ if specificity not in table_of_content[core]:
+ table_of_content[core][specificity] = None if specificity == title else [title]
+ elif title != specificity and title not in table_of_content[core][specificity]:
+ if table_of_content[core][specificity] is None:
+ table_of_content[core][specificity] = [title]
+ else:
+ table_of_content[core][specificity].append(title)
+
+ # Sorting extracted data
+ for key, value in table_of_content.items():
+ for sub_value in value.values():
+ if type(sub_value) == list:
+ sub_value.sort()
+ table_of_content[key] = OrderedDict(sorted(value.items()))
+
+ # Updating lines based on the extracted data
+ for core, data in table_of_content.items():
+
+ # Modifying based on condition
+ if condition is not None and not condition(core) or core == 'Repo':
+ continue
+
+ # Setting Main Heading (Only for Root)
+ if condition is None:
+ updated_lines.append(f'- [__{core}__]({core} "goto {core}")\n')
+
+ # Adding all headings
+ for heading, sub_heading_list in data.items():
+ if condition is None:
+ updated_lines.append(f'\t- [{heading}]({core}/{heading} "goto {heading}")\n')
+ else:
+ updated_lines.append(f'- [__{heading}__]({heading} "goto {heading}")\n')
+ if sub_heading_list is not None:
+ for sub_heading in sub_heading_list:
+ if condition is None:
+ updated_lines.append(f'\t\t- [{sub_heading}]({core}/{heading}/{sub_heading} "goto {sub_heading}")\n')
+ else:
+ updated_lines.append(f'\t- [{sub_heading}]({heading}/{sub_heading} "goto {sub_heading}")\n')
+
+ # Updating the lines with updated data
+ self.lines[table_of_content_start+1:table_of_content_end] = updated_lines
+
+ # Printing Success Message
+ print('Successfully updated the table of content !!!...')
- """
+
+def main():
# Retrieving Environmental variables
REPO_NAME = os.environ.get('REPO_NAME')
# Setting path for the log JSON file
- TARGET_FILE = 'index.md'
+ ROOT_INDEX_FILE_PATH = 'index.md'
+ THEORY_INDEX_FILE_PATH = 'Theory/index.md'
+ THEORY_README_FILE_PATH = 'Theory/README.md'
+ SOLVED_PROBLEM_INDEX_FILE_PATH = 'Solved-Problems/index.md'
+ SOLVED_PROBLEM_README_FILE_PATH = 'Solved-Problems/README.md'
CONTRIBUTORS_LOG = '.github/data/contributors-log.json'
# Retrieving data from log file
with open(CONTRIBUTORS_LOG, 'r') as json_file:
- data = json.load(json_file)
-
- # Reading lines from the file
- with open(TARGET_FILE, 'r') as file:
- lines = file.readlines()
-
- # Calculating Stating and ending points of the targeted table
- table_start, table_end = find_table_points(lines)
-
- # Creating table header if doesn't exist
- if table_end - table_start == 1:
- table_header = list()
- table_header.append(
- '| Project Title | Contributor Names | Pull Requests | Demo |\n')
- table_header.append('| --- | --- | --- | --- |\n')
- lines[table_start+1:table_end] = table_header
-
- # Initializing empty list for lines
- updated_lines = list()
-
- # Iterating over log to update target file
- for title, details in data.items():
-
- # Processing contributors-names
- contributors_names = details['contributor-name']
- contributors_names_list = [
- f'[{name}](https://github.com/{name} "goto {name} profile")' for name in contributors_names]
- contributors_names_output = ', '.join(contributors_names_list)
-
- # Processing pull-requests
- pull_requests = details['pull-request-number']
- pull_requests_list = [
- f'[#{pr}](https://github.com/{REPO_NAME}/pull/{pr} "visit pr \#{pr}")' for pr in pull_requests]
- pull_requests_output = ', '.join(pull_requests_list)
-
- # Processing demo-path
- demo_path = details['demo-path']
- if ' ' in demo_path:
- demo_path = '%20'.join(demo_path.split())
- demo_path_output = f'[/{REPO_NAME}/{title}/]({demo_path} "view the result of {title}")'
- if title == 'root' or title == '{init}':
- demo_path_output = f'[/{REPO_NAME}/]({demo_path} "view the result of {title}")'
- elif title == '{workflows}':
- demo_path_output = f'[/{REPO_NAME}/.github/workflows]({demo_path} "view the result of {title}")'
- elif title == '{scripts}':
- demo_path_output = f'[/{REPO_NAME}/.github/scripts]({demo_path} "view the result of {title}")'
- elif title == '{others}':
- demo_path_output = f'[/{REPO_NAME}/.github]({demo_path} "view the result of {title}")'
-
- # Appending all data together
- updated_lines.append(
- f'| {title} | {contributors_names_output} | {pull_requests_output} | {demo_path_output} |\n')
-
- # Updating the lines with updated data
- lines[table_start+3:table_end] = updated_lines
-
- # Updating the target file
- with open(TARGET_FILE, 'w') as file:
- file.writelines(lines)
-
- # Printing Success Message
- print(f"Updated '{TARGET_FILE}' Successfully")
-
+ DATA = json.load(json_file)
+
+ # Assigning values to static members for class `UpdateFileContent`
+ UpdateFileContent.DATA = DATA
+ UpdateFileContent.REPO_NAME = REPO_NAME
+
+ # Updating All required files
+ UpdateFileContent(ROOT_INDEX_FILE_PATH)
+ UpdateFileContent(THEORY_INDEX_FILE_PATH, lambda core: core == 'Theory')
+ UpdateFileContent(THEORY_README_FILE_PATH, lambda core: core == 'Theory')
+ UpdateFileContent(SOLVED_PROBLEM_INDEX_FILE_PATH, lambda core: core == 'Solved-Problems')
+ UpdateFileContent(SOLVED_PROBLEM_README_FILE_PATH, lambda core: core == 'Solved-Problems')
if __name__ == '__main__':
main()
diff --git a/.github/workflows/auto-assigner.yml b/.github/workflows/auto-assigner.yml
deleted file mode 100644
index 4406356..0000000
--- a/.github/workflows/auto-assigner.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: Auto Assign
-
-on:
- pull_request_target:
- types: [opened, ready_for_review]
- issues:
- types: [opened]
-
-permissions:
- issues: write
- pull-requests: write
-
-jobs:
- auto-assign:
- runs-on: ubuntu-latest
- steps:
- - uses: kentaro-m/auto-assign-action@v1.2.5
- with:
- configuration-path: ".github/auto-assign-config.yml"
diff --git a/.github/workflows/auto-commenter.yml b/.github/workflows/auto-commenter.yml
deleted file mode 100644
index cbfee37..0000000
--- a/.github/workflows/auto-commenter.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-name: Auto-commenter
-
-on:
- pull_request_target:
- types: [opened, closed]
-
-permissions:
- id-token: write
- issues: write
- pull-requests: write
-
-jobs:
- automated-message:
- runs-on: ubuntu-latest
- steps:
- - uses: wow-actions/auto-comment@v1
- with:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- pullRequestOpened: |
- 👋 @{{ author }}
- Thank you for raising your pull request.
- Please make sure you have followed our contributing guidelines. We will review it as soon as possible.
-
- pullRequestClosed: |
- 👋 @{{ author }} This PR is closed. If you think there's been a mistake, please contact the maintainer @iamwatchdogs.
-
- pullRequestMerged: |
- Thank you for contributing @{{ author }}. Make sure to check your contribution on [GitHub Pages](https://grow-with-open-source.github.io/DSA/ "view contributions").
\ No newline at end of file
diff --git a/.github/workflows/auto-labeler.yml b/.github/workflows/auto-labeler.yml
deleted file mode 100644
index bc68a4f..0000000
--- a/.github/workflows/auto-labeler.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: hacktoberfest-labeler
-
-on:
- pull_request_target:
- types: [opened, reopened, closed]
-
-
-permissions:
- contents: read
- pull-requests: write
-
-jobs:
- auto-labeler:
- runs-on: ubuntu-latest
- steps:
- - name: Check for hacktoberfest season
- id: check-month
- run: |
- current_month=$(date +'%m')
- if [ "$current_month" == "10" ]; then
- echo "is_october=true" >> $GITHUB_OUTPUT
- else
- echo "is_october=false" >> $GITHUB_OUTPUT
- fi
-
- - name: Creating config file
- env:
- ACTION: ${{ github.event.action }}
- run: |
- touch ./hacktoberfest-labeler.yml
-
- if [ "$ACTION" != "closed" ]; then
- echo "hacktoberfest:" > hacktoberfest-labeler.yml
- else
- echo "hacktoberfest-accepted:" > hacktoberfest-labeler.yml
- fi
- echo "- changed-files:" >> hacktoberfest-labeler.yml
- echo " - any-glob-to-any-file: '**'" >> hacktoberfest-labeler.yml
-
- echo "Created the config file:"
- echo "------------------------"
- cat ./hacktoberfest-labeler.yml
-
- - name: Label the PRs
- if: steps.check-month.outputs.is_october == 'true' ||
- github.event.pull_request.merged == 'true' &&
- contains(github.event.pull_request.labels.*.name, 'hacktoberfest')
- uses: actions/labeler@v5.0.0
- with:
- configuration-path: ./hacktoberfest-labeler.yml
\ No newline at end of file
diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml
index 53c69d7..1204313 100644
--- a/.github/workflows/linting.yml
+++ b/.github/workflows/linting.yml
@@ -68,7 +68,7 @@ jobs:
checks['javascript']='true'
elif path.endswith('.py'):
checks['python']='true'
- elif '.' in path.split('/')[-1] and not (path.startswith('.github') or path.endswith('.md')):
+ elif '.' in path.split('/')[-1] and not path.endswith('.md'):
checks['other']='true'
# Setting output variables based on file extensions
@@ -106,18 +106,15 @@ jobs:
(needs.python-linter.result == 'skipped' || needs.checkout.outputs.needs_python_linting == 'false'))
}}
runs-on: ubuntu-latest
- permissions:
- contents: read
- packages: read
- statuses: write
-
steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- fetch-depth: 0
-
- - name: Super-linter
- uses: super-linter/super-linter@v7.1.0
- env:
+ - name: Checking out the repo
+ uses: actions/checkout@v4.1.1
+ with:
+ fetch-depth: 0
+ ref: ${{ github.event.pull_request.head.ref }}
+ - name: Super Linter
+ uses: super-linter/super-linter@v5.4.3
+ env:
+ VALIDATE_ALL_CODEBASE: false
+ DEFAULT_BRANCH: main
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
deleted file mode 100644
index fc66c4b..0000000
--- a/.github/workflows/stale.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-name: close-stale-issue-and-prs
-
-on:
- schedule:
- - cron: '30 1 * * *'
-
-permissions:
- contents: write
- issues: write
- pull-requests: write
-
-jobs:
- stale:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/stale@v8
- with:
- stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
- stale-pr-message: 'This PR is stale because it has been open 45 days with no activity. Remove stale label or comment or this will be closed in 10 days.'
- close-issue-message: 'This issue was closed because it has been stalled for 5 days with no activity.'
- close-pr-message: 'This PR was closed because it has been stalled for 10 days with no activity.'
- days-before-issue-stale: 30
- days-before-pr-stale: 45
- days-before-issue-close: 5
- days-before-pr-close: 10
\ No newline at end of file