diff --git a/.gitignore b/.gitignore
index f88a0eb..e046d9c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,9 @@
.DS_Store
# Exclude venv folder
-venv
\ No newline at end of file
+venv
+
+
+__pycache__
+.vscode/
+do_not_commit.sh
diff --git a/.prettierignore b/.prettierignore
new file mode 100644
index 0000000..eed59f2
--- /dev/null
+++ b/.prettierignore
@@ -0,0 +1,2 @@
+*.yml
+
diff --git a/crowdin/download_translations_from_crowdin.py b/crowdin/download_translations_from_crowdin.py
index bd1553a..298d6fd 100644
--- a/crowdin/download_translations_from_crowdin.py
+++ b/crowdin/download_translations_from_crowdin.py
@@ -43,7 +43,7 @@ def check_error(response):
print(f"{Fore.BLUE}Response: {json.dumps(response.json(), indent=2)}{Style.RESET_ALL}")
sys.exit(1)
-def download_file(url, output_path):
+def download_file(url: str, output_path: str):
"""
Function to download a file from Crowdin
"""
diff --git a/crowdin/generate_android_strings.py b/crowdin/generate_android_strings.py
index e7718d1..b3bbc48 100644
--- a/crowdin/generate_android_strings.py
+++ b/crowdin/generate_android_strings.py
@@ -1,15 +1,17 @@
import os
-import json
import xml.etree.ElementTree as ET
import sys
import argparse
import re
from pathlib import Path
from colorama import Fore, Style
+from generate_shared import load_glossary_dict, clean_string, setup_generation
# Variables that should be treated as numeric (using %d)
NUMERIC_VARIABLES = ['count', 'found_count', 'total_count']
+AUTO_REPLACE_STATIC_STRINGS = False
+
# Parse command-line arguments
parser = argparse.ArgumentParser(description='Convert a XLIFF translation files to Android XML.')
@@ -66,22 +68,8 @@ def repl(match):
return re.sub(r'\{([^}]+)\}', repl, text)
-def clean_string(text):
- # Note: any changes done for all platforms needs most likely to be done on crowdin side.
- # So we don't want to replace -> with → for instance, we want the crowdin strings to not have those at all.
- # We can use standard XML escaped characters for most things (since XLIFF is an XML format) but
- # want the following cases escaped in a particular way
- text = text.replace("'", r"\'")
- text = text.replace("&quot;", "\"")
- text = text.replace("\"", "\\\"")
- text = text.replace("&lt;b&gt;", "<b>")
- text = text.replace("&lt;/b&gt;", "</b>")
- text = text.replace("&lt;/br&gt;", "\\n")
- text = text.replace("&#10;", "\\n")
- text = text.replace("&amp;", "&") # Assume any remaining ampersands are desired
- return text.strip() # Strip whitespace
-
-def generate_android_xml(translations, app_name):
+
+def generate_android_xml(translations, app_name, glossary_dict):
sorted_translations = sorted(translations.items())
result = '<?xml version="1.0" encoding="utf-8"?>\n'
result += '<resources>\n'
@@ -93,25 +81,26 @@ def generate_android_xml(translations, app_name):
if isinstance(target, dict): # It's a plural group
result += f' <plurals name="{resname}">\n'
for form, value in target.items():
- escaped_value = clean_string(convert_placeholders(value))
+ escaped_value = clean_string(convert_placeholders(value), True, glossary_dict, {})
result += f' <item quantity="{form}">{escaped_value}</item>\n'
result += ' </plurals>\n'
else: # It's a regular string (for these we DON'T want to convert the placeholders)
- escaped_target = clean_string(target)
+ escaped_target = clean_string(target, True, glossary_dict, {})
result += f' <string name="{resname}">{escaped_target}</string>\n'
result += '</resources>'
return result
-def convert_xliff_to_android_xml(input_file, output_dir, source_locale, locale, app_name):
+def convert_xliff_to_android_xml(input_file, output_dir, source_locale, locale, glossary_dict):
if not os.path.exists(input_file):
raise FileNotFoundError(f"Could not find '{input_file}' in raw translations directory")
# Parse the XLIFF and convert to XML (only include the 'app_name' entry in the source language)
is_source_language = locale == source_locale
translations = parse_xliff(input_file)
- output_data = generate_android_xml(translations, app_name if is_source_language else None)
+ app_name = glossary_dict['app_name']
+ output_data = generate_android_xml(translations, app_name if is_source_language else None, glossary_dict if AUTO_REPLACE_STATIC_STRINGS else {})
# android is pretty smart to resolve resources for translations, see the example here:
# https://developer.android.com/guide/topics/resources/multilingual-support#resource-resolution-examples
@@ -131,17 +120,10 @@ def convert_xliff_to_android_xml(input_file, output_dir, source_locale, locale,
def convert_non_translatable_strings_to_kotlin(input_file, output_path):
- if not os.path.exists(input_file):
- raise FileNotFoundError(f"Could not find '{input_file}' in raw translations directory")
+ glossary_dict = load_glossary_dict(input_file)
- # Process the non-translatable string input
- non_translatable_strings_data = {}
- with open(input_file, 'r', encoding="utf-8") as file:
- non_translatable_strings_data = json.load(file)
-
- entries = non_translatable_strings_data['data']
- max_key_length = max(len(entry['data']['note'].upper()) for entry in entries)
- app_name = None
+ max_key_length = max(len(key) for key in glossary_dict)
+ app_name = glossary_dict['app_name']
# Output the file in the desired format
Path(output_path).parent.mkdir(parents=True, exist_ok=True)
@@ -151,42 +133,24 @@ def convert_non_translatable_strings_to_kotlin(input_file, output_path):
file.write('\n')
file.write('// Non-translatable strings for use with the UI\n')
file.write("object NonTranslatableStringConstants {\n")
- for entry in entries:
- key = entry['data']['note'].upper()
- text = entry['data']['text']
+ for key_lowercase in glossary_dict:
+ key = key_lowercase.upper()
+ text = glossary_dict[key_lowercase]
file.write(f' const val {key:<{max_key_length}} = "{text}"\n')
- if key == 'APP_NAME':
- app_name = text
-
file.write('}\n')
file.write('\n')
- return app_name
-
-def convert_all_files(input_directory):
- # Extract the project information
- print(f"\033[2K{Fore.WHITE}⏳ Processing project info...{Style.RESET_ALL}", end='\r')
- project_info_file = os.path.join(input_directory, "_project_info.json")
- if not os.path.exists(project_info_file):
- raise FileNotFoundError(f"Could not find '{project_info_file}' in raw translations directory")
-
- project_details = {}
- with open(project_info_file, 'r', encoding="utf-8") as file:
- project_details = json.load(file)
-
- # Extract the language info and sort the target languages alphabetically by locale
- source_language = project_details['data']['sourceLanguage']
- target_languages = project_details['data']['targetLanguages']
- target_languages.sort(key=lambda x: x['locale'])
- num_languages = len(target_languages)
- print(f"\033[2K{Fore.GREEN}✅ Project info processed, {num_languages} languages will be converted{Style.RESET_ALL}")
-
- # Convert the non-translatable strings to the desired format
- print(f"\033[2K{Fore.WHITE}⏳ Generating static strings file...{Style.RESET_ALL}", end='\r')
- non_translatable_strings_file = os.path.join(input_directory, "_non_translatable_strings.json")
- app_name = convert_non_translatable_strings_to_kotlin(non_translatable_strings_file, NON_TRANSLATABLE_STRINGS_OUTPUT_PATH)
+ if not app_name:
+ raise ValueError("could not find app_name in glossary_dict")
+
+def convert_all_files(input_directory: str ):
+ setup_values = setup_generation(input_directory)
+ source_language, rtl_languages, non_translatable_strings_file, target_languages = setup_values.values()
+
+ convert_non_translatable_strings_to_kotlin(non_translatable_strings_file, NON_TRANSLATABLE_STRINGS_OUTPUT_PATH)
print(f"\033[2K{Fore.GREEN}✅ Static string generation complete{Style.RESET_ALL}")
+ glossary_dict = load_glossary_dict(non_translatable_strings_file)
# Convert the XLIFF data to the desired format
print(f"\033[2K{Fore.WHITE}⏳ Converting translations to target format...{Style.RESET_ALL}", end='\r')
@@ -199,7 +163,7 @@ def convert_all_files(input_directory):
continue
print(f"\033[2K{Fore.WHITE}⏳ Converting translations for {lang_locale} to target format...{Style.RESET_ALL}", end='\r')
input_file = os.path.join(input_directory, f"{lang_locale}.xliff")
- convert_xliff_to_android_xml(input_file, TRANSLATIONS_OUTPUT_DIRECTORY, source_locale, lang_locale, app_name)
+ convert_xliff_to_android_xml(input_file, TRANSLATIONS_OUTPUT_DIRECTORY, source_locale, lang_locale, glossary_dict)
print(f"\033[2K{Fore.GREEN}✅ All conversions complete{Style.RESET_ALL}")
if __name__ == "__main__":
diff --git a/crowdin/generate_desktop_strings.py b/crowdin/generate_desktop_strings.py
index 44b2cdf..1d50506 100644
--- a/crowdin/generate_desktop_strings.py
+++ b/crowdin/generate_desktop_strings.py
@@ -1,11 +1,12 @@
import os
import json
+from typing import Dict, List
import xml.etree.ElementTree as ET
import sys
import argparse
-import html
from pathlib import Path
-from colorama import Fore, Style, init
+from colorama import Fore, Style
+from generate_shared import clean_string, load_glossary_dict, setup_generation
# Customizable mapping for output folder hierarchy
# Add entries here to customize the output path for specific locales
@@ -37,6 +38,7 @@
TRANSLATIONS_OUTPUT_DIRECTORY = args.translations_output_directory
NON_TRANSLATABLE_STRINGS_OUTPUT_PATH = args.non_translatable_strings_output_path
+
def parse_xliff(file_path):
tree = ET.parse(file_path)
root = tree.getroot()
@@ -69,26 +71,20 @@ def parse_xliff(file_path):
return translations
-def clean_string(text):
- # Note: any changes done for all platforms needs most likely to be done on crowdin side.
- # So we don't want to replace -> with → for instance, we want the crowdin strings to not have those at all.
- text = html.unescape(text) # Unescape any HTML escaping
- return text.strip() # Strip whitespace
-def generate_icu_pattern(target):
+def generate_icu_pattern(target, glossary_dict : Dict[str,str]):
if isinstance(target, dict): # It's a plural group
pattern_parts = []
for form, value in target.items():
if form in ['zero', 'one', 'two', 'few', 'many', 'other', 'exact', 'fractional']:
- # Replace {count} with #
- value = clean_string(value.replace('{count}', '#'))
+ value = clean_string(value, False, glossary_dict, {})
pattern_parts.append(f"{form} [{value}]")
return "{{count, plural, {0}}}".format(" ".join(pattern_parts))
else: # It's a regular string
- return clean_string(target)
+ return clean_string(target, False, glossary_dict, {})
-def convert_xliff_to_json(input_file, output_dir, locale, locale_two_letter_code):
+def convert_xliff_to_json(input_file, output_dir, locale, locale_two_letter_code, glossary_dict):
if not os.path.exists(input_file):
raise FileNotFoundError(f"Could not find '{input_file}' in raw translations directory")
@@ -98,7 +94,7 @@ def convert_xliff_to_json(input_file, output_dir, locale, locale_two_letter_code
converted_translations = {}
for resname, target in sorted_translations:
- converted_translations[resname] = generate_icu_pattern(target)
+ converted_translations[resname] = generate_icu_pattern(target, glossary_dict)
# Generate output files
output_locale = LOCALE_PATH_MAPPING.get(locale, LOCALE_PATH_MAPPING.get(locale_two_letter_code, locale_two_letter_code))
@@ -112,16 +108,10 @@ def convert_xliff_to_json(input_file, output_dir, locale, locale_two_letter_code
file.write('\n')
return output_locale
-def convert_non_translatable_strings_to_type_script(input_file, output_path, exported_locales, rtl_languages):
- if not os.path.exists(input_file):
- raise FileNotFoundError(f"Could not find '{input_file}' in raw translations directory")
- # Process the non-translatable string input
- non_translatable_strings_data = {}
- with open(input_file, 'r', encoding="utf-8") as file:
- non_translatable_strings_data = json.load(file)
- entries = non_translatable_strings_data['data']
+def convert_non_translatable_strings_to_type_script(input_file: str, output_path: str, exported_locales: List[str], rtl_languages: List[str]):
+ glossary_dict = load_glossary_dict(input_file)
rtl_locales = sorted([lang["twoLettersCode"] for lang in rtl_languages])
# Output the file in the desired format
@@ -132,9 +122,8 @@ def convert_non_translatable_strings_to_type_script(input_file, output_path, exp
with open(output_path, 'w', encoding='utf-8') as file:
file.write('export enum LOCALE_DEFAULTS {\n')
- for entry in entries:
- key = entry['data']['note']
- text = entry['data']['text']
+ for key in glossary_dict:
+ text = glossary_dict[key]
file.write(f" {key} = '{text}',\n")
file.write('}\n')
@@ -151,40 +140,27 @@ def convert_non_translatable_strings_to_type_script(input_file, output_path, exp
file.write('\n')
-def convert_all_files(input_directory):
- # Extract the project information
- print(f"\033[2K{Fore.WHITE}⏳ Processing project info...{Style.RESET_ALL}", end='\r')
- project_info_file = os.path.join(input_directory, "_project_info.json")
- if not os.path.exists(project_info_file):
- raise FileNotFoundError(f"Could not find '{project_info_file}' in raw translations directory")
-
- project_details = {}
- with open(project_info_file, 'r', encoding="utf-8") as file:
- project_details = json.load(file)
-
- # Extract the language info and sort the target languages alphabetically by locale
- source_language = project_details['data']['sourceLanguage']
- target_languages = project_details['data']['targetLanguages']
- target_languages.sort(key=lambda x: x['locale'])
- num_languages = len(target_languages)
- print(f"\033[2K{Fore.GREEN}✅ Project info processed, {num_languages} languages will be converted{Style.RESET_ALL}")
+def convert_all_files(input_directory: str):
+ setup_values = setup_generation(input_directory)
+ source_language, rtl_languages, non_translatable_strings_file, target_languages = setup_values.values()
# Convert the XLIFF data to the desired format
print(f"\033[2K{Fore.WHITE}⏳ Converting translations to target format...{Style.RESET_ALL}", end='\r')
exported_locales = []
+ glossary_dict = load_glossary_dict(non_translatable_strings_file)
+
for language in [source_language] + target_languages:
lang_locale = language['locale']
lang_two_letter_code = language['twoLettersCode']
print(f"\033[2K{Fore.WHITE}⏳ Converting translations for {lang_locale} to target format...{Style.RESET_ALL}", end='\r')
input_file = os.path.join(input_directory, f"{lang_locale}.xliff")
- exported_as = convert_xliff_to_json(input_file, TRANSLATIONS_OUTPUT_DIRECTORY, lang_locale, lang_two_letter_code)
+ exported_as = convert_xliff_to_json(input_file, TRANSLATIONS_OUTPUT_DIRECTORY, lang_locale, lang_two_letter_code, glossary_dict)
exported_locales.append(exported_as)
print(f"\033[2K{Fore.GREEN}✅ All conversions complete{Style.RESET_ALL}")
# Convert the non-translatable strings to the desired format
print(f"\033[2K{Fore.WHITE}⏳ Generating static strings file...{Style.RESET_ALL}", end='\r')
- non_translatable_strings_file = os.path.join(input_directory, "_non_translatable_strings.json")
- rtl_languages = [lang for lang in target_languages if lang["textDirection"] == "rtl"]
+
convert_non_translatable_strings_to_type_script(non_translatable_strings_file, NON_TRANSLATABLE_STRINGS_OUTPUT_PATH, exported_locales, rtl_languages)
print(f"\033[2K{Fore.GREEN}✅ Static string generation complete{Style.RESET_ALL}")
diff --git a/crowdin/generate_ios_strings.py b/crowdin/generate_ios_strings.py
index 629e0cc..eb077f1 100644
--- a/crowdin/generate_ios_strings.py
+++ b/crowdin/generate_ios_strings.py
@@ -5,8 +5,12 @@
import argparse
import html
from pathlib import Path
-from colorama import Fore, Style, init
+from colorama import Fore, Style
from datetime import datetime
+from generate_shared import load_glossary_dict, clean_string, setup_generation
+from typing import Dict
+
+AUTO_REPLACE_STATIC_STRINGS = False
# It seems that Xcode uses different language codes and doesn't support all of the languages we get from Crowdin
# (at least in the variants that Crowdin is specifying them in) so need to map/exclude them in order to build correctly
@@ -54,7 +58,7 @@ def parse_xliff(file_path):
target_language = file_elem.get('target-language')
if target_language is None:
raise ValueError(f"Missing target-language in file: {file_path}")
-
+
if target_language in LANGUAGE_MAPPING:
target_language = LANGUAGE_MAPPING[target_language]
@@ -65,7 +69,7 @@ def parse_xliff(file_path):
for trans_unit in group.findall('ns:trans-unit', namespaces=namespace):
if resname is None:
resname = trans_unit.get('resname') or trans_unit.get('id')
-
+
target = trans_unit.find('ns:target', namespaces=namespace)
source = trans_unit.find('ns:source', namespaces=namespace)
context_group = trans_unit.find('ns:context-group', namespaces=namespace)
@@ -103,17 +107,11 @@ def parse_xliff(file_path):
return translations, target_language
-def clean_string(text):
- # Note: any changes done for all platforms needs most likely to be done on crowdin side.
- # So we don't want to replace -> with → for instance, we want the crowdin strings to not have those at all.
- text = html.unescape(text) # Unescape any HTML escaping
- return text.strip() # Strip whitespace
-
-def convert_placeholders_for_plurals(resname, translations):
+def convert_placeholders_for_plurals(translations: Dict[str,str], glossary_dict: Dict[str,str]):
# Replace {count} with %lld for iOS
converted_translations = {}
for form, value in translations.items():
- converted_translations[form] = clean_string(value.replace('{count}', '%lld'))
+ converted_translations[form] = clean_string(value, False, glossary_dict if AUTO_REPLACE_STATIC_STRINGS else {}, {'{count}': '%lld'})
return converted_translations
@@ -125,7 +123,7 @@ def sort_dict_case_insensitive(data):
else:
return data
-def convert_xliff_to_string_catalog(input_dir, output_dir, source_language, target_languages):
+def convert_xliff_to_string_catalog(input_dir, output_dir, source_language, target_languages, glossary_dict):
string_catalog = {
"sourceLanguage": "en",
"strings": {},
@@ -138,7 +136,7 @@ def convert_xliff_to_string_catalog(input_dir, output_dir, source_language, targ
# then the output will differ from what Xcode generates)
all_languages = [source_language] + target_mapped_languages
sorted_languages = sorted(all_languages, key=lambda x: x['mapped_id'])
-
+
for language in sorted_languages:
lang_locale = language['locale']
input_file = os.path.join(input_dir, f"{lang_locale}.xliff")
@@ -152,7 +150,7 @@ def convert_xliff_to_string_catalog(input_dir, output_dir, source_language, targ
raise ValueError(f"Error processing locale {lang_locale}: {str(e)}")
print(f"\033[2K{Fore.WHITE}⏳ Converting translations for {target_language} to target format...{Style.RESET_ALL}", end='\r')
-
+
for resname, translation in translations.items():
if resname not in string_catalog["strings"]:
string_catalog["strings"][resname] = {
@@ -161,7 +159,7 @@ def convert_xliff_to_string_catalog(input_dir, output_dir, source_language, targ
}
if isinstance(translation, dict): # It's a plural group
- converted_translations = convert_placeholders_for_plurals(resname, translation)
+ converted_translations = convert_placeholders_for_plurals(translation, glossary_dict)
# Check if any of the translations contain '{count}'
contains_count = any('{count}' in value for value in translation.values())
@@ -207,7 +205,7 @@ def convert_xliff_to_string_catalog(input_dir, output_dir, source_language, targ
string_catalog["strings"][resname]["localizations"][target_language] = {
"stringUnit": {
"state": "translated",
- "value": clean_string(translation)
+ "value": clean_string(translation, False, glossary_dict if AUTO_REPLACE_STATIC_STRINGS else {}, {})
}
}
@@ -225,15 +223,7 @@ def convert_xliff_to_string_catalog(input_dir, output_dir, source_language, targ
json.dump(sorted_string_catalog, f, ensure_ascii=False, indent=2, separators=(',', ' : '))
def convert_non_translatable_strings_to_swift(input_file, output_path):
- if not os.path.exists(input_file):
- raise FileNotFoundError(f"Could not find '{input_file}' in raw translations directory")
-
- # Process the non-translatable string input
- non_translatable_strings_data = {}
- with open(input_file, 'r', encoding="utf-8") as file:
- non_translatable_strings_data = json.load(file)
-
- entries = non_translatable_strings_data['data']
+ glossary_dict = load_glossary_dict(input_file)
# Output the file in the desired format
Path(output_path).parent.mkdir(parents=True, exist_ok=True)
@@ -245,40 +235,24 @@ def convert_non_translatable_strings_to_swift(input_file, output_path):
file.write('// stringlint:disable\n')
file.write('\n')
file.write('public enum Constants {\n')
- for entry in entries:
- key = entry['data']['note']
- text = entry['data']['text']
+ for key in glossary_dict:
+ text = glossary_dict[key]
file.write(f' public static let {key}: String = "{text}"\n')
file.write('}\n')
def convert_all_files(input_directory):
- # Extract the project information
- print(f"\033[2K{Fore.WHITE}⏳ Processing project info...{Style.RESET_ALL}", end='\r')
- project_info_file = os.path.join(input_directory, "_project_info.json")
- if not os.path.exists(project_info_file):
- raise FileNotFoundError(f"Could not find '{project_info_file}' in raw translations directory")
-
- project_details = {}
- with open(project_info_file, 'r', encoding="utf-8") as file:
- project_details = json.load(file)
-
- # Extract the language info and sort the target languages alphabetically by locale
- source_language = project_details['data']['sourceLanguage']
- target_languages = project_details['data']['targetLanguages']
- target_languages.sort(key=lambda x: x['locale'])
- num_languages = len(target_languages)
- print(f"\033[2K{Fore.GREEN}✅ Project info processed, {num_languages} languages will be converted{Style.RESET_ALL}")
-
- # Convert the non-translatable strings to the desired format
+ setup_values = setup_generation(input_directory)
+ source_language, rtl_languages, non_translatable_strings_file, target_languages = setup_values.values()
+ glossary_dict = load_glossary_dict(non_translatable_strings_file)
print(f"\033[2K{Fore.WHITE}⏳ Generating static strings file...{Style.RESET_ALL}", end='\r')
- non_translatable_strings_file = os.path.join(input_directory, "_non_translatable_strings.json")
+
convert_non_translatable_strings_to_swift(non_translatable_strings_file, NON_TRANSLATABLE_STRINGS_OUTPUT_PATH)
print(f"\033[2K{Fore.GREEN}✅ Static string generation complete{Style.RESET_ALL}")
# Convert the XLIFF data to the desired format
print(f"\033[2K{Fore.WHITE}⏳ Converting translations to target format...{Style.RESET_ALL}", end='\r')
- convert_xliff_to_string_catalog(input_directory, TRANSLATIONS_OUTPUT_DIRECTORY, source_language, target_languages)
+ convert_xliff_to_string_catalog(input_directory, TRANSLATIONS_OUTPUT_DIRECTORY, source_language, target_languages,glossary_dict)
print(f"\033[2K{Fore.GREEN}✅ All conversions complete{Style.RESET_ALL}")
if __name__ == "__main__":
diff --git a/crowdin/generate_shared.py b/crowdin/generate_shared.py
new file mode 100644
index 0000000..a455f37
--- /dev/null
+++ b/crowdin/generate_shared.py
@@ -0,0 +1,80 @@
+from typing import Dict, List
+import html
+import json
+import os
+from colorama import Fore, Style
+
+def clean_string(text: str, is_android: bool, glossary_dict: Dict[str, str], extra_replace_dict :Dict[str,str]):
+ if is_android:
+ # Note: any changes done for all platforms needs most likely to be done on crowdin side.
+ # So we don't want to replace -> with → for instance, we want the crowdin strings to not have those at all.
+ # We can use standard XML escaped characters for most things (since XLIFF is an XML format) but
+ # want the following cases escaped in a particular way (for android only)
+ text = text.replace("'", r"\'")
+ text = text.replace("&quot;", "\"")
+ text = text.replace("\"", "\\\"")
+ text = text.replace("&lt;b&gt;", "<b>")
+ text = text.replace("&lt;/b&gt;", "</b>")
+ text = text.replace("&lt;/br&gt;", "\\n")
+ text = text.replace("&#10;", "\\n")
+ text = text.replace("&amp;", "&") # Assume any remaining ampersands are desired
+ else:
+ text = html.unescape(text) # Unescape any HTML escaping
+
+ text = text.strip() # Strip whitespace
+
+ # replace all the defined constants (from crowdin's glossary) in the string
+ for glossary_key in glossary_dict:
+ text = text.replace("{" + glossary_key + "}", glossary_dict[glossary_key])
+
+ # if extra_replace_dict has keys, replace those too
+ for extra_key in extra_replace_dict:
+ text = text.replace(extra_key, extra_replace_dict[extra_key])
+ return text
+
+
+def load_glossary_dict(input_file: str) -> Dict[str, str]:
+ if not os.path.exists(input_file):
+ raise FileNotFoundError(f"Could not find '{input_file}' in raw translations directory")
+
+ # Process the non-translatable string input
+ non_translatable_strings_data = {}
+ with open(input_file, 'r', encoding="utf-8") as file:
+ non_translatable_strings_data = json.load(file)
+
+ non_translatable_strings_entries = non_translatable_strings_data['data']
+ glossary_dict = {
+ entry['data']['note']: entry['data']['text']
+ for entry in non_translatable_strings_entries
+ }
+
+ return glossary_dict
+
+
+def setup_generation(input_directory: str):
+ # Extract the project information
+ print(f"\033[2K{Fore.WHITE}⏳ Processing project info...{Style.RESET_ALL}", end='\r')
+ project_info_file = os.path.join(input_directory, "_project_info.json")
+ if not os.path.exists(project_info_file):
+ raise FileNotFoundError(f"Could not find '{project_info_file}' in raw translations directory")
+
+ project_details = {}
+ with open(project_info_file, 'r', encoding="utf-8") as file:
+ project_details = json.load(file)
+
+ non_translatable_strings_file = os.path.join(input_directory, "_non_translatable_strings.json")
+
+ # Extract the language info and sort the target languages alphabetically by locale
+ source_language: str = project_details['data']['sourceLanguage']
+ target_languages: List[str] = project_details['data']['targetLanguages']
+ target_languages.sort(key=lambda x: x['locale'])
+ num_languages = len(target_languages)
+ print(f"\033[2K{Fore.GREEN}✅ Project info processed, {num_languages} languages will be converted{Style.RESET_ALL}")
+
+ # Convert the non-translatable strings to the desired format
+ rtl_languages: List[str] = [lang for lang in target_languages if lang["textDirection"] == "rtl"]
+
+ return {"source_language":source_language,
+ "rtl_languages": rtl_languages,
+ "non_translatable_strings_file":non_translatable_strings_file,
+ "target_languages": target_languages}