diff --git a/Jenkinsfile b/Jenkinsfile index b1696c262..ea9ba0384 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -26,7 +26,7 @@ pipeline { HY_TN_CACHE='/home/jenkins/TestData/text_norm/ci/grammars/03-12-24-0' MR_TN_CACHE='/home/jenkins/TestData/text_norm/ci/grammars/03-12-24-1' JA_TN_CACHE='/home/jenkins/TestData/text_norm/ci/grammars/10-17-24-1' - HI_TN_CACHE='/home/jenkins/TestData/text_norm/ci/grammars/10-31-25-0' + HI_TN_CACHE='/home/jenkins/TestData/text_norm/ci/grammars/01-12-26-0' DEFAULT_TN_CACHE='/home/jenkins/TestData/text_norm/ci/grammars/06-08-23-0' } stages { diff --git a/nemo_text_processing/text_normalization/hi/data/address/__init__.py b/nemo_text_processing/text_normalization/hi/data/address/__init__.py new file mode 100644 index 000000000..4fc25d0d3 --- /dev/null +++ b/nemo_text_processing/text_normalization/hi/data/address/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/nemo_text_processing/text_normalization/hi/data/address/cities.tsv b/nemo_text_processing/text_normalization/hi/data/address/cities.tsv new file mode 100644 index 000000000..0199bf0cb --- /dev/null +++ b/nemo_text_processing/text_normalization/hi/data/address/cities.tsv @@ -0,0 +1,36 @@ +अमरावती +ईटानगर +दिसपुर +पटना +रायपुर +पणजी +गांधीनगर +चंडीगढ़ +शिमला +रांची +बेंगलुरु +तिरुवनंतपुरम +भोपाल +मुंबई +इम्फाल +शिलांग +आइजोल +कोहिमा +भुवनेश्वर +जयपुर +गंगटोक +चेन्नई +हैदराबाद +अगरतला +लखनऊ +देहरादून +कोलकाता +पोर्ट ब्लेयर +दमन +नई दिल्ली +श्रीनगर +जम्मू +लेह +कारगिल +कवरत्ती +पुडुचेरी diff --git a/nemo_text_processing/text_normalization/hi/data/address/context.tsv b/nemo_text_processing/text_normalization/hi/data/address/context.tsv new file mode 100644 index 000000000..9faadaa3b --- /dev/null +++ b/nemo_text_processing/text_normalization/hi/data/address/context.tsv @@ -0,0 +1,48 @@ +हाउस +प्लॉट +बूथ +अपार्टमेंट +फ्लैट +यूनिट +टावर +कॉम्प्लेक्स +मंजिल +फ्लोर +ब्लॉक +सेक्टर +फेज +रोड +सड़क +मार्ग +स्ट्रीट +गली +राजमार्ग +ड्राइव +डिस्ट्रिक्ट +बाईपास +हाइवे +पार्कवे +कॉलोनी +नगर +पार्क +एस्टेट +बोलवार्ड +मार्केट +सेंटर +पिन +गांव +पास +ब्रिगेड +नियर +स्क्वेर +मॉल +टॉवर +इंस्टीट्यूट +पिलर +मेट्रो +एवेन्यू +वेस्ट +सामने +पीछे +वीया +आर डी \ No newline at end of file diff --git a/nemo_text_processing/text_normalization/hi/data/address/en_to_hi_mapping.tsv b/nemo_text_processing/text_normalization/hi/data/address/en_to_hi_mapping.tsv new file mode 100644 index 000000000..15929b547 --- /dev/null +++ b/nemo_text_processing/text_normalization/hi/data/address/en_to_hi_mapping.tsv @@ -0,0 +1,2 @@ +street स्ट्रीट +southern सदर्न \ No newline at end of file diff --git a/nemo_text_processing/text_normalization/hi/data/address/letters.tsv b/nemo_text_processing/text_normalization/hi/data/address/letters.tsv new file mode 100644 index 000000000..68889ca3f --- /dev/null +++ b/nemo_text_processing/text_normalization/hi/data/address/letters.tsv @@ -0,0 +1,26 @@ +A ए +B बी +C सी +D डी +E ई +F एफ +G जी +H एच +I आई +J जे +K के +L 
एल +M एम +N एन +O ओ +P पी +Q क्यू +R आर +S एस +T टी +U यू +V वी +W डब्ल्यू +X एक्स +Y वाई +Z ज़ेड \ No newline at end of file diff --git a/nemo_text_processing/text_normalization/hi/data/address/special_characters.tsv b/nemo_text_processing/text_normalization/hi/data/address/special_characters.tsv new file mode 100644 index 000000000..ca5b068bd --- /dev/null +++ b/nemo_text_processing/text_normalization/hi/data/address/special_characters.tsv @@ -0,0 +1,2 @@ +- हाइफ़न +/ बटा \ No newline at end of file diff --git a/nemo_text_processing/text_normalization/hi/data/address/states.tsv b/nemo_text_processing/text_normalization/hi/data/address/states.tsv new file mode 100644 index 000000000..1e2b6c358 --- /dev/null +++ b/nemo_text_processing/text_normalization/hi/data/address/states.tsv @@ -0,0 +1,36 @@ +आंध्र प्रदेश +अरुणाचल प्रदेश +असम +बिहार +छत्तीसगढ़ +गोवा +गुजरात +हरियाणा +हिमाचल प्रदेश +झारखंड +कर्नाटक +केरल +मध्य प्रदेश +महाराष्ट्र +मणिपुर +मेघालय +मिज़ोरम +नागालैंड +ओडिशा +पंजाब +राजस्थान +सिक्किम +तमिलनाडु +तेलंगाना +त्रिपुरा +उत्तर प्रदेश +उत्तराखंड +पश्चिम बंगाल +अंडमान और निकोबार द्वीप समूह +चंडीगढ़ +दादरा और नगर हवेली और दमन और दीव +दिल्ली +जम्मू और कश्मीर +लद्दाख +लक्षद्वीप +पुडुचेरी diff --git a/nemo_text_processing/text_normalization/hi/data/measure/quarterly_units_map.tsv b/nemo_text_processing/text_normalization/hi/data/measure/quarterly_units_map.tsv index dc20bcb21..e190a80ef 100644 --- a/nemo_text_processing/text_normalization/hi/data/measure/quarterly_units_map.tsv +++ b/nemo_text_processing/text_normalization/hi/data/measure/quarterly_units_map.tsv @@ -8,4 +8,3 @@ hp हॉर्सपॉवर d दिन month महीना months महीने - diff --git a/nemo_text_processing/text_normalization/hi/data/ordinal/en_to_hi_digit.tsv b/nemo_text_processing/text_normalization/hi/data/ordinal/en_to_hi_digit.tsv new file mode 100644 index 000000000..a89e99b3c --- /dev/null +++ b/nemo_text_processing/text_normalization/hi/data/ordinal/en_to_hi_digit.tsv @@ -0,0 +1,10 @@ +0 ० +1 १ +2 २ +3 ३ +4 ४ +5 ५ +6 ६ +7 ७ +8 ८ +9 ९ diff --git a/nemo_text_processing/text_normalization/hi/data/ordinal/exceptions.tsv b/nemo_text_processing/text_normalization/hi/data/ordinal/exceptions.tsv index bfe5738d0..26a5efc1b 100644 --- a/nemo_text_processing/text_normalization/hi/data/ordinal/exceptions.tsv +++ b/nemo_text_processing/text_normalization/hi/data/ordinal/exceptions.tsv @@ -6,7 +6,20 @@ ३री तीसरी ४था चौथा ४थी चौथी -५वां पाँचवां -५वीं पाँचवीं ६ठा छठा ६ठी छठी +१st फ़र्स्ट +२nd सेकंड +३rd थर्ड +४th फ़ोर्थ +५th फ़िफ्थ +६th सिक्स्थ +७th सेवंथ +८th एटथ +९th नाइंथ +१०th टेंथ +११th इलेवंथ +१२th ट्वेल्फ्थ +१३th थर्टींथ +१४th फोर्टींथ +१५th फिफ्टींथ diff --git a/nemo_text_processing/text_normalization/hi/graph_utils.py b/nemo_text_processing/text_normalization/hi/graph_utils.py index 5bbc736fd..d498ae489 100644 --- a/nemo_text_processing/text_normalization/hi/graph_utils.py +++ b/nemo_text_processing/text_normalization/hi/graph_utils.py @@ -37,6 +37,24 @@ HI_SADHE = "साढ़े" # half more (X.5) HI_PAUNE = "पौने" # quarter less (0.75) +# Hindi decimal representations +HI_POINT_FIVE = ".५" # .5 +HI_ONE_POINT_FIVE = "१.५" # 1.5 +HI_TWO_POINT_FIVE = "२.५" # 2.5 +HI_DECIMAL_25 = ".२५" # .25 +HI_DECIMAL_75 = ".७५" # .75 + +# Symbol constants +HI_BY = "बाई" +LOWERCASE_X = "x" +UPPERCASE_X = "X" +ASTERISK = "*" +HYPHEN = "-" +SLASH = "/" +COMMA = "," +PERIOD = "." 
+HI_PERIOD = "।"
+
 NEMO_LOWER = pynini.union(*string.ascii_lowercase).optimize()
 NEMO_UPPER = pynini.union(*string.ascii_uppercase).optimize()
 NEMO_ALPHA = pynini.union(NEMO_LOWER, NEMO_UPPER).optimize()
diff --git a/nemo_text_processing/text_normalization/hi/taggers/measure.py b/nemo_text_processing/text_normalization/hi/taggers/measure.py
index b7d74731e..31ae54dc0 100644
--- a/nemo_text_processing/text_normalization/hi/taggers/measure.py
+++ b/nemo_text_processing/text_normalization/hi/taggers/measure.py
@@ -16,24 +16,39 @@
 from pynini.lib import pynutil
 
 from nemo_text_processing.text_normalization.hi.graph_utils import (
+    ASTERISK,
+    COMMA,
+    HI_BY,
+    HI_DECIMAL_25,
+    HI_DECIMAL_75,
     HI_DEDH,
     HI_DHAI,
+    HI_ONE_POINT_FIVE,
     HI_PAUNE,
+    HI_PERIOD,
+    HI_POINT_FIVE,
     HI_SADHE,
     HI_SAVVA,
+    HI_TWO_POINT_FIVE,
+    HYPHEN,
+    INPUT_LOWER_CASED,
+    LOWERCASE_X,
+    NEMO_CHAR,
+    NEMO_DIGIT,
+    NEMO_HI_DIGIT,
+    NEMO_NOT_SPACE,
     NEMO_SPACE,
+    NEMO_WHITE_SPACE,
+    PERIOD,
+    SLASH,
+    UPPERCASE_X,
     GraphFst,
+    capitalized_input_graph,
     delete_space,
     insert_space,
 )
 from nemo_text_processing.text_normalization.hi.utils import get_abs_path
 
-HI_POINT_FIVE = ".५"  # .5
-HI_ONE_POINT_FIVE = "१.५"  # 1.5
-HI_TWO_POINT_FIVE = "२.५"  # 2.5
-HI_DECIMAL_25 = ".२५"  # .25
-HI_DECIMAL_75 = ".७५"  # .75
-
 digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
 teens_ties = pynini.string_file(get_abs_path("data/numbers/teens_and_ties.tsv"))
 teens_and_ties = pynutil.add_weight(teens_ties, -0.1)
@@ -44,6 +59,7 @@ class MeasureFst(GraphFst):
     Finite state transducer for classifying measure, suppletive aware, e.g.
         -१२kg -> measure { negative: "true" cardinal { integer: "बारह" } units: "किलोग्राम" }
         -१२.२kg -> measure { decimal { negative: "true" integer_part: "बारह" fractional_part: "दो"} units: "किलोग्राम" }
+        मुंबई ८८४४०४ -> measure { units: "address" cardinal { integer: "मुंबई आठ आठ चार चार शून्य चार" } preserve_order: true }
 
     Args:
         cardinal: CardinalFst
@@ -52,7 +68,139 @@ class MeasureFst(GraphFst):
         for False multiple transduction are generated (used for audio-based normalization)
     """
 
-    def __init__(self, cardinal: GraphFst, decimal: GraphFst):
+    def get_structured_address_graph(self, ordinal: GraphFst, input_case: str):
+        """
+        Minimal address tagger for state/city + pincode patterns only.
+        Kept deliberately small so the grammar stays compact and fast to compile.
+
+        Examples:
+            "मुंबई ८८४४०४" -> "मुंबई आठ आठ चार चार शून्य चार"
+            "गोवा १२३४५६" -> "गोवा एक दो तीन चार पाँच छह"
+        """
+        # State/city keywords
+        states = pynini.string_file(get_abs_path("data/address/states.tsv"))
+        cities = pynini.string_file(get_abs_path("data/address/cities.tsv"))
+        state_city_names = pynini.union(states, cities).optimize()
+
+        # Digit mappings
+        num_token = (
+            digit
+            | pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
+            | pynini.string_file(get_abs_path("data/telephone/number.tsv"))
+        ).optimize()
+
+        # Pincode (6 digits)
+        pincode = (num_token + pynini.closure(insert_space + num_token, 5, 5)).optimize()
+
+        # Street number (1-4 digits)
+        street_num = (num_token + pynini.closure(insert_space + num_token, 0, 3)).optimize()
+
+        # Text: words, each followed by a separator (optional comma + space)
+        any_digit = pynini.union(NEMO_HI_DIGIT, NEMO_DIGIT).optimize()
+        punctuation = pynini.union(COMMA, PERIOD, HI_PERIOD).optimize()
+        word_char = pynini.difference(NEMO_NOT_SPACE, pynini.union(any_digit, punctuation)).optimize()
+        word = pynini.closure(word_char, 1)
+
+        # Separator: optional comma followed by mandatory space
+        sep = pynini.closure(pynini.accep(COMMA), 0, 1) + pynini.accep(NEMO_SPACE)
+        word_with_sep = word + sep
+        text = pynini.closure(word_with_sep, 0, 5).optimize()
+
+        # Pattern: [street_num + sep]? text state/city [space pincode]
+        pattern = (
+            pynini.closure(street_num + sep, 0, 1)
+            + text
+            + state_city_names
+            + pynini.closure(pynini.accep(NEMO_SPACE) + pincode, 0, 1)
+        ).optimize()
+
+        graph = (
+            pynutil.insert('units: "address" cardinal { integer: "')
+            + pattern
+            + pynutil.insert('" } preserve_order: true')
+        )
+        return pynutil.add_weight(graph, 1.0).optimize()
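Note: the structured pattern above reduces to "optional street number, a few words, a state/city name, optional six-digit pincode read digit by digit". Below is a minimal standalone sketch of that digit-by-digit expansion, assuming pynini is installed; the inline digit map and the `apply` helper are hypothetical stand-ins for the repository's TSV files and pipeline, not the actual tagger.

```python
import pynini
from pynini.lib import pynutil

insert_space = pynutil.insert(" ")

# Hypothetical inline digit map; the real tagger loads digit.tsv, zero.tsv and telephone/number.tsv.
digit_map = pynini.string_map([("०", "शून्य"), ("१", "एक"), ("८", "आठ"), ("४", "चार")])

# City keyword kept as-is, PIN code read out digit by digit (exactly 6 digits).
city = pynini.accep("मुंबई")
pincode = digit_map + pynini.closure(insert_space + digit_map, 5, 5)
address = city + pynini.accep(" ") + pincode


def apply(text: str, fst: pynini.Fst) -> str:
    # Helper for this sketch only; assumes a recent pynini (project("output")).
    lattice = pynini.compose(pynini.escape(text), fst)
    return pynini.shortestpath(lattice).project("output").string()


print(apply("मुंबई ८८४४०४", address))  # -> मुंबई आठ आठ चार चार शून्य चार
```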
+
+    def get_address_graph(self, ordinal: GraphFst, input_case: str):
+        """
+        Address tagger that converts digits/hyphens/slashes character-by-character
+        when address context keywords are present.
+        English words and ordinals are converted to Hindi transliterations.
+
+        Examples:
+            "७०० ओक स्ट्रीट" -> "सात शून्य शून्य ओक स्ट्रीट"
+            "६६-४ पार्क रोड" -> "छह छह हाइफ़न चार पार्क रोड"
+        """
+        ordinal_graph = ordinal.graph
+        # Alphanumeric to word mappings (digits, special characters, telephone digits)
+        char_to_word = (
+            digit
+            | pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
+            | pynini.string_file(get_abs_path("data/address/special_characters.tsv"))
+            | pynini.string_file(get_abs_path("data/telephone/number.tsv"))
+        ).optimize()
+        # Letter to transliterated word mapping (A -> ए, B -> बी, ...)
+        letter_to_word = pynini.string_file(get_abs_path("data/address/letters.tsv"))
+        address_keywords_hi = pynini.string_file(get_abs_path("data/address/context.tsv"))
+
+        # English address keywords with Hindi translation (case-insensitive)
+        en_to_hi_map = pynini.string_file(get_abs_path("data/address/en_to_hi_mapping.tsv"))
+        if input_case != INPUT_LOWER_CASED:
+            en_to_hi_map = capitalized_input_graph(en_to_hi_map)
+        address_keywords_en = pynini.project(en_to_hi_map, "input")
+        address_keywords = pynini.union(address_keywords_hi, address_keywords_en)
+
+        # Alphanumeric processing: treat digits, letters, and -/ as convertible tokens
+        single_digit = pynini.union(NEMO_DIGIT, NEMO_HI_DIGIT).optimize()
+        special_chars = pynini.union(HYPHEN, SLASH).optimize()
+        single_letter = pynini.project(letter_to_word, "input").optimize()
+        convertible_char = pynini.union(single_digit, special_chars, single_letter)
+        non_space_char = pynini.difference(
+            NEMO_CHAR, pynini.union(NEMO_WHITE_SPACE, convertible_char, pynini.accep(COMMA))
+        ).optimize()
+
+        # Token processors with weights: prefer ordinals and known English→Hindi words
+        # Delete space before comma to avoid Sparrowhawk "sil" issue
+        comma_processor = pynutil.add_weight(delete_space + pynini.accep(COMMA), 0.0)
+        ordinal_processor = pynutil.add_weight(insert_space + ordinal_graph, -5.0)
+        english_word_processor = pynutil.add_weight(insert_space + en_to_hi_map, -3.0)
+        letter_processor = pynutil.add_weight(insert_space + pynini.compose(single_letter, letter_to_word), 0.5)
+        digit_char_processor = pynutil.add_weight(insert_space + pynini.compose(convertible_char, char_to_word), 0.0)
+        other_word_processor = pynutil.add_weight(insert_space + pynini.closure(non_space_char, 1), 0.1)
+
+        token_processor = (
+            ordinal_processor
+            | english_word_processor
+            | letter_processor
+            | digit_char_processor
+            | pynini.accep(NEMO_SPACE)
+            | comma_processor
+            | other_word_processor
+        ).optimize()
+        full_string_processor = pynini.closure(token_processor, 1).optimize()
+
+        # Window-based context matching around address keywords for robust detection
+        word_boundary = pynini.union(
+            NEMO_WHITE_SPACE, pynini.accep(COMMA), pynini.accep(HI_PERIOD), pynini.accep(PERIOD)
+        ).optimize()
+        non_boundary_char = pynini.difference(NEMO_CHAR, word_boundary)
+        word = pynini.closure(non_boundary_char, 1).optimize()
+        word_with_boundary = word + pynini.closure(word_boundary)
+        window = pynini.closure(word_with_boundary, 0, 5).optimize()
+        boundary = pynini.closure(word_boundary, 1).optimize()
+        input_pattern = pynini.union(
+            address_keywords + boundary + window,
+            window + boundary + address_keywords + pynini.closure(boundary + window, 0, 1),
+        ).optimize()
+        address_graph = pynini.compose(input_pattern, full_string_processor).optimize()
+        graph = (
+            pynutil.insert('units: "address" cardinal { integer: "')
+            + address_graph
+            + pynutil.insert('" } preserve_order: true')
+        )
+        return pynutil.add_weight(graph, 1.05).optimize()
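Note: the heart of `get_address_graph` is the final `pynini.compose`: an acceptor (`input_pattern`) that only matches strings with an address keyword nearby is composed with a string-wide token rewriter (`full_string_processor`), so the character-level rewrite fires only in address contexts. A stripped-down sketch of that gate-then-rewrite idea follows; the two-entry keyword and digit inventories, the negative weight, and the `apply` helper are assumptions for illustration, not the repository graphs.

```python
import pynini
from pynini.lib import byte, pynutil

sigma_star = pynini.closure(byte.BYTE)

# Hypothetical two-entry inventories; the real tagger loads context.tsv and the digit TSVs.
keywords = pynini.union("रोड", "स्ट्रीट")
digit = pynutil.add_weight(
    pynini.string_map([("७", "सात"), ("०", "शून्य"), ("६", "छह"), ("४", "चार")]), -1.0
)
house_number = digit + pynini.closure(pynutil.insert(" ") + digit)

# Gate: accept only inputs that contain an address keyword somewhere.
gate = sigma_star + keywords + sigma_star

# Rewriter: expand a leading house number digit by digit, pass the rest through unchanged.
rewriter = house_number + sigma_star

# Composition restricts the rewriter to keyword-bearing inputs, mirroring get_address_graph.
graph = pynini.compose(gate, rewriter).optimize()


def apply(text: str, fst: pynini.Fst) -> str:
    lattice = pynini.compose(pynini.escape(text), fst)
    return pynini.shortestpath(lattice).project("output").string()


print(apply("७०० ओक रोड", graph))  # सात शून्य शून्य ओक रोड
# An input without a keyword (e.g. "७०० ओक बाग") fails the gate, so the composition is empty.
```

The negative weight on `digit` makes the shortest path prefer converting the whole number rather than stopping after the first digit, the same trick the real code uses when it weights `ordinal_processor` and `english_word_processor` below the generic processors.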
+
+    def __init__(self, cardinal: GraphFst, decimal: GraphFst, ordinal: GraphFst, input_case: str):
         super().__init__(name="measure", kind="classify")
 
         cardinal_graph = (
@@ -107,12 +255,11 @@ def __init__(self, cardinal: GraphFst, decimal: GraphFst):
             + pynutil.insert(NEMO_SPACE)
         )
 
-        # Handling symbols like x, X, *
         symbol_graph = pynini.string_map(
             [
-                ("x", "बाई"),
-                ("X", "बाई"),
-                ("*", "बाई"),
+                (LOWERCASE_X, HI_BY),
+                (UPPERCASE_X, HI_BY),
+                (ASTERISK, HI_BY),
             ]
         )
 
@@ -229,6 +376,9 @@ def __init__(self, cardinal: GraphFst, decimal: GraphFst):
             + pynutil.insert("\"")
         )
 
+        address_graph = self.get_address_graph(ordinal, input_case)
+        structured_address_graph = self.get_structured_address_graph(ordinal, input_case)
+
         graph = (
             pynutil.add_weight(graph_decimal, 0.1)
             | pynutil.add_weight(graph_cardinal, 0.1)
@@ -237,6 +387,8 @@ def __init__(self, cardinal: GraphFst, decimal: GraphFst):
             | pynutil.add_weight(graph_savva, -0.1)
             | pynutil.add_weight(graph_sadhe, -0.1)
            | pynutil.add_weight(graph_paune, -0.5)
+            | address_graph
+            | structured_address_graph
         )
 
         self.graph = graph.optimize()
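Note: the positive weights on `address_graph` (1.0) and `structured_address_graph` (1.05) only matter relative to the other branches: under shortest-path decoding the cheapest accepting path wins, so the address branches act as fallbacks that surface when no lighter measure branch accepts the input. A tiny illustration with stand-in transducers (not the real graphs), assuming pynini is available:

```python
import pynini
from pynini.lib import pynutil

# Stand-ins for the real branches; only the relative weights matter here.
measure_branch = pynutil.add_weight(pynini.cross("१२kg", "बारह किलोग्राम"), 0.1)
fallback_branch = pynutil.add_weight(pynini.cross("१२kg", "एक दो केजी"), 1.0)
graph = (measure_branch | fallback_branch).optimize()

lattice = pynini.compose("१२kg", graph)
# Tropical semiring: lower total weight wins, so 0.1 beats 1.0.
print(pynini.shortestpath(lattice).project("output").string())  # बारह किलोग्राम
```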
diff --git a/nemo_text_processing/text_normalization/hi/taggers/ordinal.py b/nemo_text_processing/text_normalization/hi/taggers/ordinal.py
index 5f1cefed4..b07c31392 100644
--- a/nemo_text_processing/text_normalization/hi/taggers/ordinal.py
+++ b/nemo_text_processing/text_normalization/hi/taggers/ordinal.py
@@ -15,7 +15,7 @@
 import pynini
 from pynini.lib import pynutil
 
-from nemo_text_processing.text_normalization.hi.graph_utils import GraphFst
+from nemo_text_processing.text_normalization.hi.graph_utils import NEMO_CHAR, GraphFst
 from nemo_text_processing.text_normalization.hi.taggers.cardinal import CardinalFst
 from nemo_text_processing.text_normalization.hi.utils import get_abs_path
 
@@ -39,11 +39,27 @@ def __init__(self, cardinal: CardinalFst, deterministic: bool = True):
         suffixes_fst = pynini.union(suffixes_list, suffixes_map)
         exceptions = pynini.string_file(get_abs_path("data/ordinal/exceptions.tsv"))
 
-        graph = cardinal.final_graph + suffixes_fst
+        en_to_hi_digits = pynini.string_file(get_abs_path("data/ordinal/en_to_hi_digit.tsv"))
+        digit_normalizer = pynini.cdrewrite(en_to_hi_digits, "", "", pynini.closure(NEMO_CHAR))
+
+        # Limit the cardinal graph to the ten-thousands range for faster compilation
+        limited_cardinal_graph = (
+            cardinal.digit
+            | cardinal.zero
+            | cardinal.teens_and_ties
+            | cardinal.graph_hundreds
+            | cardinal.graph_thousands
+            | cardinal.graph_ten_thousands
+        ).optimize()
+
+        graph = limited_cardinal_graph + suffixes_fst
         exceptions = pynutil.add_weight(exceptions, -0.1)
         graph = pynini.union(exceptions, graph)
 
-        final_graph = pynutil.insert("integer: \"") + graph + pynutil.insert("\"")
+        graph_with_normalization = pynini.compose(digit_normalizer, graph)
+        self.graph = graph_with_normalization.optimize()
+
+        final_graph = pynutil.insert("integer: \"") + graph_with_normalization + pynutil.insert("\"")
         final_graph = self.add_tokens(final_graph)
         self.fst = final_graph.optimize()
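Note: the ordinal change is a two-stage pipeline: a context-free `cdrewrite` first folds ASCII digits into Devanagari (so "1st" becomes "१st"), and the result is composed with the suffix/exceptions graph, which is why exceptions.tsv can key the new English-style entries on १st, २nd, and so on. A standalone sketch of that composition, with hypothetical two-entry maps in place of the TSV files:

```python
import pynini
from pynini.lib import byte

sigma_star = pynini.closure(byte.BYTE)

# Stage 1: "1st" -> "१st" (hypothetical subset of en_to_hi_digit.tsv)
en_to_hi = pynini.string_map([("1", "१"), ("2", "२")])
digit_normalizer = pynini.cdrewrite(en_to_hi, "", "", sigma_star)

# Stage 2: exceptions keyed on Devanagari digits (hypothetical subset of exceptions.tsv)
exceptions = pynini.string_map([("१st", "फ़र्स्ट"), ("२nd", "सेकंड")])

ordinal = pynini.compose(digit_normalizer, exceptions)
out = pynini.shortestpath(pynini.compose("1st", ordinal)).project("output").string()
print(out)  # फ़र्स्ट
```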
diff --git a/nemo_text_processing/text_normalization/hi/taggers/tokenize_and_classify.py b/nemo_text_processing/text_normalization/hi/taggers/tokenize_and_classify.py
index e3e6fc5d8..cb03ebce6 100644
--- a/nemo_text_processing/text_normalization/hi/taggers/tokenize_and_classify.py
+++ b/nemo_text_processing/text_normalization/hi/taggers/tokenize_and_classify.py
@@ -94,15 +94,15 @@ def __init__(
         timefst = TimeFst(cardinal=cardinal)
         time_graph = timefst.fst
 
-        measure = MeasureFst(cardinal=cardinal, decimal=decimal)
+        ordinal = OrdinalFst(cardinal=cardinal, deterministic=deterministic)
+        ordinal_graph = ordinal.fst
+
+        measure = MeasureFst(cardinal=cardinal, decimal=decimal, ordinal=ordinal, input_case=input_case)
         measure_graph = measure.fst
 
         money = MoneyFst(cardinal=cardinal)
         money_graph = money.fst
 
-        ordinal = OrdinalFst(cardinal=cardinal, deterministic=deterministic)
-        ordinal_graph = ordinal.fst
-
         whitelist_graph = WhiteListFst(
             input_case=input_case, deterministic=deterministic, input_file=whitelist
         ).fst
diff --git a/nemo_text_processing/text_normalization/hi/verbalizers/measure.py b/nemo_text_processing/text_normalization/hi/verbalizers/measure.py
index d6d17ac37..cba08057d 100644
--- a/nemo_text_processing/text_normalization/hi/verbalizers/measure.py
+++ b/nemo_text_processing/text_normalization/hi/verbalizers/measure.py
@@ -27,7 +27,7 @@ class MeasureFst(GraphFst):
 
     Args:
         decimal: DecimalFst
-        cardinal: CardinalFs
+        cardinal: CardinalFst
         deterministic: if True will provide a single transduction option,
         for False multiple transduction are generated (used for audio-based normalization)
     """
@@ -41,7 +41,12 @@ def __init__(self, cardinal: GraphFst, decimal: GraphFst):
             1,
         )
 
-        unit = pynutil.delete("units: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"") + delete_space
+        unit = (
+            pynutil.delete("units: \"")
+            + pynini.difference(pynini.closure(NEMO_NOT_QUOTE, 1), pynini.accep("address"))
+            + pynutil.delete("\"")
+            + delete_space
+        )
 
         graph_decimal = (
             pynutil.delete("decimal {")
@@ -64,6 +69,18 @@ def __init__(self, cardinal: GraphFst, decimal: GraphFst):
         )
 
         graph = (graph_cardinal | graph_decimal) + delete_space + insert_space + unit
+
+        preserve_order = pynutil.delete("preserve_order:") + delete_space + pynutil.delete("true") + delete_space
+        address = (
+            pynutil.delete("units: \"address\" ")
+            + delete_space
+            + graph_cardinal
+            + delete_space
+            + pynini.closure(preserve_order)
+        )
+
+        graph |= address
+
         self.decimal = graph_decimal
         delete_tokens = self.delete_tokens(graph)
         self.fst = delete_tokens.optimize()
diff --git a/nemo_text_processing/text_normalization/hi/verbalizers/post_processing.py b/nemo_text_processing/text_normalization/hi/verbalizers/post_processing.py
index d838ca6ff..595180241 100644
--- a/nemo_text_processing/text_normalization/hi/verbalizers/post_processing.py
+++ b/nemo_text_processing/text_normalization/hi/verbalizers/post_processing.py
@@ -16,13 +16,15 @@
 import os
 
 import pynini
+from pynini.lib import pynutil
 
-from nemo_text_processing.text_normalization.en.graph_utils import (
-    NEMO_NOT_SPACE,
+from nemo_text_processing.text_normalization.hi.graph_utils import (
+    MIN_NEG_WEIGHT,
+    NEMO_CHAR,
     NEMO_SIGMA,
-    delete_space,
     generator_main,
 )
+from nemo_text_processing.text_normalization.hi.taggers.punctuation import PunctuationFst
 from nemo_text_processing.utils.logging import logger
 
@@ -46,68 +48,49 @@ def __init__(self, cache_dir: str = None, overwrite_cache: bool = False):
             self.fst = pynini.Far(far_file, mode="r")["post_process_graph"]
             logger.info(f'Post processing graph was restored from {far_file}.')
         else:
-            self.set_punct_dict()
             self.fst = self.get_punct_postprocess_graph()
 
             if far_file:
                 generator_main(far_file, {"post_process_graph": self.fst})
 
-    def set_punct_dict(self):
-        self.punct_marks = {
-            "'": [
-                "'",
-                '´',
-                'ʹ',
-                'ʻ',
-                'ʼ',
-                'ʽ',
-                'ʾ',
-                'ˈ',
-                'ˊ',
-                'ˋ',
-                '˴',
-                'ʹ',
-                '΄',
-                '՚',
-                '՝',
-                'י',
-                '׳',
-                'ߴ',
-                'ߵ',
-                'ᑊ',
-                'ᛌ',
-                '᾽',
-                '᾿',
-                '`',
-                '´',
-                '῾',
-                '‘',
-                '’',
-                '‛',
-                '′',
-                '‵',
-                'ꞌ',
-                ''',
-                '`',
-                '𖽑',
-                '𖽒',
-            ],
-        }
-
     def get_punct_postprocess_graph(self):
         """
         Returns graph to post process punctuation marks.
 
-        {``} quotes are converted to {"}. Note, if there are spaces around single quote {'}, they will be kept.
-        By default, a space is added after a punctuation mark, and spaces are removed before punctuation marks.
+        By default, spaces are removed before punctuation marks like comma, period, etc.
         """
-
-        remove_space_around_single_quote = pynini.cdrewrite(
-            delete_space, NEMO_NOT_SPACE, NEMO_NOT_SPACE, pynini.closure(NEMO_SIGMA)
+        punct_marks_all = PunctuationFst().punct_marks
+
+        # Punctuation marks that should NOT have space before them
+        # (most punctuation except quotes, dashes, and opening brackets)
+        quotes = ["'", "\"", "«"]
+        dashes = ["-", "—"]
+        brackets = ["<", "{", "(", r"\["]
+        allow_space_before_punct = quotes + dashes + brackets
+
+        no_space_before_punct = [m for m in punct_marks_all if m not in allow_space_before_punct]
+        # Add Hindi-specific punctuation
+        no_space_before_punct.extend(["।", ",", ".", ";", ":", "!", "?"])
+        # Remove duplicates
+        no_space_before_punct = list(set(no_space_before_punct))
+        no_space_before_punct = pynini.union(*no_space_before_punct)
+
+        delete_space = pynutil.delete(" ")
+
+        # Delete space before no_space_before_punct marks
+        non_punct = pynini.difference(NEMO_CHAR, no_space_before_punct).optimize()
+        graph = (
+            pynini.closure(non_punct)
+            + pynini.closure(
+                no_space_before_punct | pynutil.add_weight(delete_space + no_space_before_punct, MIN_NEG_WEIGHT)
+            )
+            + pynini.closure(non_punct)
         )
-        # this works if spaces in between (good)
-        # delete space between 2 NEMO_NOT_SPACE(left and right to the space) that are with in a content of NEMO_SIGMA
+        graph = pynini.closure(graph).optimize()
 
-        graph = remove_space_around_single_quote.optimize()
+        # Remove space after opening brackets
+        no_space_after_punct = pynini.union(*brackets)
+        no_space_after_punct = pynini.cdrewrite(delete_space, no_space_after_punct, NEMO_SIGMA, NEMO_SIGMA).optimize()
+        graph = pynini.compose(graph, no_space_after_punct).optimize()
 
         return graph
diff --git a/nemo_text_processing/text_normalization/hi/verbalizers/verbalize.py b/nemo_text_processing/text_normalization/hi/verbalizers/verbalize.py
index 12ae316b1..30d076c93 100644
---
a/nemo_text_processing/text_normalization/hi/verbalizers/verbalize.py +++ b/nemo_text_processing/text_normalization/hi/verbalizers/verbalize.py @@ -54,6 +54,9 @@ def __init__(self, deterministic: bool = True): time = TimeFst(cardinal=cardinal) time_graph = time.fst + ordinal = OrdinalFst(deterministic=deterministic) + ordinal_graph = ordinal.fst + measure = MeasureFst(cardinal=cardinal, decimal=decimal) measure_graph = measure.fst @@ -62,8 +65,6 @@ def __init__(self, deterministic: bool = True): telephone = TelephoneFst() telephone_graph = telephone.fst - ordinal = OrdinalFst(deterministic=deterministic) - ordinal_graph = ordinal.fst whitelist_graph = WhiteListFst(deterministic=deterministic).fst diff --git a/nemo_text_processing/text_normalization/normalize.py b/nemo_text_processing/text_normalization/normalize.py index 82f8f43d2..73263f454 100644 --- a/nemo_text_processing/text_normalization/normalize.py +++ b/nemo_text_processing/text_normalization/normalize.py @@ -161,7 +161,11 @@ def __init__( from nemo_text_processing.text_normalization.ar.verbalizers.verbalize_final import VerbalizeFinalFst elif lang == 'hi': from nemo_text_processing.text_normalization.hi.taggers.tokenize_and_classify import ClassifyFst + from nemo_text_processing.text_normalization.hi.verbalizers.post_processing import PostProcessingFst from nemo_text_processing.text_normalization.hi.verbalizers.verbalize_final import VerbalizeFinalFst + + if post_process: + self.post_processor = PostProcessingFst(cache_dir=cache_dir, overwrite_cache=overwrite_cache) elif lang == 'it': from nemo_text_processing.text_normalization.it.taggers.tokenize_and_classify import ClassifyFst from nemo_text_processing.text_normalization.it.verbalizers.verbalize_final import VerbalizeFinalFst @@ -374,7 +378,7 @@ def normalize( return text output = SPACE_DUP.sub(' ', output[1:]) - if self.lang == "en" and hasattr(self, 'post_processor'): + if self.lang in ["en", "hi"] and hasattr(self, 'post_processor') and self.post_processor is not None: output = self.post_process(output) if punct_post_process: diff --git a/tests/nemo_text_processing/hi/data_text_normalization/test_cases_address.txt b/tests/nemo_text_processing/hi/data_text_normalization/test_cases_address.txt new file mode 100644 index 000000000..788a8efdc --- /dev/null +++ b/tests/nemo_text_processing/hi/data_text_normalization/test_cases_address.txt @@ -0,0 +1,55 @@ +७०० ओक स्ट्रीट~सात शून्य शून्य ओक स्ट्रीट +११ जंगल रोड~एक एक जंगल रोड +३०१ पार्क एवेन्यू~तीन शून्य एक पार्क एवेन्यू +गली नंबर १७ जीएकगढ़~गली नंबर एक सात जीएकगढ़ +अदनान अपार्टमेंट फ्लैट नंबर ५५~अदनान अपार्टमेंट फ्लैट नंबर पाँच पाँच +प्लॉट नंबर ८ बालाजी मार्केट~प्लॉट नंबर आठ बालाजी मार्केट +शॉप नंबर १०९ ९ और १० डिवाइडिंग रोड सेक्टर १० फरीदाबाद~शॉप नंबर एक शून्य नौ नौ और एक शून्य डिवाइडिंग रोड सेक्टर एक शून्य फरीदाबाद +बूथ ७०, सेक्टर ८, चंडीगढ़~बूथ सात शून्य, सेक्टर आठ, चंडीगढ़ +२२२१ Southern Street~दो दो दो एक सदर्न स्ट्रीट +७०० ओक स्ट्रीट~सात शून्य शून्य ओक स्ट्रीट +६२५ स्कूल स्ट्रीट~छह दो पाँच स्कूल स्ट्रीट +१४७० एस वाशिंगटन स्ट्रीट~एक चार सात शून्य एस वाशिंगटन स्ट्रीट +५०६ स्टेट रोड~पाँच शून्य छह स्टेट रोड +६६-४ पार्कहर्स्ट आर डी~छह छह हाइफ़न चार पार्कहर्स्ट आर डी +५७९ ट्रॉय-शेंक्टाडी रोड~पाँच सात नौ ट्रॉय हाइफ़न शेंक्टाडी रोड +७८३० - ई वेटरन्स पार्कवे, कोलंबस, जी ए ३१९०९~सात आठ तीन शून्य हाइफ़न ई वेटरन्स पार्कवे, कोलंबस, जी ए तीन एक नौ शून्य नौ +६६-४, पार्कहर्स्ट रोड~छह छह हाइफ़न चार, पार्कहर्स्ट रोड +८४०/१, १०० फीट रोड, मेट्रो पिलर ५६-५७, इंदिरानगर, बैंगलोर~आठ चार शून्य बटा एक, एक शून्य शून्य फीट रोड, मेट्रो पिलर पाँच छह 
हाइफ़न पाँच सात, इंदिरानगर, बैंगलोर +१७-१८, राजलक्ष्मी नगर, ७th क्रॉस स्ट्रीट, १०० फीट बाईपास रोड, वेलाचेरी, चेन्नई~एक सात हाइफ़न एक आठ, राजलक्ष्मी नगर, सेवंथ क्रॉस स्ट्रीट, एक शून्य शून्य फीट बाईपास रोड, वेलाचेरी, चेन्नई +४/५ न्यू म्युनिसिपल मार्केट रोड नंबर ५ और ६ सेन्टाक्रूज़ वेस्ट~चार बटा पाँच न्यू म्युनिसिपल मार्केट रोड नंबर पाँच और छह सेन्टाक्रूज़ वेस्ट +१६/१७ ४th फ्लोर जवाहर नगर मटरू मंदिर रोड नंबर २~एक छह बटा एक सात फ़ोर्थ फ्लोर जवाहर नगर मटरू मंदिर रोड नंबर दो +५/३०४ सिक्का कॉम्प्लेक्स विकास मार्ग एक्सटेंशन~पाँच बटा तीन शून्य चार सिक्का कॉम्प्लेक्स विकास मार्ग एक्सटेंशन +२१/२ २nd फ्लोर १st मेन रोड गांधी नगर~दो एक बटा दो सेकंड फ्लोर फ़र्स्ट मेन रोड गांधी नगर +नंबर २२/१८ ३rd फ्लोर सराय बोउ अली शू मार्केट~नंबर दो दो बटा एक आठ थर्ड फ्लोर सराय बोउ अली शू मार्केट +१४/३, मथुरा रोड~एक चार बटा तीन, मथुरा रोड +यूनिट ३ १st फ्लोर नंबर ३७ सोलेमान खतर स्ट्रीट~यूनिट तीन फ़र्स्ट फ्लोर नंबर तीन सात सोलेमान खतर स्ट्रीट +१st फ्लोर नंबर ५२ नॉर्थ अबूज़र स्ट्रीट खान ए अंसारी स्ट्रीट शरीयती स्ट्रीट १६६१७~फ़र्स्ट फ्लोर नंबर पाँच दो नॉर्थ अबूज़र स्ट्रीट खान ए अंसारी स्ट्रीट शरीयती स्ट्रीट एक छह छह एक सात +२०६ जय कॉम कॉम्प्लेक्स १st पोखरन रोड~दो शून्य छह जय कॉम कॉम्प्लेक्स फ़र्स्ट पोखरन रोड +नंबर ३६ २nd फ्लोर सुपर ८ फेज १ एकबतन टाउन तेहरान १३९४७~नंबर तीन छह सेकंड फ्लोर सुपर आठ फेज एक एकबतन टाउन तेहरान एक तीन नौ चार सात +२nd फ्लोर नंबर ८०८ आजादी स्ट्रीट~सेकंड फ्लोर नंबर आठ शून्य आठ आजादी स्ट्रीट +२nd फ्लोर नंबर १५ बिफ़ोर कांदि स्ट्रीट नॉर्थ सोहरावर्दी स्ट्रीट १५६६९~सेकंड फ्लोर नंबर एक पाँच बिफ़ोर कांदि स्ट्रीट नॉर्थ सोहरावर्दी स्ट्रीट एक पाँच छह छह नौ +यूनिट ४ नंबर २५ २nd गोलहा स्ट्रीट काशनी स्ट्रीट नूर स्क्वेर~यूनिट चार नंबर दो पाँच सेकंड गोलहा स्ट्रीट काशनी स्ट्रीट नूर स्क्वेर +ईस्ट ३rd फ्लोर नंबर ७० नेक्स्ट दो तोहीद इंस्टीट्यूट परचम स्ट्रीट~ईस्ट थर्ड फ्लोर नंबर सात शून्य नेक्स्ट दो तोहीद इंस्टीट्यूट परचम स्ट्रीट +३rd फ्लोर नंबर ५ हमेदन एली अपोज़िट लाले पार्क नॉर्थ कारगर स्ट्रीट~थर्ड फ्लोर नंबर पाँच हमेदन एली अपोज़िट लाले पार्क नॉर्थ कारगर स्ट्रीट +४th फ्लोर नंबर ११२४ जमहोरी स्ट्रीट~फ़ोर्थ फ्लोर नंबर एक एक दो चार जमहोरी स्ट्रीट +५th फ्लोर नंबर ७/१ १३th एली शाहिद अराबली स्ट्रीट~फ़िफ्थ फ्लोर नंबर सात बटा एक थर्टींथ एली शाहिद अराबली स्ट्रीट +११, ८० फीट रोड, इंडियन ऑयल पेट्रोल पंप, कोरमंगला ६th ब्लॉक, बैंगलोर के सामने~एक एक, आठ शून्य फीट रोड, इंडियन ऑयल पेट्रोल पंप, कोरमंगला सिक्स्थ ब्लॉक, बैंगलोर के सामने +२१/११, जे ब्लॉक, ६th एवेन्यू मेन रोड, अन्ना नगर पूर्व, चेन्नई~दो एक बटा एक एक, जे ब्लॉक, सिक्स्थ एवेन्यू मेन रोड, अन्ना नगर पूर्व, चेन्नई +३२A नाज़ प्लाज़ा मेरिस रोड~तीन दो ए नाज़ प्लाज़ा मेरिस रोड +२१४ बी गोविंद पूरी स्ट्रीट नंबर २~दो एक चार बी गोविंद पूरी स्ट्रीट नंबर दो +४३६२ १६वीं एवेन्यू एसडब्ल्यू, देवदार रैपिड्स, आई ए ५२४०४~चार तीन छह दो सोलहवीं एवेन्यू एसडब्ल्यू, देवदार रैपिड्स, आई ए बावन हज़ार चार सौ चार +अमरावती ६५५९३०~अमरावती छह पाँच पाँच नौ तीन शून्य +शिमला, हिमाचल प्रदेश ५९३९८८~शिमला, हिमाचल प्रदेश पाँच नौ तीन नौ आठ आठ +२७०४४० डॉसन आर डी, अल्बानी, जीए ३१७०७~दो सात शून्य चार चार शून्य डॉसन आर डी, अल्बानी, जीए तीन एक सात शून्य सात +रांची, झारखंड ७३६५५७~रांची, झारखंड सात तीन छह पाँच पाँच सात +कोहिमा, नागालैंड ४४८३७७~कोहिमा, नागालैंड चार चार आठ तीन सात सात +मुंबई, महाराष्ट्र ८३९४८८~मुंबई, महाराष्ट्र आठ तीन नौ चार आठ आठ +अमरावती ४६८२५२~अमरावती चार छह आठ दो पाँच दो +गांधीनगर, गुजरात ८०८३७४~गांधीनगर, गुजरात आठ शून्य आठ तीन सात चार +मुंबई, महाराष्ट्र २९०९३७~मुंबई, महाराष्ट्र दो नौ शून्य नौ तीन सात +श्रीनगर, जम्मू और कश्मीर ९६४५२३~श्रीनगर, जम्मू और कश्मीर नौ छह चार पाँच दो तीन +रायपुर, छत्तीसगढ़ ११०६३५~रायपुर, छत्तीसगढ़ एक एक शून्य छह तीन पाँच +भोपाल, मध्य प्रदेश ७५१२२५~भोपाल, मध्य प्रदेश सात पाँच एक दो 
दो पाँच +अगरतला, त्रिपुरा ९१५३०५~अगरतला, त्रिपुरा नौ एक पाँच तीन शून्य पाँच +लखनऊ, उत्तर प्रदेश ८०२४८१~लखनऊ, उत्तर प्रदेश आठ शून्य दो चार आठ एक diff --git a/tests/nemo_text_processing/hi/data_text_normalization/test_cases_ordinal.txt b/tests/nemo_text_processing/hi/data_text_normalization/test_cases_ordinal.txt index 9bdcab2a4..95184a603 100644 --- a/tests/nemo_text_processing/hi/data_text_normalization/test_cases_ordinal.txt +++ b/tests/nemo_text_processing/hi/data_text_normalization/test_cases_ordinal.txt @@ -59,4 +59,14 @@ १७८२वीं~सत्रह सौ बयासीवीं १८९०वां~एक हज़ार आठ सौ नब्बेवां १९८१वीं~उन्नीस सौ इक्यासीवीं -९८२६वीं~अट्ठानबे सौ छब्बीसवीं \ No newline at end of file +९८२६वीं~अट्ठानबे सौ छब्बीसवीं +1st~फ़र्स्ट +2nd~सेकंड +3rd~थर्ड +4th~फ़ोर्थ +5th~फ़िफ्थ +6th~सिक्स्थ +7th~सेवंथ +8th~एटथ +9th~नाइंथ +10th~टेंथ diff --git a/tests/nemo_text_processing/hi/test_address.py b/tests/nemo_text_processing/hi/test_address.py new file mode 100644 index 000000000..41b905f11 --- /dev/null +++ b/tests/nemo_text_processing/hi/test_address.py @@ -0,0 +1,33 @@ +# Copyright (c) 2026, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from parameterized import parameterized + +from nemo_text_processing.text_normalization.normalize import Normalizer + +from ..utils import CACHE_DIR, parse_test_case_file + + +class TestAddress: + normalizer = Normalizer( + input_case='cased', lang='hi', cache_dir=CACHE_DIR, overwrite_cache=False, post_process=True + ) + + @parameterized.expand(parse_test_case_file('hi/data_text_normalization/test_cases_address.txt')) + @pytest.mark.run_only_on('CPU') + @pytest.mark.unit + def test_norm(self, test_input, expected): + pred = self.normalizer.normalize(test_input, verbose=False, punct_post_process=True) + assert pred == expected diff --git a/tests/nemo_text_processing/hi/test_sparrowhawk_normalization.sh b/tests/nemo_text_processing/hi/test_sparrowhawk_normalization.sh index a0b0931e2..621383a8d 100644 --- a/tests/nemo_text_processing/hi/test_sparrowhawk_normalization.sh +++ b/tests/nemo_text_processing/hi/test_sparrowhawk_normalization.sh @@ -106,10 +106,10 @@ testTNWord() { runtest $input } -#testTNAddress() { -# input=$PROJECT_DIR/en/data_text_normalization/test_cases_address.txt -# runtest $input -#} +testTNAddress() { + input=$PROJECT_DIR/hi/data_text_normalization/test_cases_address.txt + runtest $input +} #testTNMath() { # input=$PROJECT_DIR/en/data_text_normalization/test_cases_math.txt
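Note: for a quick manual check of the new address path outside CI, the unit test added above can be mirrored from a Python shell. The sketch below assumes the Hindi grammars are compiled on the fly (no cache_dir passed), which is slow on the first run; the expected string is taken directly from test_cases_address.txt.

```python
from nemo_text_processing.text_normalization.normalize import Normalizer

# Same arguments as TestAddress, minus the CI cache directory.
normalizer = Normalizer(input_case="cased", lang="hi", post_process=True)

result = normalizer.normalize("मुंबई, महाराष्ट्र ८३९४८८", verbose=False, punct_post_process=True)
print(result)  # expected: मुंबई, महाराष्ट्र आठ तीन नौ चार आठ आठ
```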