Lib/test/test_tools/test_i18n.py (28 additions, 0 deletions)
@@ -447,6 +447,34 @@ def test_extract_all_comments(self):
                 '''), args=(arg,), raw=True)
                 self.assertIn('#. Translator comment', data)
 
+    def test_comments_with_multiple_tags(self):
+        """
+        Test that multiple --add-comments tags can be specified.
+        """
+        for arg in ('--add-comments={}', '-c{}'):
+            with self.subTest(arg=arg):
+                args = (arg.format('foo:'), arg.format('bar:'))
+                data = self.extract_from_str(dedent('''\
+                # foo: comment
+                _("foo")
+
+                # bar: comment
+                _("bar")
+                '''), args=args, raw=True)
+                self.assertIn('#. foo: comment', data)
+                self.assertIn('#. bar: comment', data)
+
+    def test_comments_not_extracted_without_tags(self):
+        """
+        Test that translator comments are not extracted without
+        specifying --add-comments.
+        """
+        data = self.extract_from_str(dedent('''\
+        # Translator comment
+        _("foo")
+        '''), raw=True)
+        self.assertNotIn('#.', data)
+
 
 def update_POT_snapshots():
     for input_file in DATA_DIR.glob('*.py'):
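The two new tests pin down the comment-tag behaviour from both sides: with one or more tags passed via --add-comments/-c, comments whose text starts with one of the tags show up as "#. ..." translator comments, and with the option omitted none are emitted at all. Below is a minimal, self-contained sketch of that tag-prefix filtering rule, offered only as an illustration; the helper name filter_comments and the dict shape are invented here and this is not the pygettext implementation.

def filter_comments(comments, tags=None):
    """Keep only comments that match one of the requested tags.

    comments: {lineno: comment text with the leading '#' already stripped}
    tags: prefixes such as 'foo:', or None when --add-comments was not given.
    """
    if tags is None:
        # No --add-comments at all: no '#.' lines in the output.
        return {}
    # A bare --add-comments corresponds to the empty tag '', which every
    # comment starts with, so in that case every comment is kept.
    return {lineno: text for lineno, text in comments.items()
            if any(text.startswith(tag) for tag in tags)}

comments = {1: 'foo: comment', 4: 'bar: comment', 7: 'plain comment'}
print(filter_comments(comments, ['foo:', 'bar:']))  # lines 1 and 4 survive
print(filter_comments(comments, ['']))               # bare --add-comments: all survive
print(filter_comments(comments))                      # option omitted: {}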
Tools/i18n/pygettext.py (1 addition, 2 deletions)
@@ -144,7 +144,6 @@
 import importlib.machinery
 import importlib.util
 import os
-import re
 import sys
 import time
 import tokenize
@@ -331,7 +330,7 @@ def get_source_comments(source):
     for token in tokenize.tokenize(BytesIO(source).readline):
         if token.type == tokenize.COMMENT:
             # Remove any leading combination of '#' and whitespace
-            comment = re.sub(r'^[#\s]+', '', token.string)
+            comment = token.string.lstrip('# \t')
             comments[token.start[0]] = comment
     return comments
 
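On the pygettext side, the only behavioural question is whether str.lstrip can stand in for the removed regular expression. A tokenize COMMENT token is a single physical line, so its leading run consists of '#' characters, spaces, and tabs, and on such strings re.sub(r'^[#\s]+', '', ...) and lstrip('# \t') produce the same result; dropping the regex also lets the now-unused import re go. (The regex would additionally strip exotic whitespace such as form feeds, which lstrip('# \t') does not, but that does not occur at the start of ordinary comments.) A small standalone check, with an illustrative sample source:

import re
import tokenize
from io import BytesIO

source = b'#  ## TRANSLATORS: note for translators\n_("foo")\n'
for tok in tokenize.tokenize(BytesIO(source).readline):
    if tok.type == tokenize.COMMENT:
        old = re.sub(r'^[#\s]+', '', tok.string)   # removed approach
        new = tok.string.lstrip('# \t')            # patched approach
        assert old == new == 'TRANSLATORS: note for translators'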