1 change: 0 additions & 1 deletion .github/workflows/tail-call.yml
@@ -137,4 +137,3 @@ jobs:
       CC=clang-20 ./configure --with-tail-call-interp --disable-gil
       make all --jobs 4
       ./python -m test --multiprocess 0 --timeout 4500 --verbose2 --verbose3
-
8 changes: 5 additions & 3 deletions .pre-commit-config.yaml
@@ -43,12 +43,14 @@ repos:
         exclude: ^Lib/test/test_tomllib/
       - id: check-yaml
       - id: end-of-file-fixer
-        types: [python]
+        types_or: [python, yaml]
         exclude: Lib/test/tokenizedata/coding20731.py
+      - id: end-of-file-fixer
+        files: '^\.github/CODEOWNERS$'
       - id: trailing-whitespace
-        types_or: [c, inc, python, rst]
+        types_or: [c, inc, python, rst, yaml]
       - id: trailing-whitespace
-        files: '\.(gram)$'
+        files: '^\.github/CODEOWNERS|\.(gram)$'
 
   - repo: https://github.com/python-jsonschema/check-jsonschema
     rev: 0.33.0
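A note on this hunk: in pre-commit, the "types" key is an AND filter (a file must carry every listed tag), while "types_or" matches a file carrying any one of the tags, so the switch above is what lets a single end-of-file-fixer entry cover both Python and YAML files. A minimal sketch of the distinction, under stock pre-commit semantics:

    hooks:
      # types is AND: requiring [python, yaml] together would match no file.
      # types_or is OR: the hook runs on .py files and on .yml/.yaml files alike.
      - id: end-of-file-fixer
        types_or: [python, yaml]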
1 change: 0 additions & 1 deletion .readthedocs.yml
@@ -32,4 +32,3 @@ build:
     - make -C Doc venv html
     - mkdir _readthedocs
     - mv Doc/build/html _readthedocs/html
-
3 changes: 2 additions & 1 deletion Doc/reference/lexical_analysis.rst
@@ -489,8 +489,9 @@ String literals are described by the following lexical definitions:
 
 .. productionlist:: python-grammar
    stringliteral: [`stringprefix`](`shortstring` | `longstring`)
-   stringprefix: "r" | "u" | "R" | "U" | "f" | "F"
+   stringprefix: "r" | "u" | "R" | "U" | "f" | "F" | "t" | "T"
               : | "fr" | "Fr" | "fR" | "FR" | "rf" | "rF" | "Rf" | "RF"
+              : | "tr" | "Tr" | "tR" | "TR" | "rt" | "rT" | "Rt" | "RT"
    shortstring: "'" `shortstringitem`* "'" | '"' `shortstringitem`* '"'
    longstring: "'''" `longstringitem`* "'''" | '"""' `longstringitem`* '"""'
    shortstringitem: `shortstringchar` | `stringescapeseq`
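For context on the grammar hunk above: the new "t"/"T" prefixes and their raw combinations belong to template string literals from PEP 750. A minimal sketch of what the extended production accepts, assuming a CPython build (3.14+) that implements the feature:

    name = "world"
    # A 't' prefix yields a Template object instead of a plain str.
    greeting = t"Hello, {name}!"
    # Raw and template prefixes combine in either order and any case, per the grammar.
    path = rt"C:\tmp\{name}"
    loud = TR"shout: {name!r}"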
2 changes: 1 addition & 1 deletion Lib/test/support/interpreters/channels.py
@@ -69,7 +69,7 @@ def list_all():
         if not hasattr(send, '_unboundop'):
             send._set_unbound(unboundop)
         else:
-            assert send._unbound[0] == op
+            assert send._unbound[0] == unboundop
         channels.append(chan)
     return channels
 
25 changes: 16 additions & 9 deletions Lib/test/test__interpreters.py
@@ -474,13 +474,15 @@ def setUp(self):
 
     def test_signatures(self):
         # See https://github.com/python/cpython/issues/126654
-        msg = "expected 'shared' to be a dict"
+        msg = r'_interpreters.exec\(\) argument 3 must be dict, not int'
        with self.assertRaisesRegex(TypeError, msg):
             _interpreters.exec(self.id, 'a', 1)
         with self.assertRaisesRegex(TypeError, msg):
             _interpreters.exec(self.id, 'a', shared=1)
+        msg = r'_interpreters.run_string\(\) argument 3 must be dict, not int'
         with self.assertRaisesRegex(TypeError, msg):
             _interpreters.run_string(self.id, 'a', shared=1)
+        msg = r'_interpreters.run_func\(\) argument 3 must be dict, not int'
         with self.assertRaisesRegex(TypeError, msg):
             _interpreters.run_func(self.id, lambda: None, shared=1)
 
@@ -952,7 +954,8 @@ def test_invalid_syntax(self):
             """)
 
         with self.subTest('script'):
-            self.assert_run_failed(SyntaxError, script)
+            with self.assertRaises(SyntaxError):
+                _interpreters.run_string(self.id, script)
 
         with self.subTest('module'):
             modname = 'spam_spam_spam'
@@ -1019,12 +1022,19 @@ def script():
                 with open(w, 'w', encoding="utf-8") as spipe:
                     with contextlib.redirect_stdout(spipe):
                         print('it worked!', end='')
+        failed = None
         def f():
-            _interpreters.set___main___attrs(self.id, dict(w=w))
-            _interpreters.run_func(self.id, script)
+            nonlocal failed
+            try:
+                _interpreters.set___main___attrs(self.id, dict(w=w))
+                _interpreters.run_func(self.id, script)
+            except Exception as exc:
+                failed = exc
         t = threading.Thread(target=f)
         t.start()
         t.join()
+        if failed:
+            raise Exception from failed
 
         with open(r, encoding="utf-8") as outfile:
             out = outfile.read()
@@ -1053,19 +1063,16 @@ def test_closure(self):
         spam = True
         def script():
             assert spam
-
-        with self.assertRaises(TypeError):
+        with self.assertRaises(ValueError):
             _interpreters.run_func(self.id, script)
 
-    # XXX This hasn't been fixed yet.
-    @unittest.expectedFailure
     def test_return_value(self):
         def script():
             return 'spam'
         with self.assertRaises(ValueError):
             _interpreters.run_func(self.id, script)
 
-    @unittest.skip("we're not quite there yet")
+    # @unittest.skip("we're not quite there yet")
     def test_args(self):
         with self.subTest('args'):
             def script(a, b=0):
23 changes: 15 additions & 8 deletions Lib/test/test_interpreters/test_api.py
@@ -839,9 +839,16 @@ def test_bad_script(self):
             interp.exec(10)
 
     def test_bytes_for_script(self):
+        r, w = self.pipe()
+        RAN = b'R'
+        DONE = b'D'
         interp = interpreters.create()
-        with self.assertRaises(TypeError):
-            interp.exec(b'print("spam")')
+        interp.exec(f"""if True:
+            import os
+            os.write({w}, {RAN!r})
+            """)
+        os.write(w, DONE)
+        self.assertEqual(os.read(r, 1), RAN)
 
     def test_with_background_threads_still_running(self):
         r_interp, w_interp = self.pipe()
@@ -1010,8 +1017,6 @@ def test_call(self):
 
         for i, (callable, args, kwargs) in enumerate([
             (call_func_noop, (), {}),
-            (call_func_return_shareable, (), {}),
-            (call_func_return_not_shareable, (), {}),
             (Spam.noop, (), {}),
         ]):
             with self.subTest(f'success case #{i+1}'):
@@ -1036,6 +1041,8 @@ def test_call(self):
             (call_func_complex, ('custom', 'spam!'), {}),
             (call_func_complex, ('custom-inner', 'eggs!'), {}),
             (call_func_complex, ('???',), {'exc': ValueError('spam')}),
+            (call_func_return_shareable, (), {}),
+            (call_func_return_not_shareable, (), {}),
         ]):
             with self.subTest(f'invalid case #{i+1}'):
                 with self.assertRaises(Exception):
Expand All @@ -1051,8 +1058,6 @@ def test_call_in_thread(self):

for i, (callable, args, kwargs) in enumerate([
(call_func_noop, (), {}),
(call_func_return_shareable, (), {}),
(call_func_return_not_shareable, (), {}),
(Spam.noop, (), {}),
]):
with self.subTest(f'success case #{i+1}'):
@@ -1079,6 +1084,8 @@ def test_call_in_thread(self):
             (call_func_complex, ('custom', 'spam!'), {}),
             (call_func_complex, ('custom-inner', 'eggs!'), {}),
             (call_func_complex, ('???',), {'exc': ValueError('spam')}),
+            (call_func_return_shareable, (), {}),
+            (call_func_return_not_shareable, (), {}),
         ]):
             with self.subTest(f'invalid case #{i+1}'):
                 if args or kwargs:
@@ -1618,8 +1625,8 @@ def test_exec(self):
     def test_call(self):
         with self.subTest('no args'):
             interpid = _interpreters.create()
-            exc = _interpreters.call(interpid, call_func_return_shareable)
-            self.assertIs(exc, None)
+            with self.assertRaises(ValueError):
+                _interpreters.call(interpid, call_func_return_shareable)
 
         with self.subTest('uncaught exception'):
             interpid = _interpreters.create()
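The test_bytes_for_script hunk above inverts the old expectation: Interpreter.exec() used to reject bytes with TypeError, and after this change a bytes script simply runs. A hedged sketch of the new behavior, assuming the test.support.interpreters wrapper these tests use:

    from test.support import interpreters

    interp = interpreters.create()
    # Previously this raised TypeError; now the bytes object is
    # executed as a script in the subinterpreter.
    interp.exec(b'print("spam")')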
56 changes: 56 additions & 0 deletions Lib/test/test_tokenize.py
@@ -1,6 +1,8 @@
 import contextlib
+import itertools
 import os
 import re
+import string
 import tempfile
 import token
 import tokenize
@@ -3238,5 +3240,59 @@ def test_exact_flag(self):
         self.check_output(source, expect, flag)
 
 
+class StringPrefixTest(unittest.TestCase):
+    def test_prefixes(self):
+        # Get the list of defined string prefixes. I don't see an
+        # obvious documented way of doing this, but probably the best
+        # thing is to split apart tokenize.StringPrefix.
+
+        # Make sure StringPrefix begins and ends in parens.
+        self.assertEqual(tokenize.StringPrefix[0], '(')
+        self.assertEqual(tokenize.StringPrefix[-1], ')')
+
+        # Then split apart everything else by '|'.
+        defined_prefixes = set(tokenize.StringPrefix[1:-1].split('|'))
+
+        # Now compute the actual string prefixes, by exec-ing all
+        # valid prefix combinations, followed by an empty string.
+
+        # Try all prefix lengths until we find a length that has zero
+        # valid prefixes. This will miss the case where for example
+        # there are no valid 3 character prefixes, but there are valid
+        # 4 character prefixes. That seems extremely unlikely.
+
+        # Note that the empty prefix is being included, because length
+        # starts at 0. That's expected, since StringPrefix includes
+        # the empty prefix.
+
+        valid_prefixes = set()
+        for length in itertools.count():
+            num_at_this_length = 0
+            for prefix in (
+                "".join(l) for l in list(itertools.combinations(string.ascii_lowercase, length))
+            ):
+                for t in itertools.permutations(prefix):
+                    for u in itertools.product(*[(c, c.upper()) for c in t]):
+                        p = ''.join(u)
+                        if p == "not":
+                            # 'not' can never be a string prefix,
+                            # because it's a valid expression: not ""
+                            continue
+                        try:
+                            eval(f'{p}""')
+
+                            # No syntax error, so p is a valid string
+                            # prefix.
+
+                            valid_prefixes.add(p)
+                            num_at_this_length += 1
+                        except SyntaxError:
+                            pass
+            if num_at_this_length == 0:
+                break
+
+        self.assertEqual(defined_prefixes, valid_prefixes)
+
+
 if __name__ == "__main__":
     unittest.main()
2 changes: 1 addition & 1 deletion Lib/tokenize.py
@@ -86,7 +86,7 @@ def _all_string_prefixes():
     # The valid string prefixes. Only contain the lower case versions,
     # and don't contain any permutations (include 'fr', but not
     # 'rf'). The various permutations will be generated.
-    _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
+    _valid_string_prefixes = ['b', 'r', 'u', 'f', 't', 'br', 'fr', 'tr']
     # if we add binary f-strings, add: ['fb', 'fbr']
     result = {''}
     for prefix in _valid_string_prefixes:
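A quick way to observe the effect of this tokenize change, using the private _all_string_prefixes() helper shown in the hunk (an illustration, not part of the diff):

    import tokenize

    prefixes = tokenize._all_string_prefixes()
    # 't' and all of its case/raw permutations are now generated...
    assert {'t', 'T', 'tr', 'rT', 'Rt', 'TR'} <= prefixes
    # ...while binary template strings remain unsupported.
    assert 'bt' not in prefixes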
@@ -0,0 +1 @@
+Fix performance regression in calling a :mod:`ctypes` function pointer in :term:`free threading`.