Commit 59e88be: Blacken files

1 parent: 33ca9bd

14 files changed: 65 additions, 50 deletions

docs/source/conf.py

Lines changed: 7 additions & 7 deletions
@@ -17,12 +17,12 @@

 # -- Project information -----------------------------------------------------

-project = 'mathics-scanner'
-copyright = '2021, The Mathics Team'
-author = 'The Mathics Team'
+project = "mathics-scanner"
+copyright = "2021, The Mathics Team"
+author = "The Mathics Team"

 # The full version, including alpha/beta/rc tags
-release = '1.0.1'
+release = "1.0.1"


 # -- General configuration ---------------------------------------------------

@@ -33,7 +33,7 @@
 extensions = ["sphinx.ext.autodoc"]

 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]

 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.

@@ -46,9 +46,9 @@
 # The theme to use for HTML and HTML Help pages. See the documentation for
 # a list of builtin themes.
 #
-html_theme = 'alabaster'
+html_theme = "alabaster"

 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]

mathics_scanner/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -14,6 +14,7 @@
     replace_unicode_with_wl,
     replace_wl_with_plain_text,
 )
+
 # TODO: Move is_symbol_name to the characters module
 from mathics_scanner.tokeniser import is_symbol_name, Tokeniser, Token
 from mathics_scanner.errors import (

mathics_scanner/characters.py

Lines changed: 3 additions & 3 deletions
@@ -53,6 +53,7 @@
 # ESC sequence aliases
 aliased_characters = _data["aliased-characters"]

+
 def replace_wl_with_plain_text(wl_input: str, use_unicode=True) -> str:
     """
     The Wolfram Language uses specific Unicode characters to represent Wolfram

@@ -75,6 +76,7 @@ def replace_wl_with_plain_text(wl_input: str, use_unicode=True) -> str:

     return r.sub(lambda m: d[m.group(0)], wl_input)

+
 def replace_unicode_with_wl(unicode_input: str) -> str:
     """
     The Wolfram Language uses specific Unicode characters to represent Wolfram

@@ -91,6 +93,4 @@ def replace_unicode_with_wl(unicode_input: str) -> str:
     <https://reference.wolfram.com/language/guide/ListingOfNamedCharacters.html>`_
     and ``implementation.rst`` respectively.
     """
-    return _unicode_to_wl_re.sub(
-        lambda m: _unicode_to_wl[m.group(0)], unicode_input
-    )
+    return _unicode_to_wl_re.sub(lambda m: _unicode_to_wl[m.group(0)], unicode_input)
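For context, the two functions touched here are the module's public conversion helpers. A minimal usage sketch follows; the sample input character is an assumption for illustration, not taken from the translation tables in this commit:

from mathics_scanner.characters import (
    replace_unicode_with_wl,
    replace_wl_with_plain_text,
)

# Hypothetical input: a string containing a standard Unicode character that
# the tables may map to a Wolfram-internal code point.
text = "x \u2208 A"

wl_form = replace_unicode_with_wl(text)        # standard Unicode -> WL code points
plain = replace_wl_with_plain_text(wl_form)    # WL code points -> plain text / Unicode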

mathics_scanner/generate/build_tables.py

Lines changed: 10 additions & 7 deletions
@@ -129,9 +129,9 @@ def compile_tables(data: dict) -> dict:
     }

     # Operators with ASCII sequences
-    ascii_operators = sorted([
-        v["ascii"] for v in data.values() if "operator-name" in v and "ascii" in v
-    ])
+    ascii_operators = sorted(
+        [v["ascii"] for v in data.values() if "operator-name" in v and "ascii" in v]
+    )

     # ESC sequence aliases
     aliased_characters = {

@@ -146,10 +146,13 @@ def compile_tables(data: dict) -> dict:
     }

     # ESC sequence aliases
-    unicode_operators = sorted([
-        v["unicode-equivalent"] for v in data.values() if "operator-name" in v and "unicode-equivalent" in v
-    ])
-
+    unicode_operators = sorted(
+        [
+            v["unicode-equivalent"]
+            for v in data.values()
+            if "operator-name" in v and "unicode-equivalent" in v
+        ]
+    )

     # operator-to-unicode dictionary
     unicode_to_operator = {
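As a rough illustration of what the reflowed comprehension computes, here is a sketch with made-up entries; the sample keys and values are assumptions, only the field names ("operator-name", "unicode-equivalent") come from the diff above:

# Hypothetical stand-in for the YAML-derived `data` dict passed to compile_tables().
data = {
    "Rule": {"operator-name": "Rule", "ascii": "->", "unicode-equivalent": "\u2192"},
    "Alpha": {"unicode-equivalent": "\u03b1"},  # no operator-name: filtered out
}

unicode_operators = sorted(
    [
        v["unicode-equivalent"]
        for v in data.values()
        if "operator-name" in v and "unicode-equivalent" in v
    ]
)
# unicode_operators == ["\u2192"]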

mathics_scanner/generate/rl_inputrc.py

Lines changed: 10 additions & 3 deletions
@@ -11,9 +11,11 @@
 from mathics_scanner.characters import replace_wl_with_plain_text as r
 from mathics_scanner.characters import aliased_characters

+
 def _escape(s: str) -> str:
     """Escapes special chracters in inputrc strings"""
-    return s.replace("\\", "\\\\").replace("\"", "\\\"")
+    return s.replace("\\", "\\\\").replace('"', '\\"')
+

 def _format(c: str, use_unicode: bool) -> str:
     """Formats a single key-value pair"""

@@ -25,6 +27,7 @@ def _format(c: str, use_unicode: bool) -> str:

     return f'"\\e{key}\\e": "{val}"\n'

+
 def generate_inputrc(fd=sys.stdout, use_unicode=True) -> None:
     """
     Generates inputrc files that maps Wolfram Language ESC sequence aliases to

@@ -42,14 +45,18 @@ def usage():
     sys.stderr.write("usage: %s {inputrc-unicode | inputrc-no-unicode}\n" % sys.argv[0])
     sys.exit(1)

+
 if __name__ == "__main__":
     if len(sys.argv) < 2:
         usage()

     if sys.argv[1] == "inputrc-unicode":
         default_encoding = sys.getdefaultencoding()
-        if default_encoding != "utf-8":
-            sys.stderr.write("sys.defaultencoding() is %s so we can't generate unicode output\n" % (default_encoding))
+        if default_encoding != "utf-8":
+            sys.stderr.write(
+                "sys.defaultencoding() is %s so we can't generate unicode output\n"
+                % (default_encoding)
+            )
             sys.exit(2)
         generate_inputrc(use_unicode=True)
     elif sys.argv[1] == "inputrc-no-unicode":
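For context, generate_inputrc writes one readline binding per aliased character, in the form "\e<alias>\e": "<value>" with backslashes and double quotes escaped by _escape. A minimal sketch of driving it from Python, where the output path is an assumption:

from mathics_scanner.generate.rl_inputrc import generate_inputrc

# Write a readline inputrc mapping ESC-sequence aliases to characters;
# "wl-inputrc" is a hypothetical output path.
with open("wl-inputrc", "w", encoding="utf-8") as fd:
    generate_inputrc(fd=fd, use_unicode=True)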

mathics_scanner/load.py

Lines changed: 2 additions & 0 deletions
@@ -4,11 +4,13 @@
 import yaml
 import json

+
 def load_mathics_character_yaml():
     with open(DEFAULT_DATA_DIR / "named-characters.yml", "r") as yaml_file:
         yaml_data = yaml.load(yaml_file, Loader=yaml.FullLoader)
     return yaml_data

+
 def load_mathics_character_json():
     with open(DEFAULT_DATA_DIR / "characters.json", "r") as json_file:
         json_data = json.load(json_file)
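These loaders simply parse the bundled data files under DEFAULT_DATA_DIR. A hedged usage sketch, assuming the JSON loader returns its parsed dict like its YAML counterpart (the return statement falls outside the hunk shown):

from mathics_scanner.load import load_mathics_character_json, load_mathics_character_yaml

# Both return plain dicts parsed from the packaged tables; the exact keys
# depend on named-characters.yml and characters.json.
yaml_data = load_mathics_character_yaml()
json_data = load_mathics_character_json()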

mathics_scanner/tokeniser.py

Lines changed: 5 additions & 8 deletions
@@ -29,7 +29,7 @@
 """
 names_wildcards = "@*"
 base_names_pattern = r"((?![0-9])([0-9${0}{1}{2}])+)".format(
-    _letters, _letterlikes, names_wildcards
+    _letters, _letterlikes, names_wildcards
 )
 full_names_pattern = r"(`?{0}(`{0})*)".format(base_names_pattern)

@@ -295,9 +295,7 @@ def compile_tokens(token_list):
     return [(tag, compile_pattern(pattern)) for tag, pattern in token_list]


-filename_tokens = [
-    ("Filename", filename_pattern),
-]
+filename_tokens = [("Filename", filename_pattern)]

 token_indices = find_indices(literal_tokens)
 tokens = compile_tokens(tokens)

@@ -316,6 +314,7 @@ def is_symbol_name(text):

 class Token(object):
     "A representation of a Wolfram Language token."
+
     def __init__(self, tag, text, pos):
         """
         :param tag: A string that indicates which type of token this is.

@@ -341,10 +340,8 @@ class Tokeniser(object):
     """
     A tokeniser for the Wolfram Language.
     """
-    modes = {
-        "expr": (tokens, token_indices),
-        "filename": (filename_tokens, {}),
-    }
+
+    modes = {"expr": (tokens, token_indices), "filename": (filename_tokens, {})}

     def __init__(self, feeder):
         """

mathics_scanner/version.py

Lines changed: 1 addition & 1 deletion
@@ -4,4 +4,4 @@
 # This file is suitable for sourcing inside POSIX shell as
 # well as importing into Python. That's why there is no
 # space around "=" below.
-__version__="1.2.1.dev0" # noqa
+__version__ = "1.2.1.dev0"  # noqa

setup.py

Lines changed: 5 additions & 13 deletions
@@ -62,7 +62,7 @@ def read(*rnames):
 # General Requirements
 INSTALL_REQUIRES = [
     "chardet",  # Used in mathics_scanner.feed
-    "PyYAML", # Used in mathics-generate-json-table
+    "PyYAML",  # Used in mathics-generate-json-table
     # "ujson", # Optional Used in mathics_scanner.characters
     "click",  # Usin in CLI: mathics-generate-json-table
 ]

@@ -74,9 +74,8 @@ def read(*rnames):
     requires = re.sub(r"([^#]+)(\s*#.*$)?", r"\1", line)
     extra_requires.append(requires)

-EXTRA_REQUIRES = {
-    "full": extra_requires
-}
+EXTRA_REQUIRES = {"full": extra_requires}
+

 def subdirs(root, file="*.*", depth=10):
     for k in range(depth):

@@ -86,10 +85,7 @@ def subdirs(root, file="*.*", depth=10):
 setup(
     name="Mathics_Scanner",
     version=__version__,
-    packages=[
-        "mathics_scanner",
-        "mathics_scanner.generate",
-    ],
+    packages=["mathics_scanner", "mathics_scanner.generate"],
     install_requires=INSTALL_REQUIRES,
     extra_requires=EXTRA_REQUIRES,
     entry_points={

@@ -98,11 +94,7 @@ def subdirs(root, file="*.*", depth=10):
         ]
     },
     package_data={
-        "mathics_scanner": [
-            "data/*.csv",
-            "data/*.json",
-            "data/ExampleData/*",
-        ],
+        "mathics_scanner": ["data/*.csv", "data/*.json", "data/ExampleData/*"]
     },
     long_description=long_description,
     long_description_content_type="text/x-rst",

test/test_has_unicode_inverse_sanity.py

Lines changed: 2 additions & 1 deletion
@@ -31,5 +31,6 @@ def test_has_unicode_inverse_sanity():
        v["wl-unicode"] == wl
        and v.get("unicode-equivalent") == uni
        and v["has-unicode-inverse"]
-       for v in yaml_data.values() if "wl-unicode" in v
+       for v in yaml_data.values()
+       if "wl-unicode" in v
    ), f"key {uni} is in unicode-to-wl-dict but there is not corresponding entry in the YAML table"

0 commit comments
