Skip to content

Commit cbfff7d

Browse files
committed
Standardise whitespace and shebang in Python scripts
1 parent af13225 commit cbfff7d

8 files changed

+377
-365
lines changed

scripts/create_output_supplemental_data.py

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -6,24 +6,24 @@
66
import re
77

88
def get_release_version(doxyfile_path):
    """Extract the SDK release version from a Doxygen config file.

    Scans the Doxyfile for a ``PROJECT_NUMBER = x.y.z`` entry.

    :param doxyfile_path: path to the Doxygen configuration file
    :return: the dotted version string, or "unknown" if no
        PROJECT_NUMBER entry is present
    """
    with open(doxyfile_path) as doxyfile:
        contents = doxyfile.read()
    match = re.search(r"(\nPROJECT_NUMBER\s*=\s*)([\d.]+)", contents)
    return match.group(2) if match is not None else "unknown"
1616

1717
def write_new_data_file(output_json_file, data_obj):
    """Serialise *data_obj* as JSON to *output_json_file*.

    Fixed: the original used bare open/write/close, which leaks the file
    handle if ``json.dumps`` raises; a ``with`` block guarantees the
    handle is closed on every path.

    :param output_json_file: destination path (overwritten if present)
    :param data_obj: any JSON-serialisable object
    """
    with open(output_json_file, 'w') as f:
        f.write(json.dumps(data_obj))
2121

2222
if __name__ == "__main__":
    # CLI usage: <script> <doxyfile path> <output JSON path>
    doxyfile_path = sys.argv[1]       # read the doxygen config file
    output_json_file = sys.argv[2]    # output the new data file
    release = get_release_version(doxyfile_path)
    write_new_data_file(output_json_file, {"pico_sdk_release": release})
Lines changed: 133 additions & 131 deletions
Original file line numberDiff line numberDiff line change
@@ -1,149 +1,151 @@
1+
#!/usr/bin/env python3
2+
13
import re
24
import sys
35
import os
46
import json
57

68
def cleanup_text_page(adoc_file, output_adoc_path, link_targets):
    """Normalise a standalone .adoc page in place and harvest its anchors.

    Strips stray spaces that precede ``[[anchor]]`` markers, writes the
    cleaned content back to the same file, and records every ``#target``
    reference found on the page so cross-file links can be resolved later.

    :param adoc_file: path of the .adoc file to clean (rewritten in place)
    :param output_adoc_path: unused here; kept for signature parity with
        postprocess_doxygen_adoc so callers can treat both uniformly
    :param link_targets: dict of anchor id -> chapter filename
    :return: the updated link_targets dict
    """
    filename = os.path.basename(adoc_file)
    with open(adoc_file) as f:
        adoc_content = f.read()
    # remove any errant spaces before anchors
    adoc_content = re.sub(r'( +)(\[\[[^[]*?\]\])', "\\2", adoc_content)
    # collect link targets
    # NOTE(review): the link_targets parameter is never passed down;
    # collect_link_target appears to update a module-level dict and the
    # local name is simply rebound to its return value each iteration —
    # confirm this global coupling before refactoring.
    for line in adoc_content.split('\n'):
        link_targets = collect_link_target(line, filename)
    with open(adoc_file, 'w') as f:
        f.write(adoc_content)
    return link_targets
1820

1921
def collect_link_target(line, chapter_filename, targets=None):
    """Record the first ``#target`` link anchor found on *line*.

    Fixed: the original read and mutated an undeclared module-global
    ``link_targets``, raising NameError when the module is imported and
    the function called standalone. The new optional *targets* parameter
    lets callers inject the dict; when omitted, the module-global is
    still used, so existing two-argument call sites are unaffected.

    :param line: one line of AsciiDoc text
    :param chapter_filename: the chapter file this line belongs to
    :param targets: dict of anchor id -> chapter filename to update;
        defaults to the module-level ``link_targets`` dict
    :return: the (possibly updated) targets dict
    """
    if targets is None:
        # backward-compatible fallback to the module-level dict set up
        # by the __main__ block
        targets = link_targets
    # collect a list of all link targets, so we can fix internal links
    m = re.search(r'(#)([^,\]]+)([,\]])', line)
    if m is not None:
        targets[m.group(2)] = chapter_filename
    return targets
2527

2628
def resolve_links(adoc_file, link_targets):
    """Rewrite cross-file ``<<target,text>>`` links in *adoc_file* in place.

    A link whose target lives in a different chapter file (according to
    *link_targets*) becomes ``<<file#target,text>>``; links that point
    within the same file are left as-is.

    :param adoc_file: path of the .adoc file to rewrite in place
    :param link_targets: dict of anchor id -> chapter filename
    """
    this_file = os.path.basename(adoc_file)
    with open(adoc_file) as src:
        source_lines = src.read().split('\n')
    resolved = []
    for line in source_lines:
        # e.g., <<examples_page,here>>
        match = re.search("(<<)([^,]+)(,?[^>]*>>)", line)
        if match is not None:
            target = match.group(2)
            # only resolve link if it points to another file
            if target in link_targets and link_targets[target] != this_file:
                qualified = link_targets[target] + "#" + target
                line = re.sub("(<<)([^,]+)(,?[^>]*>>)", f"\\1{qualified}\\3", line)
        resolved.append(line)
    with open(adoc_file, 'w') as dst:
        dst.write('\n'.join(resolved))
    return
4446

4547
def build_json(sections, output_path):
    """Write the chapter index to ``picosdk_index.json`` in *output_path*.

    :param sections: list of chapter metadata dicts
    :param output_path: directory in which to create the index file
    """
    index_file = os.path.join(output_path, "picosdk_index.json")
    with open(index_file, 'w') as handle:
        handle.write(json.dumps(sections, indent="\t"))
    return
5052

5153
def tag_content(adoc_content):
    """Decorate context-specific doc items with visible ``[.contexttag]`` badges.

    Finds every ``[#id,...contextspecific,tag=...,type=...]`` attribute
    block and injects a ``[.contexttag TAG]*TAG*`` marker next to the
    corresponding definition-list entries, table rows, and headings.
    The tag text drops the leading ``PICO_`` prefix.

    :param adoc_content: full AsciiDoc text
    :return: the text with context tags injected
    """
    # this is dependent on the same order of attributes every time
    ids_to_tag = re.findall(r'(\[#)(.*?)(,.*?contextspecific,tag=)(.*?)(,type=)(.*?)(\])', adoc_content)
    for this_id in ids_to_tag:
        # this_id[1] is the anchor id, this_id[3] the raw tag value
        tag = re.sub("PICO_", "", this_id[3])
        img = f" [.contexttag {tag}]*{tag}*"
        # `void <<group_hardware_gpio_1ga5d7dbadb2233e2e6627e9101411beb27,gpio_rp2040>> ()`:: An rp2040 function.
        adoc_content = re.sub(rf'(\n`.*?<<{this_id[1]},.*?`)(::)', f"\\1{img}\\2", adoc_content)
        # |<<group_hardware_base,hardware_base>>\n|Low-level types and (atomic) accessors for memory-mapped hardware registers.
        adoc_content = re.sub(rf'(\n\|<<{this_id[1]},.*?>>\n\|.*?)(\n)', f"\\1{img}\\2", adoc_content)
    # NOTE(review): the three patterns below capture the tag from the
    # attribute line itself (group \2), so they run once over the whole
    # document rather than per id — confirm they stay outside the loop.
    # [#group_cyw43_ll_1ga0411cd49bb5b71852cecd93bcbf0ca2d,role=contextspecific,tag=PICO_RP2040,type=PICO_RP2040]\n=== anonymous enum
    HEADING_RE = re.compile(r'(\[#.*?role=contextspecific.*?tag=P?I?C?O?_?)(.*?)(,.*?\]\s*?\n\s*=+\s+\S*?)(\n)')
    # [#group_cyw43_ll_1ga0411cd49bb5b71852cecd93bcbf0ca2d,role=h6 contextspecific,tag=PICO_RP2040,type=PICO_RP2040]\n*anonymous enum*
    H6_HEADING_RE = re.compile(r'(\[#.*?role=h6 contextspecific.*?tag=P?I?C?O?_?)(.*?)(,.*?\]\s*?\n\s*\*\S+.*?)(\n)')
    # [#group_cyw43_ll_1ga0411cd49bb5b71852cecd93bcbf0ca2d,role=h6 contextspecific,tag=PICO_RP2040,type=PICO_RP2040]\n----
    NONHEADING_RE = re.compile(r'(\[#.*?role=h?6?\s?contextspecific.*?tag=P?I?C?O?_?)(.*?)(,.*?\]\s*?\n\s*[^=\*])')
    adoc_content = re.sub(HEADING_RE, f'\\1\\2\\3 [.contexttag \\2]*\\2*\n', adoc_content)
    adoc_content = re.sub(H6_HEADING_RE, f'\\1\\2\\3 [.contexttag \\2]*\\2*\n', adoc_content)
    adoc_content = re.sub(NONHEADING_RE, f'[.contexttag \\2]*\\2*\n\n\\1\\2\\3', adoc_content)
    return adoc_content
7173

7274
def postprocess_doxygen_adoc(adoc_file, output_adoc_path, link_targets):
    """Split the monolithic Doxygen-generated adoc into per-chapter files.

    Tags context-specific content, demotes headings by one level, splits
    the document at each chapter attribute line into ``<group_id>.adoc``
    files next to *adoc_file*, writes a ``picosdk_index.json`` index, and
    finally deletes the original monolithic file.

    :param adoc_file: path to the combined all_groups.adoc file
    :param output_adoc_path: unused here; chapter files are written next
        to adoc_file (derived below as output_path)
    :param link_targets: dict of anchor id -> chapter filename
    :return: the updated link_targets dict
    """
    # directory portion of adoc_file (everything up to the last '/')
    output_path = re.sub(r'[^/]+$', "", adoc_file)
    # seed the index with the introduction page
    sections = [{
        "group_id": "index_doxygen",
        "name": "Introduction",
        "description": "An introduction to the Pico SDK",
        "html": "index_doxygen.html",
        "subitems": []
    }]
    with open(adoc_file) as f:
        adoc_content = f.read()
    # first, lets add any tags
    adoc_content = tag_content(adoc_content)
    # now split the file into top-level sections:
    # toolchain expects all headings to be two levels lower
    adoc_content = re.sub(r'(\n==)(=+ \S+)', "\n\\2", adoc_content)
    # then make it easier to match the chapter breaks
    adoc_content = re.sub(r'(\[#.*?,reftext=".*?"\])(\s*\n)(= )', "\\1\\3", adoc_content)
    # find all the chapter descriptions, to use later
    descriptions = re.findall(r'(\[#.*?,reftext=".*?"\])(= .*?\n\s*\n)(.*?)(\n)', adoc_content)
    CHAPTER_START_RE = re.compile(r'(\[#)(.*?)(,reftext=".*?"\]= )(.*?$)')
    # check line by line; if the line matches our chapter break,
    # then pull all following lines into the chapter list until a new match.
    chapter_filename = "all_groups.adoc"
    current_chapter = None
    chapter_dict = {}
    counter = 0
    # NOTE(review): if the document's first line is not a chapter break,
    # the else-branch below appends to current_chapter while it is still
    # None — presumably the input always opens with a chapter attribute
    # line; confirm against the generated all_groups.adoc.
    for line in adoc_content.split('\n'):
        # collect_link_target appears to update a module-level dict; see
        # its definition for the global coupling
        link_targets = collect_link_target(line, chapter_filename)
        m = CHAPTER_START_RE.match(line)
        if m is not None:
            # write the previous chapter
            if current_chapter is not None:
                with open(chapter_path, 'w') as f:
                    f.write('\n'.join(current_chapter))
            # start the new chapter
            current_chapter = []
            # set the data for this chapter
            group_id = re.sub("^group_+", "", m.group(2))
            chapter_filename = group_id+".adoc"
            chapter_path = os.path.join(output_path, chapter_filename)
            chapter_dict = {
                "group_id": group_id,
                "html": group_id+".html",
                "name": m.group(4),
                "subitems": [],
                # descriptions[] was collected in document order, so it
                # lines up with the chapter counter
                "description": descriptions[counter][2]
            }
            sections.append(chapter_dict)
            # re-split the line into 2
            start_line = re.sub("= ", "\n= ", line)
            current_chapter.append(start_line)
            counter += 1
        else:
            current_chapter.append(line)
    # write the last chapter
    if current_chapter is not None:
        with open(chapter_path, 'w') as f:
            f.write('\n'.join(current_chapter))
    build_json(sections, output_path)
    # the monolithic source file has been fully split; remove it
    os.remove(adoc_file)
    return link_targets
134136

135137
if __name__ == '__main__':
    # CLI usage: <script> <directory containing generated .adoc files>
    output_adoc_path = sys.argv[1]
    # Fixed: re.search(".adoc", f) treated '.' as a regex wildcard and
    # matched anywhere in the name (e.g. "xadoc.txt"); the intended test
    # is a literal ".adoc" suffix check.
    adoc_files = [f for f in os.listdir(output_adoc_path) if f.endswith(".adoc")]
    link_targets = {}
    for adoc_file in adoc_files:
        adoc_filepath = os.path.join(output_adoc_path, adoc_file)
        if adoc_file == "all_groups.adoc":
            # split the monolithic doxygen output into chapters
            link_targets = postprocess_doxygen_adoc(adoc_filepath, output_adoc_path, link_targets)
        else:
            link_targets = cleanup_text_page(adoc_filepath, output_adoc_path, link_targets)
    # now that we have a complete list of all link targets, resolve all internal links
    # (re-list the directory: postprocessing created chapter files and
    # removed all_groups.adoc)
    adoc_files = [f for f in os.listdir(output_adoc_path) if f.endswith(".adoc")]
    for adoc_file in adoc_files:
        adoc_filepath = os.path.join(output_adoc_path, adoc_file)
        resolve_links(adoc_filepath, link_targets)

0 commit comments

Comments
 (0)