Skip to content

Commit 15f5eeb

Browse files
committed
fix 2 listings in 9
1 parent dbb1d1f commit 15f5eeb

File tree

2 files changed

+85
-77
lines changed

2 files changed

+85
-77
lines changed

chapter_09_docker.asciidoc

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -770,12 +770,12 @@ CMD python manage.py runserver
770770

771771
<1> Here's where we create our virtualenv
772772

773-
<3> You can't really "activate" a virtualenv inside a Dockerfile,
773+
<2> You can't really "activate" a virtualenv inside a Dockerfile,
774774
so instead we change the system PATH so that the venv versions
775775
of `pip` and `python` become the default ones
776776
(this is actually one of the things that `activate` does, under the hood).
777777

778-
<2> We copy our requirements file in, just like the src folder.
778+
<3> We copy our requirements file in, just like the src folder.
779779

780780
<4> Now we install our dependencies with `pip`,
781781
pointing it at our _requirements.txt_.
@@ -893,6 +893,7 @@ WORKDIR /src
893893
894894
CMD python manage.py runserver 8888
895895
----
896+
====
896897

897898
Ctrl+C the current dockerized container process if it's still running in your terminal,
898899
give it another `build && run`:
@@ -1096,6 +1097,7 @@ WORKDIR /src
10961097
10971098
CMD python manage.py runserver 0.0.0.0:8888
10981099
----
1100+
====
10991101

11001102
Rebuild and re-run your server, and if you have eagle eyes,
11011103
you'll spot it's binding to `0.0.0.0` instead of `127.0.0.1`:

copy_html_to_site_and_print_toc.py

Lines changed: 81 additions & 75 deletions
Original file line numberDiff line numberDiff line change
@@ -1,84 +1,83 @@
11
#!/usr/bin/env python
22

3+
import json
4+
import subprocess
35
from collections import namedtuple
46
from pathlib import Path
5-
import json
7+
68
from lxml import html
7-
import subprocess
89

9-
DEST = Path('/home/harry/workspace/www.obeythetestinggoat.com/content/book')
10+
DEST = Path("~/workspace/www.obeythetestinggoat.com/content/book").expanduser()
1011

1112
CHAPTERS = [
12-
c.replace('.asciidoc', '.html')
13-
for c in json.loads(open('atlas.json').read())['files']
13+
c.replace(".asciidoc", ".html")
14+
for c in json.loads(Path("atlas.json").read_text())["files"]
1415
]
15-
for tweak_chap in ['praise.html', 'part1.html', 'part2.html', 'part3.html']:
16-
CHAPTERS[CHAPTERS.index(tweak_chap)] = tweak_chap.replace('.', '.forbook.')
17-
CHAPTERS.remove('cover.html')
18-
CHAPTERS.remove('titlepage.html')
19-
CHAPTERS.remove('copyright.html')
20-
CHAPTERS.remove('toc.html')
21-
CHAPTERS.remove('ix.html')
22-
CHAPTERS.remove('author_bio.html')
23-
CHAPTERS.remove('colo.html')
16+
for tweak_chap in ["praise.html", "part1.html", "part2.html", "part3.html"]:
17+
CHAPTERS[CHAPTERS.index(tweak_chap)] = tweak_chap.replace(".", ".forbook.")
18+
CHAPTERS.remove("cover.html")
19+
CHAPTERS.remove("titlepage.html")
20+
CHAPTERS.remove("copyright.html")
21+
CHAPTERS.remove("toc.html")
22+
CHAPTERS.remove("ix.html")
23+
CHAPTERS.remove("author_bio.html")
24+
CHAPTERS.remove("colo.html")
2425

25-
ChapterInfo = namedtuple('ChapterInfo', 'href_id chapter_title subheaders xrefs')
26+
ChapterInfo = namedtuple("ChapterInfo", "href_id chapter_title subheaders xrefs")
2627

2728

2829
def make_chapters():
2930
for chapter in CHAPTERS:
30-
subprocess.check_call(['make', chapter], stdout=subprocess.PIPE)
31+
subprocess.check_call(["make", chapter], stdout=subprocess.PIPE)
3132

3233

3334
def parse_chapters():
3435
for chapter in CHAPTERS:
35-
raw_html = open(chapter).read()
36+
raw_html = Path(chapter).read_text()
3637
yield chapter, html.fromstring(raw_html)
3738

3839

3940
def get_anchor_targets(parsed_html):
40-
ignores = {'header', 'content', 'footnotes', 'footer', 'footer-text'}
41-
all_ids = [
42-
a.get('id') for a in parsed_html.cssselect('*[id]')
43-
]
44-
return [i for i in all_ids if not i.startswith('_') and i not in ignores]
41+
ignores = {"header", "content", "footnotes", "footer", "footer-text"}
42+
all_ids = [a.get("id") for a in parsed_html.cssselect("*[id]")]
43+
return [i for i in all_ids if not i.startswith("_") and i not in ignores]
44+
4545

4646
def get_chapter_info():
4747
chapter_info = {}
48-
appendix_numbers = list('ABCDEFGHIJKL')
48+
appendix_numbers = list("ABCDEFGHIJKL")
4949
chapter_numbers = list(range(1, 100))
5050
part_numbers = list(range(1, 10))
5151

5252
for chapter, parsed_html in parse_chapters():
53-
print('getting info from', chapter)
53+
print("getting info from", chapter)
5454

55-
if not parsed_html.cssselect('h2'):
56-
header = parsed_html.cssselect('h1')[0]
55+
if not parsed_html.cssselect("h2"):
56+
header = parsed_html.cssselect("h1")[0]
5757
else:
58-
header = parsed_html.cssselect('h2')[0]
59-
href_id = header.get('id')
58+
header = parsed_html.cssselect("h2")[0]
59+
href_id = header.get("id")
6060
if href_id is None:
61-
href_id = parsed_html.cssselect('body')[0].get('id')
62-
subheaders = [h.get('id') for h in parsed_html.cssselect('h3')]
61+
href_id = parsed_html.cssselect("body")[0].get("id")
62+
subheaders = [h.get("id") for h in parsed_html.cssselect("h3")]
6363

6464
chapter_title = header.text_content()
65-
chapter_title = chapter_title.replace('Appendix A: ', '')
65+
chapter_title = chapter_title.replace("Appendix A: ", "")
6666

67-
if chapter.startswith('chapter_'):
67+
if chapter.startswith("chapter_"):
6868
chapter_no = chapter_numbers.pop(0)
69-
chapter_title = f'Chapter {chapter_no}: {chapter_title}'
69+
chapter_title = f"Chapter {chapter_no}: {chapter_title}"
7070

71-
if chapter.startswith('appendix_'):
71+
if chapter.startswith("appendix_"):
7272
appendix_no = appendix_numbers.pop(0)
73-
chapter_title = f'Appendix {appendix_no}: {chapter_title}'
73+
chapter_title = f"Appendix {appendix_no}: {chapter_title}"
7474

75-
if chapter.startswith('part'):
75+
if chapter.startswith("part"):
7676
part_no = part_numbers.pop(0)
77-
chapter_title = f'Part {part_no}: {chapter_title}'
78-
79-
if chapter.startswith('epilogue'):
80-
chapter_title = f'Epilogue: {chapter_title}'
77+
chapter_title = f"Part {part_no}: {chapter_title}"
8178

79+
if chapter.startswith("epilogue"):
80+
chapter_title = f"Epilogue: {chapter_title}"
8281

8382
xrefs = get_anchor_targets(parsed_html)
8483
chapter_info[chapter] = ChapterInfo(href_id, chapter_title, subheaders, xrefs)
@@ -88,74 +87,78 @@ def get_chapter_info():
8887

8988
def fix_xrefs(contents, chapter, chapter_info):
9089
parsed = html.fromstring(contents)
91-
links = parsed.cssselect('a[href^=\#]')
90+
links = parsed.cssselect(r"a[href^=\#]")
9291
for link in links:
9392
for other_chap in CHAPTERS:
9493
if other_chap == chapter:
9594
continue
9695
chapter_id = chapter_info[other_chap].href_id
97-
href = link.get('href')
98-
targets = ['#' + x for x in chapter_info[other_chap].xrefs]
99-
if href == '#' + chapter_id:
100-
link.set('href', f'/book/{other_chap}')
96+
href = link.get("href")
97+
targets = ["#" + x for x in chapter_info[other_chap].xrefs]
98+
if href == "#" + chapter_id:
99+
link.set("href", f"/book/{other_chap}")
101100
elif href in targets:
102-
link.set('href', f'/book/{other_chap}{href}')
101+
link.set("href", f"/book/{other_chap}{href}")
103102

104103
return html.tostring(parsed)
105104

106105

107106
def fix_title(contents, chapter, chapter_info):
108107
parsed = html.fromstring(contents)
109-
titles = parsed.cssselect('h2')
110-
if titles and titles[0].text.startswith('Appendix A'):
108+
titles = parsed.cssselect("h2")
109+
if titles and titles[0].text.startswith("Appendix A"):
111110
title = titles[0]
112111
title.text = chapter_info[chapter].chapter_title
113112
return html.tostring(parsed)
114113

114+
115115
def copy_chapters_across_with_fixes(chapter_info, fixed_toc):
116-
comments_html = open('disqus_comments.html').read()
117-
buy_book_div = html.fromstring(open('buy_the_book_banner.html').read())
118-
analytics_div = html.fromstring(open('analytics.html').read())
119-
load_toc_script = open('load_toc.js').read()
116+
comments_html = Path("disqus_comments.html").read_text()
117+
buy_book_div = html.fromstring(Path("buy_the_book_banner.html").read_text())
118+
analytics_div = html.fromstring(Path("analytics.html").read_text())
119+
load_toc_script = Path("load_toc.js").read_text()
120120

121121
for chapter in CHAPTERS:
122-
old_contents = open(chapter).read()
122+
old_contents = Path(chapter).read_text()
123123
new_contents = fix_xrefs(old_contents, chapter, chapter_info)
124124
new_contents = fix_title(new_contents, chapter, chapter_info)
125125
parsed = html.fromstring(new_contents)
126-
body = parsed.cssselect('body')[0]
127-
if parsed.cssselect('#header'):
128-
head = parsed.cssselect('head')[0]
129-
head.append(html.fragment_fromstring('<script>' + load_toc_script + '</script>'))
130-
body.set('class', 'article toc2 toc-left')
126+
body = parsed.cssselect("body")[0]
127+
if parsed.cssselect("#header"):
128+
head = parsed.cssselect("head")[0]
129+
head.append(
130+
html.fragment_fromstring("<script>" + load_toc_script + "</script>")
131+
)
132+
body.set("class", "article toc2 toc-left")
131133
body.insert(0, buy_book_div)
132-
body.append(html.fromstring(
133-
comments_html.replace('CHAPTER_NAME', chapter.split('.')[0])
134-
))
134+
body.append(
135+
html.fromstring(
136+
comments_html.replace("CHAPTER_NAME", chapter.split(".")[0])
137+
)
138+
)
135139
body.append(analytics_div)
136140
fixed_contents = html.tostring(parsed)
137141

138-
with open(DEST / chapter, 'w') as f:
139-
f.write(fixed_contents.decode('utf8'))
140-
with open(DEST / 'toc.html', 'w') as f:
141-
f.write(html.tostring(fixed_toc).decode('utf8'))
142+
with open(DEST / chapter, "w") as f:
143+
f.write(fixed_contents.decode("utf8"))
144+
with open(DEST / "toc.html", "w") as f:
145+
f.write(html.tostring(fixed_toc).decode("utf8"))
142146

143147

144148
def extract_toc_from_book():
145-
subprocess.check_call(['make', 'book.html'], stdout=subprocess.PIPE)
146-
parsed = html.fromstring(open('book.html').read())
147-
return parsed.cssselect('#toc')[0]
148-
149+
subprocess.check_call(["make", "book.html"], stdout=subprocess.PIPE)
150+
parsed = html.fromstring(Path("book.html").read_text())
151+
return parsed.cssselect("#toc")[0]
149152

150153

151154
def fix_toc(toc, chapter_info):
152155
href_mappings = {}
153156
for chapter in CHAPTERS:
154157
chap = chapter_info[chapter]
155158
if chap.href_id:
156-
href_mappings['#' + chap.href_id] = f'/book/{chapter}'
159+
href_mappings["#" + chap.href_id] = f"/book/{chapter}"
157160
for subheader in chap.subheaders:
158-
href_mappings['#' + subheader] = f'/book/{chapter}#{subheader}'
161+
href_mappings["#" + subheader] = f"/book/{chapter}#{subheader}"
159162

160163
def fix_link(href):
161164
if href in href_mappings:
@@ -164,18 +167,21 @@ def fix_link(href):
164167
return href
165168

166169
toc.rewrite_links(fix_link)
167-
toc.set('class', 'toc2')
170+
toc.set("class", "toc2")
168171
return toc
169172

170173

171174
def print_toc_md(chapter_info):
172175
for chapter in CHAPTERS:
173176
title = chapter_info[chapter].chapter_title
174-
print(f'* [{title}](/book/{chapter})')
177+
print(f"* [{title}](/book/{chapter})")
175178

176179

177180
def rsync_images():
178-
subprocess.run(['rsync', '-a', '-v', 'images/', DEST / 'images/'])
181+
subprocess.run(
182+
["rsync", "-a", "-v", "images/", DEST / "images/"],
183+
check=True,
184+
)
179185

180186

181187
def main():
@@ -188,5 +194,5 @@ def main():
188194
print_toc_md(chapter_info)
189195

190196

191-
if __name__ == '__main__':
197+
if __name__ == "__main__":
192198
main()

0 commit comments

Comments
 (0)