 # parser.add_argument('srcdir')
 # parser.add_argument('outdir')
 parser.add_argument("--persistent_worker", action="store_true")
-parser.add_argument("--doctree-dir")
+## parser.add_argument("--doctree-dir")

+logger = logging.getLogger('sphinxdocs-build')

 class Worker:

     def __init__(self, instream: "typing.TextIO", outstream: "typing.TextIO"):
         self._instream = instream
         self._outstream = outstream
-        self._logger = logging.getLogger("worker")
-        logging.basicConfig(filename='echo.log', encoding='utf-8', level=logging.DEBUG)
-        self._logger.info("starting worker")
+        # Annoying: Sphinx resets its logging config as part of main()
+        # and the Sphinx() app setup/invocation, so any logging we try
+        # to set up here to get info out of Sphinx is meaningless.
+        # -v -v -v will output more logging, but to stderr/stdout, and thus to
+        # Bazel's worker log file, due to Sphinx's logging re-configuration.
+        # One-liner to get the most recent worker log:
+        # find $workerLogDir -type f -printf '%T@ %p\n' | sort -n | tail -1 | awk '{print $2}'
+        logging.basicConfig(
+            ##filename='/tmp/sphinx-builder.log', encoding='utf-8',
+            level=logging.DEBUG
+        )
+        logger.info("starting worker")
         self._current = {}
         self._previous = {}
         self._cache = {}
@@ -45,13 +55,14 @@ def run(self) -> None:
                 try:
                     request = self._get_next_request()
                     if request is None:
-                        self._logger.info("Empty request: exiting")
+                        logger.info("Empty request: exiting")
                         break
                     response = self._process_request(request)
+                    logger.info("response:%s", response)
                     if response:
                         self._send_response(response)
                 except Exception:
-                    self._logger.exception("Unhandled error: request=%s", request)
+                    logger.exception("Unhandled error: request=%s", request)
                     output = (
                         f"Unhandled error:\nRequest: {request}\n"
                         + traceback.format_exc()
@@ -65,7 +76,7 @@ def run(self) -> None:
                         }
                     )
         finally:
-            self._logger.info("Worker shutting down")
+            logger.info("Worker shutting down")

     def _get_next_request(self) -> "object | None":
         line = self._instream.readline()
@@ -81,13 +92,14 @@ def inputs(self):

     def _update_digest(self, request):
         args, unknown = parser.parse_known_args(request["arguments"])
-        # Make room for the new build's data.
+        # Make room for the new build's data.
         self._previous = self._current
         # Rearrange the new data into a dict to make comparisons easier.
         self._current = {}
         for page in request["inputs"]:
             path = page["path"]
             self._current[path] = page["digest"]
+            logger.info("path mtime: %s", pathlib.Path(path).stat().st_mtime)
         # Compare the content hashes to determine what pages have changed.
         changed_paths = []
         for path in self._current:
@@ -104,20 +116,21 @@ def _update_digest(self, request):
         # Normalize the paths into docnames
         digest = []
         for path in changed_paths:
+            logger.info("Changed: %s", path)
             if not path.endswith(".rst"):
                 continue
             srcdir = self.args[0]
             docname = path.replace(srcdir + "/", "")
             docname = docname.replace(".rst", "")
             digest.append(docname)
         args, unknown = parser.parse_known_args(self.args)
-        # Save the digest.
-        doctree_dir = Path(args.doctree_dir)
-        # On a fresh build, _restore_cache() does nothing, so this dir won't exist yet.
-        if not doctree_dir.is_dir():
-            doctree_dir.mkdir(parents=True)
-        with open(doctree_dir / Path("digest.json"), "w") as f:
-            json.dump(digest, f, indent=2)
+        ### Save the digest.
+        ## doctree_dir = Path(args.doctree_dir)
+        ### On a fresh build, _restore_cache() does nothing, so this dir won't exist yet.
+        ## if not doctree_dir.is_dir():
+        ##     doctree_dir.mkdir(parents=True)
+        ## with open(doctree_dir / Path("digest.json"), "w") as f:
+        ##     json.dump(digest, f, indent=2)

     def _restore_cache(self):
         for filepath in self._cache:
@@ -138,13 +151,20 @@ def _update_cache(self):
                 self._cache[str(filepath)] = f.read()

     def _process_request(self, request: "WorkRequest") -> "WorkResponse | None":
+        logger.info("request:%s", json.dumps(request, sort_keys=True, indent=2))
         if request.get("cancel"):
             return None
         self.args = request["arguments"]
-        self._restore_cache()
-        self._update_digest(request)
-        main(self.args)
-        self._update_cache()
+        ##self._restore_cache()
+        ##self._update_digest(request)
+        logger.info("main: %s", self.args)
+        orig_stdout = sys.stdout
+        sys.stdout = sys.stderr
+        try:
+            main(self.args)
+        finally:
+            sys.stdout = orig_stdout
+        ##self._update_cache()
         response = {
             "requestId": request.get("requestId", 0),
             "exitCode": 0,
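
For reference, a rough sketch of the newline-delimited JSON messages a Bazel
persistent worker exchanges on stdin/stdout, limited to the fields this script
reads and writes; the values below are illustrative placeholders, not taken
from a real build:

    request  = {"requestId": 1,
                "arguments": ["--persistent_worker", "<srcdir>", "<outdir>"],
                "inputs": [{"path": "<srcdir>/index.rst", "digest": "<content hash>"}]}
    response = {"requestId": 1, "exitCode": 0, "output": ""}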