-
Notifications
You must be signed in to change notification settings - Fork 12
Expand file tree
/
Copy pathutils.py
More file actions
440 lines (372 loc) · 16 KB
/
utils.py
File metadata and controls
440 lines (372 loc) · 16 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
import asyncio
import base64
import logging
from collections.abc import Iterable
from enum import StrEnum, auto
from typing import TYPE_CHECKING, assert_never
import os
import nbformat
from traitlets.config import Config
from nbconvert import HTMLExporter
from aiodocker.containers import DockerContainer
from aviary.utils import MultipleChoiceQuestion
from . import config as cfg
if TYPE_CHECKING:
from jupyter_client.asynchronous.client import AsyncKernelClient
# Module-level logger; configured by configure_logging() at startup.
logger = logging.getLogger(__name__)
# MIME types treated as renderable images when processing cell outputs.
JUPYTER_IMAGE_OUTPUT_TYPES = {
    "image/png",
    "image/jpeg",
    "image/jpg",
}
# Rich table/markup MIME types deliberately skipped when summarizing
# display_data outputs (the text/plain fallback is used instead).
JUPYTER_TABLE_OUTPUT_TYPES_TO_IGNORE = {
    "text/latex",
    "text/html",
    "text/markdown",
}
def configure_logging():
    """Set up application-wide INFO logging and quiet noisy third-party loggers."""
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )
    # These libraries log verbosely at INFO; cap them at WARNING.
    for noisy_logger in (
        "LiteLLM",
        "LiteLLM Router",
        "LiteLLM Proxy",
        "httpx",
        "httpcore.http11",
    ):
        logging.getLogger(noisy_logger).setLevel(logging.WARNING)
class NBLanguage(StrEnum):
PYTHON = auto()
R = auto()
def make_kernelspec(self) -> dict[str, str]:
match self:
# These are the default kernelspecs set by Jupyter and IRkernel respectively.
case NBLanguage.PYTHON:
kspec = {"name": "python", "display_name": "Python 3 (ipykernel)"}
case NBLanguage.R:
kspec = {"name": "ir", "display_name": "R"}
case _:
assert_never(self)
return kspec | {"language": self.value}
def limit_notebook_output(output: str | list[str]) -> str:
"""Limit notebook output to configured length.
Args:
output: String output from notebook cell
Returns:
String output, truncated if longer than configured limit with
indication of truncation
"""
if isinstance(output, list):
raise TypeError("Only string output truncation is supported")
output_length = len(output)
if output_length < cfg.NB_OUTPUT_LIMIT:
return output
cutoff = int(cfg.NB_OUTPUT_LIMIT / 2)
# Sometimes error tracebacks have important information at the end
# and at the beginning so important to keep those sections
return output[:cutoff] + "\n<...output limited...>\n" + output[-cutoff:]
def process_cell_output(
    output, md: list[str], images: list[str], cell_streams: list[str]
) -> None:
    """Fold one notebook cell output into the markdown/image/stream accumulators.

    Args:
        output: A single output node from a notebook cell.
        md: Accumulator for markdown text fragments (mutated in place).
        images: Accumulator for base64 data-URI images (mutated in place).
        cell_streams: Accumulator for raw stream text, merged later by the
            caller (mutated in place).
    """
    out_type = output.output_type
    if out_type == "stream":
        # Streams are buffered so the caller can merge and truncate them once.
        cell_streams.append(output.text)
        return
    if out_type == "execute_result":
        plain_text = output.get("data", {}).get("text/plain", "")
        md.append(limit_notebook_output(plain_text))
        return
    if out_type == "error":
        tb = output.traceback
        if isinstance(tb, list):
            tb = "\n".join(tb)
        md.append(limit_notebook_output(tb))
        return
    if out_type in {"display_data"} | JUPYTER_IMAGE_OUTPUT_TYPES:
        data_type = next(iter(output.data.keys()), "")
        if data_type in JUPYTER_TABLE_OUTPUT_TYPES_TO_IGNORE:
            return
        if data_type == "text/plain":
            md.append(limit_notebook_output(output.data[data_type]))
        elif data_type in JUPYTER_IMAGE_OUTPUT_TYPES:
            # Placeholder token referencing the image appended below (1-based).
            md.append(f"<{len(images) + 1}>")
            image_format = data_type.split("/")[-1]
            images.append(
                f"data:image/{image_format};base64,"
                + encode_image_to_base64(output.data[data_type])
            )
        else:
            logger.warning(f"Unknown data type: {data_type}")
            md.append(limit_notebook_output(output.data[data_type]))
def view_notebook(
    cells: "list[nbformat.NotebookNode]", language: str
) -> tuple[str, list[str]]:
    """Render notebook cells as a markdown transcript plus extracted images.

    Args:
        cells: Notebook cells to render.
        language: Programming language used for code fences.

    Returns:
        tuple containing:
            - Markdown string with cell sources and outputs
            - List of base64 data-URI images found in cell outputs
    """
    fragments: list[str] = []
    images: list[str] = []
    for idx, cell in enumerate(cells):
        fragments.append(f"### Cell {idx}:")
        if cell.cell_type == "code":
            fragments.append(f"```{language}")
            fragments.append(cell.source)
            fragments.append("```")
            outputs = cell.get("outputs", [])
            if outputs:
                fragments.append(f"### Output {idx}:")
                fragments.append("```")
                streams: list[str] = []
                for output in outputs:
                    process_cell_output(output, fragments, images, streams)
                if streams:
                    # Streams are merged and truncated as one unit.
                    fragments.append(limit_notebook_output("\n".join(streams)))
                fragments.append("```")
        elif cell.cell_type in {"markdown", "raw"}:
            fragments.append(cell.source)
    return "\n".join(fragments), images
def encode_image_to_base64(image: str) -> str:
    """Normalize a base64-encoded image string.

    Decoding then re-encoding validates the payload and yields canonical
    base64 text (UTF-8 decoded).
    """
    return base64.b64encode(base64.b64decode(image)).decode("utf-8")
async def nbformat_run_notebook(
    cells: Iterable[nbformat.NotebookNode],
    client: "AsyncKernelClient",
    cell_idx: int | None = None,
) -> list[str]:
    """Execute notebook code cells via a Jupyter kernel client, collecting outputs.

    Each executed cell's ``outputs`` list is rebuilt in place from the IOPub
    messages produced by its execution. Errors are recorded (as error outputs
    and summary strings) rather than raised, so subsequent cells still run.
    The client's channels are always stopped before returning.

    Args:
        cells: Notebook cells to execute sequentially.
        client: Async kernel client used for code execution.
        cell_idx: When given, only the cell at this index is executed; all
            other cells are skipped.

    Returns:
        List of short error messages, one per cell that raised an error.
    """
    error_messages = []
    logger.debug(f"Running notebook with cell_idx: {cell_idx}")
    try:
        logger.debug("Beginning cell execution")
        for idx, cell in enumerate(cells):
            if cell_idx is not None and idx != cell_idx:
                logger.debug(f"Skipping cell {idx} because cell_idx is {cell_idx}")
                continue
            if cell.cell_type == "code":
                logger.debug(f"Executing code cell {idx}")
                cell.outputs = []  # Initialize empty outputs list
                msg_id = client.execute(cell.source)
                logger.debug(f"Message ID for cell {idx}: {msg_id}")
                # Pump IOPub messages until this execution reports an idle
                # status; messages for other executions are ignored via the
                # parent-header msg_id check.
                while True:
                    msg = await client.get_iopub_msg()
                    logger.debug(f"Received message type: {msg['msg_type']}")
                    if msg["parent_header"].get("msg_id") == msg_id:
                        msg_type = msg["msg_type"]
                        content = msg["content"]
                        if msg_type in {
                            "execute_result",
                            "display_data",
                            "stream",
                        }:
                            # Convert the kernel message into the matching
                            # nbformat v4 output node.
                            if msg_type == "stream":
                                output = nbformat.v4.new_output(
                                    output_type="stream",
                                    name=content["name"],
                                    text=content["text"],
                                )
                            elif msg_type == "execute_result":
                                output = nbformat.v4.new_output(
                                    output_type="execute_result",
                                    data=content.get("data", {}),
                                    metadata=content.get("metadata", {}),
                                    execution_count=content.get("execution_count"),
                                )
                            else:  # display_data
                                output = nbformat.v4.new_output(
                                    output_type="display_data",
                                    data=content.get("data", {}),
                                    metadata=content.get("metadata", {}),
                                )
                            cell.outputs.append(output)
                            logger.debug(
                                f"Added output of type {msg_type} to cell {idx}"
                            )
                        elif msg_type == "error":
                            # Create error output and add it to cell outputs
                            error_output = nbformat.v4.new_output(
                                output_type="error",
                                ename=content.get("ename", ""),
                                evalue=content.get("evalue", ""),
                                traceback=content.get("traceback", []),
                            )
                            cell.outputs.append(error_output)
                            error_msg = (
                                f"Error executing cell {idx}:\n"
                                f"Name: {content.get('ename', 'Unknown')}\n"
                                f"Value: {content.get('evalue', 'No error message')}\n"
                                f"Traceback: {content.get('traceback', [])}"
                            )
                            error_messages.append(
                                f"Cell {idx}: {content.get('evalue', '')}"
                            )
                            logger.error(error_msg)
                            # Deliberately not raising: execution continues so
                            # later cells still run and all errors are reported.
                            # raise ValueError(error_msg)
                        elif (
                            msg_type == "status"
                            and content["execution_state"] == "idle"
                        ):
                            # Idle status marks the end of this cell's output.
                            logger.debug(f"Cell {idx} execution finished")
                            break
    finally:
        logger.debug("Stopping kernel channels")
        client.stop_channels()
    return error_messages
async def exec_cmd(
    container: DockerContainer, exec_command: list[str], timeout: float | None = 300
) -> int:
    """Execute a command in a Docker container and return its exit code.

    stdout and stderr are captured and emitted at DEBUG level only; they are
    not returned to the caller.

    Args:
        container: Docker container instance to execute the command in.
        exec_command: Command to execute, as a list of argument strings.
        timeout: Maximum time in seconds to wait for command completion;
            ``None`` disables the timeout.

    Returns:
        The exit code reported by Docker for the completed command.

    Raises:
        TimeoutError: If command execution exceeds the timeout period.
    """
    try:
        async with asyncio.timeout(timeout):
            exec_instance = await container.exec(
                cmd=exec_command,
                tty=True,
                privileged=True,
            )
            # Start the execution
            stream = exec_instance.start()
            stdout = ""
            stderr = ""
            # Drain the multiplexed output stream until EOF.
            while True:
                try:
                    message = await stream.read_out()
                    if message is None:
                        break
                    # Messages come as tuples of (stream_type, data)
                    stream_type, data = message
                    if stream_type == cfg.DOCKER_STREAM_TYPE_STDOUT:  # stdout
                        stdout += data.decode()
                    elif stream_type == cfg.DOCKER_STREAM_TYPE_STDERR:  # stderr
                        stderr += data.decode()
                except EOFError:
                    break
            exit_code = (await exec_instance.inspect())["ExitCode"]
            logger.debug(f"Command output:\nSTDOUT:\n{stdout}\nSTDERR:\n{stderr}")
            return exit_code
    except TimeoutError as err:
        raise TimeoutError(
            f"Command execution timed out after {timeout} seconds"
        ) from err
def collect_notebook_stats(nb: "nbformat.NotebookNode") -> dict[str, int]:
    """Count lines, cells, outputs, and language usage across a notebook.

    Args:
        nb: Notebook whose cells are inspected.

    Returns:
        Mapping of counter name to count (code/comment/markdown lines, cell
        counts, images, tables, R/bash magic cells, and shell commands).
    """
    counters = (
        "code_lines",
        "comment_lines",
        "markdown_lines",
        "code_cells",
        "markdown_cells",
        "images",
        "tables",
        "r_cells",
        "bash_cells",
        "shell_commands",
    )
    stats = dict.fromkeys(counters, 0)
    for cell in nb.cells:
        # Non-empty source lines only.
        lines = [ln for ln in cell.source.split("\n") if ln.strip()]
        if cell.cell_type == "code":
            stats["code_cells"] += 1
            for raw in lines:
                stripped = raw.strip()
                # Comments start with '#' but not '#!' (shebang-style lines
                # count as code).
                if stripped.startswith("#") and not stripped.startswith("#!"):
                    stats["comment_lines"] += 1
                else:
                    stats["code_lines"] += 1
            # Cell magics on the first line mark R / bash cells.
            if lines:
                head = lines[0].strip()
                if head.startswith("%%R"):
                    stats["r_cells"] += 1
                elif head.startswith("%%bash"):
                    stats["bash_cells"] += 1
            # Lines invoking the shell via '!'.
            stats["shell_commands"] += sum(
                ln.strip().startswith("!") for ln in lines
            )
            # Scan outputs for images and table-like content.
            for output in getattr(cell, "outputs", []):
                if output.get("output_type") not in {
                    "display_data",
                    "execute_result",
                }:
                    continue
                data = output.get("data", {})
                if "image/png" in data:
                    stats["images"] += 1
                if "text/html" in data:
                    html = data["text/html"]
                    if isinstance(html, list):
                        html = "".join(html)
                    if "<table" in html:
                        stats["tables"] += 1
                elif "text/plain" in data:
                    text = data["text/plain"]
                    if isinstance(text, list):
                        text = "".join(text)
                    # Plain-text pandas reprs count as tables too.
                    if "DataFrame" in text or "Series" in text:
                        stats["tables"] += 1
        elif cell.cell_type == "markdown":
            stats["markdown_cells"] += 1
            stats["markdown_lines"] += len(lines)
            # Markdown or HTML image embeds.
            stats["images"] += sum(
                "![" in ln or "<img" in ln for ln in lines
            )
    return stats
def load_mcq(
    mcq: dict, open_question: bool = False, question_id: str | None = None
) -> "MultipleChoiceQuestion":
    """Build a MultipleChoiceQuestion from a raw MCQ dictionary.

    Args:
        mcq: Mapping with keys ``question``, ``ideal_answer``, and
            ``distractor_1`` through ``distractor_3``.
        open_question: When True, the prompt omits the answer options.
        question_id: Optional question identifier; defaults to "Q".

    Returns:
        The constructed MultipleChoiceQuestion with deterministic shuffling
        seeded from the question text.
    """
    answer = mcq["ideal_answer"]
    choices = [answer] + [mcq[f"distractor_{i}"] for i in (1, 2, 3)]
    return MultipleChoiceQuestion(
        question=mcq["question"],
        options=choices,
        ideal_answer=answer,
        shuffle_seed=MultipleChoiceQuestion.SEED_USING_QUESTION,
        prompt_without_options=open_question,
        question_id=question_id or "Q",
    )
def nb_to_html(nb: "nbformat.NotebookNode") -> str:
    """Render a notebook to standalone HTML using the bundled lab templates.

    Explicit template paths are required for the HTMLExporter to locate its
    templates when running on GCP Cloud Jobs.
    """
    base_dir = os.path.dirname(__file__)
    exporter_config = Config()
    exporter_config.TemplateExporter.template_paths = [
        os.path.join(base_dir, subdir)
        for subdir in ("templates", "templates/base", "templates/lab")
    ]
    exporter_config.TemplateExporter.template_name = "lab/index.html.j2"
    html, _ = HTMLExporter(config=exporter_config).from_notebook_node(nb)
    return html