From d7fe4b6c360797f1cb0e2ad7c5495fa4d4802907 Mon Sep 17 00:00:00 2001 From: Chris Sewell Date: Sun, 19 Dec 2021 14:45:07 +0100 Subject: [PATCH 01/75] =?UTF-8?q?=E2=AC=86=EF=B8=8F=20UPGRADE:=20myst-pars?= =?UTF-8?q?er=20v0.16.1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .pre-commit-config.yaml | 2 +- setup.cfg | 2 +- tests/test_execute/test_complex_outputs_unrun_auto.xml | 2 +- tests/test_execute/test_complex_outputs_unrun_cache.xml | 2 +- tests/test_parser/test_complex_outputs.xml | 2 +- tests/test_render_outputs/test_complex_outputs.xml | 2 +- tests/test_render_outputs/test_complex_outputs_latex.xml | 2 +- tox.ini | 3 +++ 8 files changed, 10 insertions(+), 7 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d618df75..66a447e8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -48,7 +48,7 @@ repos: - id: mypy args: [--config-file=setup.cfg] additional_dependencies: - - myst-parser~=0.14.0 + - myst-parser~=0.16.1 files: > (?x)^( myst_nb/parser.py| diff --git a/setup.cfg b/setup.cfg index 04ddf5c7..8bbc9376 100644 --- a/setup.cfg +++ b/setup.cfg @@ -44,7 +44,7 @@ install_requires = ipywidgets>=7.0.0,<8 jupyter-cache~=0.4.1 jupyter_sphinx~=0.3.2 - myst-parser~=0.15.2 + myst-parser~=0.16.1 nbconvert>=5.6,<7 nbformat~=5.0 pyyaml diff --git a/tests/test_execute/test_complex_outputs_unrun_auto.xml b/tests/test_execute/test_complex_outputs_unrun_auto.xml index 3555ea21..8e835129 100644 --- a/tests/test_execute/test_complex_outputs_unrun_auto.xml +++ b/tests/test_execute/test_complex_outputs_unrun_auto.xml @@ -28,7 +28,7 @@ something else A numbered list - + something diff --git a/tests/test_execute/test_complex_outputs_unrun_cache.xml b/tests/test_execute/test_complex_outputs_unrun_cache.xml index 707f28a7..17ee2a52 100644 --- a/tests/test_execute/test_complex_outputs_unrun_cache.xml +++ b/tests/test_execute/test_complex_outputs_unrun_cache.xml @@ -28,7 +28,7 @@ something else A numbered list - + something diff --git a/tests/test_parser/test_complex_outputs.xml b/tests/test_parser/test_complex_outputs.xml index f4127ea2..833fcd60 100644 --- a/tests/test_parser/test_complex_outputs.xml +++ b/tests/test_parser/test_complex_outputs.xml @@ -28,7 +28,7 @@ something else A numbered list - + something diff --git a/tests/test_render_outputs/test_complex_outputs.xml b/tests/test_render_outputs/test_complex_outputs.xml index 519bb609..8c48e499 100644 --- a/tests/test_render_outputs/test_complex_outputs.xml +++ b/tests/test_render_outputs/test_complex_outputs.xml @@ -28,7 +28,7 @@ something else A numbered list - + something diff --git a/tests/test_render_outputs/test_complex_outputs_latex.xml b/tests/test_render_outputs/test_complex_outputs_latex.xml index 4312ce6c..5f81f3a4 100644 --- a/tests/test_render_outputs/test_complex_outputs_latex.xml +++ b/tests/test_render_outputs/test_complex_outputs_latex.xml @@ -28,7 +28,7 @@ something else A numbered list - + something diff --git a/tox.ini b/tox.ini index 32eca9fc..245e3d24 100644 --- a/tox.ini +++ b/tox.ini @@ -13,6 +13,9 @@ [tox] envlist = py37-sphinx4 +[testenv] +usedevelop = true + [testenv:py{37,38,39}-sphinx{3,4}] extras = testing deps = From 6854166ad89800dfd6bc86ab28f064f65cb378f6 Mon Sep 17 00:00:00 2001 From: Chris Sewell Date: Sun, 19 Dec 2021 14:46:10 +0100 Subject: [PATCH 02/75] Move jupyter-sphinx/ipywidgets functionality into jsphinx --- myst_nb/__init__.py | 13 ++- myst_nb/_static/mystnb.css | 1 + myst_nb/jsphinx.py | 164 
+++++++++++++++++++++++++++++++++++++ myst_nb/parser.py | 9 +- myst_nb/render_outputs.py | 10 ++- setup.cfg | 3 +- 6 files changed, 188 insertions(+), 12 deletions(-) create mode 100644 myst_nb/jsphinx.py diff --git a/myst_nb/__init__.py b/myst_nb/__init__.py index df572913..0570cf05 100644 --- a/myst_nb/__init__.py +++ b/myst_nb/__init__.py @@ -7,10 +7,6 @@ from docutils import nodes as docnodes from IPython.lib.lexers import IPython3Lexer, IPythonTracebackLexer -from ipywidgets.embed import DEFAULT_EMBED_REQUIREJS_URL, DEFAULT_EMBED_SCRIPT_URL -from jupyter_sphinx import REQUIRE_URL_DEFAULT -from jupyter_sphinx.ast import JupyterWidgetStateNode, JupyterWidgetViewNode -from jupyter_sphinx.utils import sphinx_abs_dir from myst_parser import setup_sphinx as setup_myst_parser from sphinx.addnodes import download_reference from sphinx.application import Sphinx @@ -22,6 +18,14 @@ from .exec_table import setup_exec_table from .execution import update_execution_cache +from .jsphinx import ( + DEFAULT_EMBED_REQUIREJS_URL, + DEFAULT_EMBED_SCRIPT_URL, + REQUIRE_URL_DEFAULT, + JupyterWidgetStateNode, + JupyterWidgetViewNode, + sphinx_abs_dir, +) from .nb_glue import glue # noqa: F401 from .nb_glue.domain import ( NbGlueDomain, @@ -281,6 +285,7 @@ def load_ipywidgets_js(app: Sphinx, env: BuildEnvironment) -> None: return builder = cast(StandaloneHTMLBuilder, app.builder) + # TODO change this logic? require_url_default = ( REQUIRE_URL_DEFAULT if "jupyter_sphinx_require_url" not in app.config diff --git a/myst_nb/_static/mystnb.css b/myst_nb/_static/mystnb.css index e0ca0e1b..7374e231 100644 --- a/myst_nb/_static/mystnb.css +++ b/myst_nb/_static/mystnb.css @@ -36,6 +36,7 @@ div.cell_input > div, div.cell_output div.output > div.highlight { margin-top: 1em; } +/* TODO remove/replace? */ /* Outputs from jupyter_sphinx overrides to remove extra CSS */ div.section div.jupyter_container { padding: .4em; diff --git a/myst_nb/jsphinx.py b/myst_nb/jsphinx.py new file mode 100644 index 00000000..55eb71f6 --- /dev/null +++ b/myst_nb/jsphinx.py @@ -0,0 +1,164 @@ +"""Replacements for jupyter-sphinx""" +import json +import os +import warnings +from pathlib import Path + +import docutils +import nbconvert +import nbformat +from nbconvert.preprocessors import ExtractOutputPreprocessor +from nbconvert.writers import FilesWriter + +# from ipywidgets (7.6.5) +_HTML_MANGER_URL = "https://unpkg.com/@jupyter-widgets/html-manager@^0.20.0" +DEFAULT_EMBED_SCRIPT_URL = f"{_HTML_MANGER_URL}/dist/embed.js" +DEFAULT_EMBED_REQUIREJS_URL = f"{_HTML_MANGER_URL}/dist/embed-amd.js" +snippet_template = """ +{load} + +{widget_views} +""" +widget_view_template = """""" + +# from jupyter-sphinx (0.3.2) +REQUIRE_URL_DEFAULT = ( + "https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js" +) + +WIDGET_STATE_MIMETYPE = "application/vnd.jupyter.widget-state+json" +WIDGET_VIEW_MIMETYPE = "application/vnd.jupyter.widget-view+json" + + +class JupyterWidgetStateNode(docutils.nodes.Element): + """Appended to doctree if any Jupyter cell produced a widget as output. + + Contains the state needed to render a collection of Jupyter widgets. + + Per doctree there is 1 JupyterWidgetStateNode per kernel that produced + Jupyter widgets when running. This is fine as (presently) the + 'html-manager' Javascript library, which embeds widgets, loads the state + from all script tags on the page of the correct mimetype. 
+ """ + + def __init__(self, rawsource="", *children, **attributes): + super().__init__("", state=attributes["state"]) + + def html(self): + # TODO: render into a separate file if 'html-manager' starts fully + # parsing script tags, and not just grabbing their innerHTML + # https://github.com/jupyter-widgets/ipywidgets/blob/master/packages/html-manager/src/libembed.ts#L36 + return snippet_template.format( + load="", widget_views="", json_data=json.dumps(self["state"]) + ) + + +class JupyterWidgetViewNode(docutils.nodes.Element): + """Inserted into doctree whenever a Jupyter cell produces a widget as output. + + Contains a unique ID for this widget; enough information for the widget + embedding javascript to render it, given the widget state. For non-HTML + outputs this doctree node is rendered generically. + """ + + def __init__(self, rawsource="", *children, **attributes): + super().__init__("", view_spec=attributes["view_spec"]) + + def html(self): + return widget_view_template.format(view_spec=json.dumps(self["view_spec"])) + + +def sphinx_abs_dir(env, *paths): + # We write the output files into + # output_directory / jupyter_execute / path relative to source directory + # Sphinx expects download links relative to source file or relative to + # source dir and prepended with '/'. We use the latter option. + out_path = ( + output_directory(env) / Path(env.docname).parent / Path(*paths) + ).resolve() + + if os.name == "nt": + # Can't get relative path between drives on Windows + return out_path.as_posix() + + # Path().relative_to() doesn't work when not a direct subpath + return "/" + os.path.relpath(out_path, env.app.srcdir) + + +def output_directory(env): + # Put output images inside the sphinx build directory to avoid + # polluting the current working directory. We don't use a + # temporary directory, as sphinx may cache the doctree with + # references to the images that we write + + # Note: we are using an implicit fact that sphinx output directories are + # direct subfolders of the build directory. + # TODO change this? + return (Path(env.app.outdir) / os.path.pardir / "jupyter_execute").resolve() + + +def strip_latex_delimiters(source): + r"""Remove LaTeX math delimiters that would be rendered by the math block. + + These are: ``\(…\)``, ``\[…\]``, ``$…$``, and ``$$…$$``. + This is necessary because sphinx does not have a dedicated role for + generic LaTeX, while Jupyter only defines generic LaTeX output, see + https://github.com/jupyter/jupyter-sphinx/issues/90 for discussion. + """ + source = source.strip() + delimiter_pairs = (pair.split() for pair in r"\( \),\[ \],$$ $$,$ $".split(",")) + for start, end in delimiter_pairs: + if source.startswith(start) and source.endswith(end): + return source[len(start) : -len(end)] + + return source + + +def get_widgets(notebook): + try: + return notebook.metadata.widgets[WIDGET_STATE_MIMETYPE] + except AttributeError: + # Don't catch KeyError, as it's a bug if 'widgets' does + # not contain 'WIDGET_STATE_MIMETYPE' + return None + + +def contains_widgets(notebook): + widgets = get_widgets(notebook) + return widgets and widgets["state"] + + +def write_notebook_output(notebook, output_dir, notebook_name, location=None): + """Extract output from notebook cells and write to files in output_dir. + + This also modifies 'notebook' in-place, adding metadata to each cell that + maps output mime-types to the filenames the output was saved under. 
+ """ + resources = dict(unique_key=os.path.join(output_dir, notebook_name), outputs={}) + + # Modifies 'resources' in-place + ExtractOutputPreprocessor().preprocess(notebook, resources) + # Write the cell outputs to files where we can (images and PDFs), + # as well as the notebook file. + FilesWriter(build_directory=output_dir).write( + nbformat.writes(notebook), + resources, + os.path.join(output_dir, notebook_name + ".ipynb"), + ) + + exporter = nbconvert.exporters.ScriptExporter( + # TODO:log=LoggerAdapterWrapper(js.logger) + ) + with warnings.catch_warnings(): + # See https://github.com/jupyter/nbconvert/issues/1388 + warnings.simplefilter("ignore", DeprecationWarning) + contents, resources = exporter.from_notebook_node(notebook) + + notebook_file = notebook_name + resources["output_extension"] + output_dir = Path(output_dir) + # utf-8 is the de-facto standard encoding for notebooks. + (output_dir / notebook_file).write_text(contents, encoding="utf8") diff --git a/myst_nb/parser.py b/myst_nb/parser.py index d159cd81..5ce27478 100644 --- a/myst_nb/parser.py +++ b/myst_nb/parser.py @@ -3,8 +3,6 @@ import nbformat as nbf from docutils import nodes -from jupyter_sphinx.ast import JupyterWidgetStateNode, get_widgets -from jupyter_sphinx.execute import contains_widgets, write_notebook_output from markdown_it import MarkdownIt from markdown_it.rules_core import StateCore from markdown_it.token import Token @@ -21,6 +19,13 @@ from myst_nb.nb_glue.domain import NbGlueDomain from myst_nb.nodes import CellInputNode, CellNode, CellOutputBundleNode, CellOutputNode +from .jsphinx import ( + JupyterWidgetStateNode, + contains_widgets, + get_widgets, + write_notebook_output, +) + SPHINX_LOGGER = logging.getLogger(__name__) diff --git a/myst_nb/render_outputs.py b/myst_nb/render_outputs.py index 1882a8cd..2d79f55b 100644 --- a/myst_nb/render_outputs.py +++ b/myst_nb/render_outputs.py @@ -9,8 +9,6 @@ from docutils import nodes from docutils.parsers.rst import directives from importlib_metadata import entry_points -from jupyter_sphinx.ast import JupyterWidgetViewNode, strip_latex_delimiters -from jupyter_sphinx.utils import sphinx_abs_dir from myst_parser.docutils_renderer import make_document from myst_parser.main import MdParserConfig, default_parser from nbformat import NotebookNode @@ -20,12 +18,16 @@ from sphinx.transforms.post_transforms import SphinxPostTransform from sphinx.util import logging +from .jsphinx import ( + WIDGET_VIEW_MIMETYPE, + JupyterWidgetViewNode, + sphinx_abs_dir, + strip_latex_delimiters, +) from .nodes import CellOutputBundleNode LOGGER = logging.getLogger(__name__) -WIDGET_VIEW_MIMETYPE = "application/vnd.jupyter.widget-view+json" - def get_default_render_priority(builder: str) -> Optional[List[str]]: priority = { diff --git a/setup.cfg b/setup.cfg index 8bbc9376..caa28f4c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -41,9 +41,7 @@ install_requires = docutils>=0.15,<0.18 importlib_metadata ipython - ipywidgets>=7.0.0,<8 jupyter-cache~=0.4.1 - jupyter_sphinx~=0.3.2 myst-parser~=0.16.1 nbconvert>=5.6,<7 nbformat~=5.0 @@ -88,6 +86,7 @@ rtd = testing = coverage<5.0 ipykernel~=5.5 + ipywidgets jupytext~=1.11.2 # TODO: 3.4.0 has some warnings that need to be fixed in the tests. 
matplotlib~=3.3.0 From 815f6fc2617d083b8ec89d27b276231eb928e824 Mon Sep 17 00:00:00 2001 From: Chris Sewell Date: Mon, 20 Dec 2021 08:57:59 +0000 Subject: [PATCH 03/75] move nodes from jsphinx -> nodes --- myst_nb/__init__.py | 11 +++++-- myst_nb/jsphinx.py | 41 +------------------------- myst_nb/nodes.py | 62 ++++++++++++++++++++++++++++++++------- myst_nb/parser.py | 8 ++--- myst_nb/render_outputs.py | 9 ++---- 5 files changed, 65 insertions(+), 66 deletions(-) diff --git a/myst_nb/__init__.py b/myst_nb/__init__.py index 0570cf05..49d26e6f 100644 --- a/myst_nb/__init__.py +++ b/myst_nb/__init__.py @@ -22,8 +22,6 @@ DEFAULT_EMBED_REQUIREJS_URL, DEFAULT_EMBED_SCRIPT_URL, REQUIRE_URL_DEFAULT, - JupyterWidgetStateNode, - JupyterWidgetViewNode, sphinx_abs_dir, ) from .nb_glue import glue # noqa: F401 @@ -35,7 +33,14 @@ PasteTextNode, ) from .nb_glue.transform import PasteNodesToDocutils -from .nodes import CellInputNode, CellNode, CellOutputBundleNode, CellOutputNode +from .nodes import ( + CellInputNode, + CellNode, + CellOutputBundleNode, + CellOutputNode, + JupyterWidgetStateNode, + JupyterWidgetViewNode, +) from .parser import NotebookParser from .render_outputs import ( CellOutputsToNodes, diff --git a/myst_nb/jsphinx.py b/myst_nb/jsphinx.py index 55eb71f6..2d597cc1 100644 --- a/myst_nb/jsphinx.py +++ b/myst_nb/jsphinx.py @@ -1,10 +1,9 @@ """Replacements for jupyter-sphinx""" -import json import os import warnings from pathlib import Path -import docutils +# TODO pin nbconvert version? import nbconvert import nbformat from nbconvert.preprocessors import ExtractOutputPreprocessor @@ -34,44 +33,6 @@ WIDGET_VIEW_MIMETYPE = "application/vnd.jupyter.widget-view+json" -class JupyterWidgetStateNode(docutils.nodes.Element): - """Appended to doctree if any Jupyter cell produced a widget as output. - - Contains the state needed to render a collection of Jupyter widgets. - - Per doctree there is 1 JupyterWidgetStateNode per kernel that produced - Jupyter widgets when running. This is fine as (presently) the - 'html-manager' Javascript library, which embeds widgets, loads the state - from all script tags on the page of the correct mimetype. - """ - - def __init__(self, rawsource="", *children, **attributes): - super().__init__("", state=attributes["state"]) - - def html(self): - # TODO: render into a separate file if 'html-manager' starts fully - # parsing script tags, and not just grabbing their innerHTML - # https://github.com/jupyter-widgets/ipywidgets/blob/master/packages/html-manager/src/libembed.ts#L36 - return snippet_template.format( - load="", widget_views="", json_data=json.dumps(self["state"]) - ) - - -class JupyterWidgetViewNode(docutils.nodes.Element): - """Inserted into doctree whenever a Jupyter cell produces a widget as output. - - Contains a unique ID for this widget; enough information for the widget - embedding javascript to render it, given the widget state. For non-HTML - outputs this doctree node is rendered generically. 
- """ - - def __init__(self, rawsource="", *children, **attributes): - super().__init__("", view_spec=attributes["view_spec"]) - - def html(self): - return widget_view_template.format(view_spec=json.dumps(self["view_spec"])) - - def sphinx_abs_dir(env, *paths): # We write the output files into # output_directory / jupyter_execute / path relative to source directory diff --git a/myst_nb/nodes.py b/myst_nb/nodes.py index cdf869ee..e0700db7 100644 --- a/myst_nb/nodes.py +++ b/myst_nb/nodes.py @@ -1,30 +1,24 @@ """AST nodes to designate notebook components.""" -from typing import List +import json +from typing import Any, List from docutils import nodes from nbformat import NotebookNode +from .jsphinx import snippet_template, widget_view_template + class CellNode(nodes.container): """Represent a cell in the Sphinx AST.""" - def __init__(self, rawsource="", *children, **attributes): - super().__init__("", **attributes) - class CellInputNode(nodes.container): """Represent an input cell in the Sphinx AST.""" - def __init__(self, rawsource="", *children, **attributes): - super().__init__("", **attributes) - class CellOutputNode(nodes.container): """Represent an output cell in the Sphinx AST.""" - def __init__(self, rawsource="", *children, **attributes): - super().__init__("", **attributes) - class CellOutputBundleNode(nodes.container): """Represent a MimeBundle in the Sphinx AST, to be transformed later.""" @@ -62,3 +56,51 @@ def copy(self): obj.source = self.source obj.line = self.line return obj + + +class JupyterWidgetStateNode(nodes.Element): + """Appended to doctree if any Jupyter cell produced a widget as output. + + Contains the state needed to render a collection of Jupyter widgets. + + Per doctree there is 1 JupyterWidgetStateNode per kernel that produced + Jupyter widgets when running. This is fine as (presently) the + 'html-manager' Javascript library, which embeds widgets, loads the state + from all script tags on the page of the correct mimetype. + """ + + def __init__( + self, rawsource: str = "", *children: nodes.Element, **attributes: Any + ): + if "state" not in attributes: + raise ValueError("No 'state' specified") + super().__init__(rawsource, *children, **attributes) + + def html(self): + """Set in extension setup for html rendering visits.""" + # TODO: render into a separate file if 'html-manager' starts fully + # parsing script tags, and not just grabbing their innerHTML + # https://github.com/jupyter-widgets/ipywidgets/blob/master/packages/html-manager/src/libembed.ts#L36 + return snippet_template.format( + load="", widget_views="", json_data=json.dumps(self["state"]) + ) + + +class JupyterWidgetViewNode(nodes.Element): + """Inserted into doctree whenever a Jupyter cell produces a widget as output. + + Contains a unique ID for this widget; enough information for the widget + embedding javascript to render it, given the widget state. For non-HTML + outputs this doctree node is rendered generically. 
+ """ + + def __init__( + self, rawsource: str = "", *children: nodes.Element, **attributes: Any + ): + if "view_spec" not in attributes: + raise ValueError("No 'view_spec' specified") + super().__init__(rawsource, *children, **attributes) + + def html(self): + """Set in extension setup for html rendering visits.""" + return widget_view_template.format(view_spec=json.dumps(self["view_spec"])) diff --git a/myst_nb/parser.py b/myst_nb/parser.py index 5ce27478..9db1ee0b 100644 --- a/myst_nb/parser.py +++ b/myst_nb/parser.py @@ -19,12 +19,8 @@ from myst_nb.nb_glue.domain import NbGlueDomain from myst_nb.nodes import CellInputNode, CellNode, CellOutputBundleNode, CellOutputNode -from .jsphinx import ( - JupyterWidgetStateNode, - contains_widgets, - get_widgets, - write_notebook_output, -) +from .jsphinx import contains_widgets, get_widgets, write_notebook_output +from .nodes import JupyterWidgetStateNode SPHINX_LOGGER = logging.getLogger(__name__) diff --git a/myst_nb/render_outputs.py b/myst_nb/render_outputs.py index 2d79f55b..f66dc97e 100644 --- a/myst_nb/render_outputs.py +++ b/myst_nb/render_outputs.py @@ -18,13 +18,8 @@ from sphinx.transforms.post_transforms import SphinxPostTransform from sphinx.util import logging -from .jsphinx import ( - WIDGET_VIEW_MIMETYPE, - JupyterWidgetViewNode, - sphinx_abs_dir, - strip_latex_delimiters, -) -from .nodes import CellOutputBundleNode +from .jsphinx import WIDGET_VIEW_MIMETYPE, sphinx_abs_dir, strip_latex_delimiters +from .nodes import CellOutputBundleNode, JupyterWidgetViewNode LOGGER = logging.getLogger(__name__) From 83cccbdca45ecef3d5aa39d60bedb85520c9ab8d Mon Sep 17 00:00:00 2001 From: Chris Sewell Date: Tue, 21 Dec 2021 14:31:55 +0000 Subject: [PATCH 04/75] ipywidgets url: unpkg -> jsdelivr --- myst_nb/jsphinx.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/myst_nb/jsphinx.py b/myst_nb/jsphinx.py index 2d597cc1..065e6103 100644 --- a/myst_nb/jsphinx.py +++ b/myst_nb/jsphinx.py @@ -9,8 +9,8 @@ from nbconvert.preprocessors import ExtractOutputPreprocessor from nbconvert.writers import FilesWriter -# from ipywidgets (7.6.5) -_HTML_MANGER_URL = "https://unpkg.com/@jupyter-widgets/html-manager@^0.20.0" +# from https://github.com/jupyter-widgets/ipywidgets v7.6.5 +_HTML_MANGER_URL = "https://cdn.jsdelivr.net/npm/@jupyter-widgets/html-manager@^0.20.0" DEFAULT_EMBED_SCRIPT_URL = f"{_HTML_MANGER_URL}/dist/embed.js" DEFAULT_EMBED_REQUIREJS_URL = f"{_HTML_MANGER_URL}/dist/embed-amd.js" snippet_template = """ From 71bec09e55393f729d56326de43519046b560cc0 Mon Sep 17 00:00:00 2001 From: Chris Sewell Date: Tue, 21 Dec 2021 14:44:19 +0000 Subject: [PATCH 05/75] Move sphinx extension to separate module --- myst_nb/__init__.py | 428 +------------------------------------- myst_nb/execution.py | 2 +- myst_nb/extension.py | 426 +++++++++++++++++++++++++++++++++++++ myst_nb/nodes.py | 2 +- myst_nb/parser.py | 12 +- myst_nb/render_outputs.py | 4 +- 6 files changed, 444 insertions(+), 430 deletions(-) create mode 100644 myst_nb/extension.py diff --git a/myst_nb/__init__.py b/myst_nb/__init__.py index 49d26e6f..1ac7dd1c 100644 --- a/myst_nb/__init__.py +++ b/myst_nb/__init__.py @@ -1,426 +1,10 @@ +"""A docutils/sphinx parser for Jupyter Notebooks.""" __version__ = "0.13.1" -import os -from collections.abc import Sequence -from pathlib import Path -from typing import cast -from docutils import nodes as docnodes -from IPython.lib.lexers import IPython3Lexer, IPythonTracebackLexer -from myst_parser import setup_sphinx as setup_myst_parser -from 
sphinx.addnodes import download_reference -from sphinx.application import Sphinx -from sphinx.builders.html import StandaloneHTMLBuilder -from sphinx.environment import BuildEnvironment -from sphinx.errors import SphinxError -from sphinx.util import import_object, logging -from sphinx.util.docutils import ReferenceRole, SphinxDirective +def setup(app): + """Sphinx extension setup.""" + # we import this locally, so sphinx is not automatically imported + from .extension import sphinx_setup -from .exec_table import setup_exec_table -from .execution import update_execution_cache -from .jsphinx import ( - DEFAULT_EMBED_REQUIREJS_URL, - DEFAULT_EMBED_SCRIPT_URL, - REQUIRE_URL_DEFAULT, - sphinx_abs_dir, -) -from .nb_glue import glue # noqa: F401 -from .nb_glue.domain import ( - NbGlueDomain, - PasteInlineNode, - PasteMathNode, - PasteNode, - PasteTextNode, -) -from .nb_glue.transform import PasteNodesToDocutils -from .nodes import ( - CellInputNode, - CellNode, - CellOutputBundleNode, - CellOutputNode, - JupyterWidgetStateNode, - JupyterWidgetViewNode, -) -from .parser import NotebookParser -from .render_outputs import ( - CellOutputsToNodes, - get_default_render_priority, - load_renderer, -) - -LOGGER = logging.getLogger(__name__) - - -def setup(app: Sphinx): - """Initialize Sphinx extension.""" - # Allow parsing ipynb files - app.add_source_suffix(".md", "myst-nb") - app.add_source_suffix(".ipynb", "myst-nb") - app.add_source_parser(NotebookParser) - app.setup_extension("sphinx_togglebutton") - - # Helper functions for the registry, pulled from jupyter-sphinx - def skip(self, node): - raise docnodes.SkipNode - - # Used to render an element node as HTML - def visit_element_html(self, node): - self.body.append(node.html()) - raise docnodes.SkipNode - - # Shortcut for registering our container nodes - render_container = ( - lambda self, node: self.visit_container(node), - lambda self, node: self.depart_container(node), - ) - - # Register our container nodes, these should behave just like a regular container - for node in [CellNode, CellInputNode, CellOutputNode]: - app.add_node( - node, - override=True, - html=(render_container), - latex=(render_container), - textinfo=(render_container), - text=(render_container), - man=(render_container), - ) - - # Register the output bundle node. - # No translators should touch this node because we'll replace it in a post-transform - app.add_node( - CellOutputBundleNode, - override=True, - html=(skip, None), - latex=(skip, None), - textinfo=(skip, None), - text=(skip, None), - man=(skip, None), - ) - - # these nodes hold widget state/view JSON, - # but are only rendered properly in HTML documents. 
- for node in [JupyterWidgetStateNode, JupyterWidgetViewNode]: - app.add_node( - node, - override=True, - html=(visit_element_html, None), - latex=(skip, None), - textinfo=(skip, None), - text=(skip, None), - man=(skip, None), - ) - - # Register our inline nodes so they can be parsed as a part of titles - # No translators should touch these nodes because we'll replace them in a transform - for node in [PasteMathNode, PasteNode, PasteTextNode, PasteInlineNode]: - app.add_node( - node, - override=True, - html=(skip, None), - latex=(skip, None), - textinfo=(skip, None), - text=(skip, None), - man=(skip, None), - ) - - # Add configuration for the cache - app.add_config_value("jupyter_cache", "", "env") - app.add_config_value("execution_excludepatterns", [], "env") - app.add_config_value("jupyter_execute_notebooks", "auto", "env") - app.add_config_value("execution_timeout", 30, "env") - app.add_config_value("execution_allow_errors", False, "env") - app.add_config_value("execution_in_temp", False, "env") - # show traceback in stdout (in addition to writing to file) - # this is useful in e.g. RTD where one cannot inspect a file - app.add_config_value("execution_show_tb", False, "") - app.add_config_value("nb_custom_formats", {}, "env") - - # render config - app.add_config_value("nb_render_key", "render", "env") - app.add_config_value("nb_render_priority", {}, "env") - app.add_config_value("nb_render_plugin", "default", "env") - app.add_config_value("nb_render_text_lexer", "myst-ansi", "env") - app.add_config_value("nb_output_stderr", "show", "env") - app.add_config_value("nb_merge_streams", False, "env") - - # Register our post-transform which will convert output bundles to nodes - app.add_post_transform(PasteNodesToDocutils) - app.add_post_transform(CellOutputsToNodes) - - # Add myst-parser transforms and configuration - setup_myst_parser(app) - - # Events - app.connect("config-inited", validate_config_values) - app.connect("builder-inited", static_path) - app.connect("builder-inited", set_valid_execution_paths) - app.connect("builder-inited", set_up_execution_data) - app.connect("builder-inited", set_render_priority) - app.connect("env-purge-doc", remove_execution_data) - app.connect("env-get-outdated", update_execution_cache) - app.connect("config-inited", add_exclude_patterns) - app.connect("config-inited", update_togglebutton_classes) - app.connect("env-updated", save_glue_cache) - app.connect("config-inited", add_nb_custom_formats) - app.connect("env-updated", load_ipywidgets_js) - - from myst_nb.ansi_lexer import AnsiColorLexer - - # For syntax highlighting - app.add_lexer("ipythontb", IPythonTracebackLexer) - app.add_lexer("ipython", IPython3Lexer) - app.add_lexer("myst-ansi", AnsiColorLexer) - - # Add components - app.add_directive("code-cell", CodeCell) - app.add_role("nb-download", JupyterDownloadRole()) - app.add_css_file("mystnb.css") - app.add_domain(NbGlueDomain) - - # execution statistics table - setup_exec_table(app) - - # TODO need to deal with key clashes in NbGlueDomain.merge_domaindata - # before this is parallel_read_safe - return {"version": __version__, "parallel_read_safe": False} - - -class MystNbConfigError(SphinxError): - """Error specific to MyST-NB.""" - - category = "MyST NB Configuration Error" - - -def validate_config_values(app: Sphinx, config): - """Validate configuration values.""" - execute_mode = app.config["jupyter_execute_notebooks"] - if execute_mode not in ["force", "auto", "cache", "off"]: - raise MystNbConfigError( - "'jupyter_execute_notebooks' can be: 
" - f"`force`, `auto`, `cache` or `off`, but got: {execute_mode}", - ) - - if app.config["jupyter_cache"] and execute_mode != "cache": - raise MystNbConfigError( - "'jupyter_cache' is set, " - f"but 'jupyter_execute_notebooks' is not `cache`: {execute_mode}" - ) - - if app.config["jupyter_cache"] and not os.path.isdir(app.config["jupyter_cache"]): - raise MystNbConfigError( - f"'jupyter_cache' is not a directory: {app.config['jupyter_cache']}", - ) - - if not isinstance(app.config["nb_custom_formats"], dict): - raise MystNbConfigError( - "'nb_custom_formats' should be a dictionary: " - f"{app.config['nb_custom_formats']}" - ) - for name, converter in app.config["nb_custom_formats"].items(): - if not isinstance(name, str): - raise MystNbConfigError( - f"'nb_custom_formats' keys should be a string: {name}" - ) - if isinstance(converter, str): - app.config["nb_custom_formats"][name] = (converter, {}) - elif not (isinstance(converter, Sequence) and len(converter) in [2, 3]): - raise MystNbConfigError( - "'nb_custom_formats' values must be " - f"either strings or 2/3-element sequences, got: {converter}" - ) - - converter_str = app.config["nb_custom_formats"][name][0] - caller = import_object( - converter_str, - f"MyST-NB nb_custom_formats: {name}", - ) - if not callable(caller): - raise MystNbConfigError( - f"`nb_custom_formats.{name}` converter is not callable: {caller}" - ) - if len(app.config["nb_custom_formats"][name]) == 2: - app.config["nb_custom_formats"][name].append(None) - elif not isinstance(app.config["nb_custom_formats"][name][2], bool): - raise MystNbConfigError( - f"`nb_custom_formats.{name}.commonmark_only` arg is not boolean" - ) - - if not isinstance(app.config["nb_render_key"], str): - raise MystNbConfigError("`nb_render_key` is not a string") - - if app.config["nb_output_stderr"] not in [ - "show", - "remove", - "remove-warn", - "warn", - "error", - "severe", - ]: - raise MystNbConfigError( - "`nb_output_stderr` not one of: " - "'show', 'remove', 'remove-warn', 'warn', 'error', 'severe'" - ) - - # try loading notebook output renderer - load_renderer(app.config["nb_render_plugin"]) - - -def static_path(app: Sphinx): - static_path = Path(__file__).absolute().with_name("_static") - app.config.html_static_path.append(str(static_path)) - - -def load_ipywidgets_js(app: Sphinx, env: BuildEnvironment) -> None: - """Add ipywidget JavaScript to HTML pages. - - We adapt the code in sphinx.ext.mathjax, - to only add this JS if widgets have been found in any notebooks. - (ideally we would only add it to the pages containing widgets, - but this is not trivial in sphinx) - - There are 2 cases: - - - ipywidgets 7, with require - - ipywidgets 7, no require - - We reuse settings, if available, for jupyter-sphinx - """ - if app.builder.format != "html" or not app.env.nb_contains_widgets: - return - builder = cast(StandaloneHTMLBuilder, app.builder) - - # TODO change this logic? 
- require_url_default = ( - REQUIRE_URL_DEFAULT - if "jupyter_sphinx_require_url" not in app.config - else app.config.jupyter_sphinx_require_url - ) - embed_url_default = ( - None - if "jupyter_sphinx_embed_url" not in app.config - else app.config.jupyter_sphinx_embed_url - ) - - if require_url_default: - builder.add_js_file(require_url_default) - embed_url = embed_url_default or DEFAULT_EMBED_REQUIREJS_URL - else: - embed_url = embed_url_default or DEFAULT_EMBED_SCRIPT_URL - if embed_url: - builder.add_js_file(embed_url) - - -def set_render_priority(app: Sphinx): - """Set the render priority for the particular builder.""" - builder = app.builder.name - if app.config.nb_render_priority and builder in app.config.nb_render_priority: - app.env.nb_render_priority = app.config.nb_render_priority[builder] - else: - app.env.nb_render_priority = get_default_render_priority(builder) - - if app.env.nb_render_priority is None: - raise MystNbConfigError(f"`nb_render_priority` not set for builder: {builder}") - try: - for item in app.env.nb_render_priority: - assert isinstance(item, str) - except Exception: - raise MystNbConfigError( - f"`nb_render_priority` is not a list of str: {app.env.nb_render_priority}" - ) - - -def set_valid_execution_paths(app: Sphinx): - """Set files excluded from execution, and valid file suffixes - - Patterns given in execution_excludepatterns conf variable from executing. - """ - app.env.nb_excluded_exec_paths = { - str(path) - for pat in app.config["execution_excludepatterns"] - for path in Path().cwd().rglob(pat) - } - LOGGER.verbose("MyST-NB: Excluded Paths: %s", app.env.nb_excluded_exec_paths) - app.env.nb_allowed_exec_suffixes = { - suffix - for suffix, parser_type in app.config["source_suffix"].items() - if parser_type in ("myst-nb",) - } - app.env.nb_contains_widgets = False - - -def set_up_execution_data(app: Sphinx): - if not hasattr(app.env, "nb_execution_data"): - app.env.nb_execution_data = {} - if not hasattr(app.env, "nb_execution_data_changed"): - app.env.nb_execution_data_changed = False - app.env.nb_execution_data_changed = False - - -def remove_execution_data(app: Sphinx, env, docname): - if docname in app.env.nb_execution_data: - app.env.nb_execution_data.pop(docname) - app.env.nb_execution_data_changed = True - - -def add_nb_custom_formats(app: Sphinx, config): - """Add custom conversion formats.""" - for suffix in config.nb_custom_formats: - app.add_source_suffix(suffix, "myst-nb") - - -def add_exclude_patterns(app: Sphinx, config): - """Add default exclude patterns (if not already present).""" - if "**.ipynb_checkpoints" not in config.exclude_patterns: - config.exclude_patterns.append("**.ipynb_checkpoints") - - -def update_togglebutton_classes(app: Sphinx, config): - to_add = [ - ".tag_hide_input div.cell_input", - ".tag_hide-input div.cell_input", - ".tag_hide_output div.cell_output", - ".tag_hide-output div.cell_output", - ".tag_hide_cell.cell", - ".tag_hide-cell.cell", - ] - for selector in to_add: - config.togglebutton_selector += f", {selector}" - - -def save_glue_cache(app: Sphinx, env): - NbGlueDomain.from_env(env).write_cache() - - -class JupyterDownloadRole(ReferenceRole): - def run(self): - reftarget = sphinx_abs_dir(self.env, self.target) - node = download_reference(self.rawtext, reftarget=reftarget) - self.set_source_info(node) - title = self.title if self.has_explicit_title else self.target - node += docnodes.literal( - self.rawtext, title, classes=["xref", "download", "myst-nb"] - ) - return [node], [] - - -class 
CodeCell(SphinxDirective): - """Raises a warning if it is triggered, it should not make it to the doctree.""" - - optional_arguments = 1 - final_argument_whitespace = True - has_content = True - - def run(self): - LOGGER.warning( - ( - "Found an unexpected `code-cell` directive. " - "Either this file was not converted to a notebook, " - "because Jupytext header content was missing, " - "or the `code-cell` was not converted, because it is nested. " - "See https://myst-nb.readthedocs.io/en/latest/use/markdown.html " - "for more information." - ), - location=(self.env.docname, self.lineno), - ) - return [] + return sphinx_setup(app) diff --git a/myst_nb/execution.py b/myst_nb/execution.py index ffbcb5d6..d47ef4e4 100644 --- a/myst_nb/execution.py +++ b/myst_nb/execution.py @@ -25,7 +25,7 @@ from sphinx.environment import BuildEnvironment from sphinx.util import logging, progress_message -from .converter import get_nb_converter +from myst_nb.converter import get_nb_converter LOGGER = logging.getLogger(__name__) diff --git a/myst_nb/extension.py b/myst_nb/extension.py new file mode 100644 index 00000000..8886c44c --- /dev/null +++ b/myst_nb/extension.py @@ -0,0 +1,426 @@ +"""Sphinx extension setup""" + +import os +from collections.abc import Sequence +from pathlib import Path +from typing import cast + +from docutils import nodes as docnodes +from IPython.lib.lexers import IPython3Lexer, IPythonTracebackLexer +from myst_parser import setup_sphinx as setup_myst_parser +from sphinx.addnodes import download_reference +from sphinx.application import Sphinx +from sphinx.builders.html import StandaloneHTMLBuilder +from sphinx.environment import BuildEnvironment +from sphinx.errors import SphinxError +from sphinx.util import import_object, logging +from sphinx.util.docutils import ReferenceRole, SphinxDirective + +from myst_nb import __version__ +from myst_nb.ansi_lexer import AnsiColorLexer +from myst_nb.exec_table import setup_exec_table +from myst_nb.execution import update_execution_cache +from myst_nb.jsphinx import ( + DEFAULT_EMBED_REQUIREJS_URL, + DEFAULT_EMBED_SCRIPT_URL, + REQUIRE_URL_DEFAULT, + sphinx_abs_dir, +) +from myst_nb.nb_glue import glue # noqa: F401 +from myst_nb.nb_glue.domain import ( + NbGlueDomain, + PasteInlineNode, + PasteMathNode, + PasteNode, + PasteTextNode, +) +from myst_nb.nb_glue.transform import PasteNodesToDocutils +from myst_nb.nodes import ( + CellInputNode, + CellNode, + CellOutputBundleNode, + CellOutputNode, + JupyterWidgetStateNode, + JupyterWidgetViewNode, +) +from myst_nb.parser import NotebookParser +from myst_nb.render_outputs import ( + CellOutputsToNodes, + get_default_render_priority, + load_renderer, +) + +LOGGER = logging.getLogger(__name__) + + +def sphinx_setup(app: Sphinx): + """Initialize Sphinx extension.""" + # Allow parsing ipynb files + app.add_source_suffix(".md", "myst-nb") + app.add_source_suffix(".ipynb", "myst-nb") + app.add_source_parser(NotebookParser) + app.setup_extension("sphinx_togglebutton") + + # Helper functions for the registry, pulled from jupyter-sphinx + def skip(self, node): + raise docnodes.SkipNode + + # Used to render an element node as HTML + def visit_element_html(self, node): + self.body.append(node.html()) + raise docnodes.SkipNode + + # Shortcut for registering our container nodes + render_container = ( + lambda self, node: self.visit_container(node), + lambda self, node: self.depart_container(node), + ) + + # Register our container nodes, these should behave just like a regular container + for node in [CellNode, 
CellInputNode, CellOutputNode]: + app.add_node( + node, + override=True, + html=(render_container), + latex=(render_container), + textinfo=(render_container), + text=(render_container), + man=(render_container), + ) + + # Register the output bundle node. + # No translators should touch this node because we'll replace it in a post-transform + app.add_node( + CellOutputBundleNode, + override=True, + html=(skip, None), + latex=(skip, None), + textinfo=(skip, None), + text=(skip, None), + man=(skip, None), + ) + + # these nodes hold widget state/view JSON, + # but are only rendered properly in HTML documents. + for node in [JupyterWidgetStateNode, JupyterWidgetViewNode]: + app.add_node( + node, + override=True, + html=(visit_element_html, None), + latex=(skip, None), + textinfo=(skip, None), + text=(skip, None), + man=(skip, None), + ) + + # Register our inline nodes so they can be parsed as a part of titles + # No translators should touch these nodes because we'll replace them in a transform + for node in [PasteMathNode, PasteNode, PasteTextNode, PasteInlineNode]: + app.add_node( + node, + override=True, + html=(skip, None), + latex=(skip, None), + textinfo=(skip, None), + text=(skip, None), + man=(skip, None), + ) + + # Add configuration for the cache + app.add_config_value("jupyter_cache", "", "env") + app.add_config_value("execution_excludepatterns", [], "env") + app.add_config_value("jupyter_execute_notebooks", "auto", "env") + app.add_config_value("execution_timeout", 30, "env") + app.add_config_value("execution_allow_errors", False, "env") + app.add_config_value("execution_in_temp", False, "env") + # show traceback in stdout (in addition to writing to file) + # this is useful in e.g. RTD where one cannot inspect a file + app.add_config_value("execution_show_tb", False, "") + app.add_config_value("nb_custom_formats", {}, "env") + + # render config + app.add_config_value("nb_render_key", "render", "env") + app.add_config_value("nb_render_priority", {}, "env") + app.add_config_value("nb_render_plugin", "default", "env") + app.add_config_value("nb_render_text_lexer", "myst-ansi", "env") + app.add_config_value("nb_output_stderr", "show", "env") + app.add_config_value("nb_merge_streams", False, "env") + + # Register our post-transform which will convert output bundles to nodes + app.add_post_transform(PasteNodesToDocutils) + app.add_post_transform(CellOutputsToNodes) + + # Add myst-parser transforms and configuration + setup_myst_parser(app) + + # Events + app.connect("config-inited", validate_config_values) + app.connect("builder-inited", static_path) + app.connect("builder-inited", set_valid_execution_paths) + app.connect("builder-inited", set_up_execution_data) + app.connect("builder-inited", set_render_priority) + app.connect("env-purge-doc", remove_execution_data) + app.connect("env-get-outdated", update_execution_cache) + app.connect("config-inited", add_exclude_patterns) + app.connect("config-inited", update_togglebutton_classes) + app.connect("env-updated", save_glue_cache) + app.connect("config-inited", add_nb_custom_formats) + app.connect("env-updated", load_ipywidgets_js) + + # For syntax highlighting + app.add_lexer("ipythontb", IPythonTracebackLexer) + app.add_lexer("ipython", IPython3Lexer) + app.add_lexer("myst-ansi", AnsiColorLexer) + + # Add components + app.add_directive("code-cell", CodeCell) + app.add_role("nb-download", JupyterDownloadRole()) + app.add_css_file("mystnb.css") + app.add_domain(NbGlueDomain) + + # execution statistics table + setup_exec_table(app) + + # TODO 
need to deal with key clashes in NbGlueDomain.merge_domaindata + # before this is parallel_read_safe + return {"version": __version__, "parallel_read_safe": False} + + +class MystNbConfigError(SphinxError): + """Error specific to MyST-NB.""" + + category = "MyST NB Configuration Error" + + +def validate_config_values(app: Sphinx, config): + """Validate configuration values.""" + execute_mode = app.config["jupyter_execute_notebooks"] + if execute_mode not in ["force", "auto", "cache", "off"]: + raise MystNbConfigError( + "'jupyter_execute_notebooks' can be: " + f"`force`, `auto`, `cache` or `off`, but got: {execute_mode}", + ) + + if app.config["jupyter_cache"] and execute_mode != "cache": + raise MystNbConfigError( + "'jupyter_cache' is set, " + f"but 'jupyter_execute_notebooks' is not `cache`: {execute_mode}" + ) + + if app.config["jupyter_cache"] and not os.path.isdir(app.config["jupyter_cache"]): + raise MystNbConfigError( + f"'jupyter_cache' is not a directory: {app.config['jupyter_cache']}", + ) + + if not isinstance(app.config["nb_custom_formats"], dict): + raise MystNbConfigError( + "'nb_custom_formats' should be a dictionary: " + f"{app.config['nb_custom_formats']}" + ) + for name, converter in app.config["nb_custom_formats"].items(): + if not isinstance(name, str): + raise MystNbConfigError( + f"'nb_custom_formats' keys should be a string: {name}" + ) + if isinstance(converter, str): + app.config["nb_custom_formats"][name] = (converter, {}) + elif not (isinstance(converter, Sequence) and len(converter) in [2, 3]): + raise MystNbConfigError( + "'nb_custom_formats' values must be " + f"either strings or 2/3-element sequences, got: {converter}" + ) + + converter_str = app.config["nb_custom_formats"][name][0] + caller = import_object( + converter_str, + f"MyST-NB nb_custom_formats: {name}", + ) + if not callable(caller): + raise MystNbConfigError( + f"`nb_custom_formats.{name}` converter is not callable: {caller}" + ) + if len(app.config["nb_custom_formats"][name]) == 2: + app.config["nb_custom_formats"][name].append(None) + elif not isinstance(app.config["nb_custom_formats"][name][2], bool): + raise MystNbConfigError( + f"`nb_custom_formats.{name}.commonmark_only` arg is not boolean" + ) + + if not isinstance(app.config["nb_render_key"], str): + raise MystNbConfigError("`nb_render_key` is not a string") + + if app.config["nb_output_stderr"] not in [ + "show", + "remove", + "remove-warn", + "warn", + "error", + "severe", + ]: + raise MystNbConfigError( + "`nb_output_stderr` not one of: " + "'show', 'remove', 'remove-warn', 'warn', 'error', 'severe'" + ) + + # try loading notebook output renderer + load_renderer(app.config["nb_render_plugin"]) + + +def static_path(app: Sphinx): + static_path = Path(__file__).absolute().with_name("_static") + app.config.html_static_path.append(str(static_path)) + + +def load_ipywidgets_js(app: Sphinx, env: BuildEnvironment) -> None: + """Add ipywidget JavaScript to HTML pages. + + We adapt the code in sphinx.ext.mathjax, + to only add this JS if widgets have been found in any notebooks. + (ideally we would only add it to the pages containing widgets, + but this is not trivial in sphinx) + + There are 2 cases: + + - ipywidgets 7, with require + - ipywidgets 7, no require + + We reuse settings, if available, for jupyter-sphinx + """ + if app.builder.format != "html" or not app.env.nb_contains_widgets: + return + builder = cast(StandaloneHTMLBuilder, app.builder) + + # TODO change this logic? 
+ require_url_default = ( + REQUIRE_URL_DEFAULT + if "jupyter_sphinx_require_url" not in app.config + else app.config.jupyter_sphinx_require_url + ) + embed_url_default = ( + None + if "jupyter_sphinx_embed_url" not in app.config + else app.config.jupyter_sphinx_embed_url + ) + + if require_url_default: + builder.add_js_file(require_url_default) + embed_url = embed_url_default or DEFAULT_EMBED_REQUIREJS_URL + else: + embed_url = embed_url_default or DEFAULT_EMBED_SCRIPT_URL + if embed_url: + builder.add_js_file(embed_url) + + +def set_render_priority(app: Sphinx): + """Set the render priority for the particular builder.""" + builder = app.builder.name + if app.config.nb_render_priority and builder in app.config.nb_render_priority: + app.env.nb_render_priority = app.config.nb_render_priority[builder] + else: + app.env.nb_render_priority = get_default_render_priority(builder) + + if app.env.nb_render_priority is None: + raise MystNbConfigError(f"`nb_render_priority` not set for builder: {builder}") + try: + for item in app.env.nb_render_priority: + assert isinstance(item, str) + except Exception: + raise MystNbConfigError( + f"`nb_render_priority` is not a list of str: {app.env.nb_render_priority}" + ) + + +def set_valid_execution_paths(app: Sphinx): + """Set files excluded from execution, and valid file suffixes + + Patterns given in execution_excludepatterns conf variable from executing. + """ + app.env.nb_excluded_exec_paths = { + str(path) + for pat in app.config["execution_excludepatterns"] + for path in Path().cwd().rglob(pat) + } + LOGGER.verbose("MyST-NB: Excluded Paths: %s", app.env.nb_excluded_exec_paths) + app.env.nb_allowed_exec_suffixes = { + suffix + for suffix, parser_type in app.config["source_suffix"].items() + if parser_type in ("myst-nb",) + } + app.env.nb_contains_widgets = False + + +def set_up_execution_data(app: Sphinx): + if not hasattr(app.env, "nb_execution_data"): + app.env.nb_execution_data = {} + if not hasattr(app.env, "nb_execution_data_changed"): + app.env.nb_execution_data_changed = False + app.env.nb_execution_data_changed = False + + +def remove_execution_data(app: Sphinx, env, docname): + if docname in app.env.nb_execution_data: + app.env.nb_execution_data.pop(docname) + app.env.nb_execution_data_changed = True + + +def add_nb_custom_formats(app: Sphinx, config): + """Add custom conversion formats.""" + for suffix in config.nb_custom_formats: + app.add_source_suffix(suffix, "myst-nb") + + +def add_exclude_patterns(app: Sphinx, config): + """Add default exclude patterns (if not already present).""" + if "**.ipynb_checkpoints" not in config.exclude_patterns: + config.exclude_patterns.append("**.ipynb_checkpoints") + + +def update_togglebutton_classes(app: Sphinx, config): + to_add = [ + ".tag_hide_input div.cell_input", + ".tag_hide-input div.cell_input", + ".tag_hide_output div.cell_output", + ".tag_hide-output div.cell_output", + ".tag_hide_cell.cell", + ".tag_hide-cell.cell", + ] + for selector in to_add: + config.togglebutton_selector += f", {selector}" + + +def save_glue_cache(app: Sphinx, env): + NbGlueDomain.from_env(env).write_cache() + + +class JupyterDownloadRole(ReferenceRole): + def run(self): + reftarget = sphinx_abs_dir(self.env, self.target) + node = download_reference(self.rawtext, reftarget=reftarget) + self.set_source_info(node) + title = self.title if self.has_explicit_title else self.target + node += docnodes.literal( + self.rawtext, title, classes=["xref", "download", "myst-nb"] + ) + return [node], [] + + +class 
CodeCell(SphinxDirective): + """Raises a warning if it is triggered, it should not make it to the doctree.""" + + optional_arguments = 1 + final_argument_whitespace = True + has_content = True + + def run(self): + LOGGER.warning( + ( + "Found an unexpected `code-cell` directive. " + "Either this file was not converted to a notebook, " + "because Jupytext header content was missing, " + "or the `code-cell` was not converted, because it is nested. " + "See https://myst-nb.readthedocs.io/en/latest/use/markdown.html " + "for more information." + ), + location=(self.env.docname, self.lineno), + ) + return [] diff --git a/myst_nb/nodes.py b/myst_nb/nodes.py index e0700db7..06e645bb 100644 --- a/myst_nb/nodes.py +++ b/myst_nb/nodes.py @@ -5,7 +5,7 @@ from docutils import nodes from nbformat import NotebookNode -from .jsphinx import snippet_template, widget_view_template +from myst_nb.jsphinx import snippet_template, widget_view_template class CellNode(nodes.container): diff --git a/myst_nb/parser.py b/myst_nb/parser.py index 9db1ee0b..e4c371aa 100644 --- a/myst_nb/parser.py +++ b/myst_nb/parser.py @@ -15,12 +15,16 @@ from myst_nb.converter import get_nb_converter from myst_nb.execution import generate_notebook_outputs +from myst_nb.jsphinx import contains_widgets, get_widgets, write_notebook_output from myst_nb.nb_glue import GLUE_PREFIX from myst_nb.nb_glue.domain import NbGlueDomain -from myst_nb.nodes import CellInputNode, CellNode, CellOutputBundleNode, CellOutputNode - -from .jsphinx import contains_widgets, get_widgets, write_notebook_output -from .nodes import JupyterWidgetStateNode +from myst_nb.nodes import ( + CellInputNode, + CellNode, + CellOutputBundleNode, + CellOutputNode, + JupyterWidgetStateNode, +) SPHINX_LOGGER = logging.getLogger(__name__) diff --git a/myst_nb/render_outputs.py b/myst_nb/render_outputs.py index f66dc97e..52319cb8 100644 --- a/myst_nb/render_outputs.py +++ b/myst_nb/render_outputs.py @@ -18,8 +18,8 @@ from sphinx.transforms.post_transforms import SphinxPostTransform from sphinx.util import logging -from .jsphinx import WIDGET_VIEW_MIMETYPE, sphinx_abs_dir, strip_latex_delimiters -from .nodes import CellOutputBundleNode, JupyterWidgetViewNode +from myst_nb.jsphinx import WIDGET_VIEW_MIMETYPE, sphinx_abs_dir, strip_latex_delimiters +from myst_nb.nodes import CellOutputBundleNode, JupyterWidgetViewNode LOGGER = logging.getLogger(__name__) From c07109f6bb89d7ce3a43a28e52d28fb0f3923255 Mon Sep 17 00:00:00 2001 From: Chris Sewell Date: Sun, 2 Jan 2022 03:08:01 +0100 Subject: [PATCH 06/75] updates from myst-parser master branch --- myst_nb/converter.py | 8 ++++---- myst_nb/parser.py | 11 +++++------ myst_nb/render_outputs.py | 5 +++-- setup.cfg | 2 +- tests/nb_fixtures/basic.txt | 4 +++- .../test_execute/test_complex_outputs_unrun_auto.xml | 6 +++--- .../test_execute/test_complex_outputs_unrun_cache.xml | 6 +++--- tests/test_parser/test_complex_outputs.xml | 6 +++--- tests/test_render_outputs/test_complex_outputs.xml | 6 +++--- .../test_complex_outputs_latex.xml | 6 +++--- 10 files changed, 31 insertions(+), 29 deletions(-) diff --git a/myst_nb/converter.py b/myst_nb/converter.py index c9b2ebb2..1ab88f7d 100644 --- a/myst_nb/converter.py +++ b/myst_nb/converter.py @@ -5,7 +5,8 @@ import attr import nbformat as nbf import yaml -from myst_parser.main import MdParserConfig +from markdown_it.renderer import RendererHTML +from myst_parser.main import MdParserConfig, create_md_parser from sphinx.environment import BuildEnvironment from sphinx.util import import_object, 
logging @@ -229,13 +230,12 @@ def myst_to_notebook( i.e. not nested in other directives. """ # TODO warn about nested code-cells - from myst_parser.main import default_parser # parse markdown file up to the block level (i.e. don't worry about inline text) inline_config = attr.evolve( - config, renderer="html", disable_syntax=(config.disable_syntax + ["inline"]) + config, disable_syntax=(config.disable_syntax + ["inline"]) ) - parser = default_parser(inline_config) + parser = create_md_parser(inline_config, RendererHTML) tokens = parser.parse(text + "\n") lines = text.splitlines() md_start_line = 0 diff --git a/myst_nb/parser.py b/myst_nb/parser.py index e4c371aa..ead64e93 100644 --- a/myst_nb/parser.py +++ b/myst_nb/parser.py @@ -7,7 +7,7 @@ from markdown_it.rules_core import StateCore from markdown_it.token import Token from markdown_it.tree import SyntaxTreeNode -from myst_parser.main import MdParserConfig, default_parser +from myst_parser.main import MdParserConfig, create_md_parser from myst_parser.sphinx_parser import MystParser from myst_parser.sphinx_renderer import SphinxRenderer from sphinx.environment import BuildEnvironment @@ -38,9 +38,9 @@ class NotebookParser(MystParser): config_section = "myst-nb parser" config_section_dependencies = ("parsers",) - def parse( - self, inputstring: str, document: nodes.document, renderer: str = "sphinx" - ) -> None: + def parse(self, inputstring: str, document: nodes.document) -> None: + + # document.settings.smart_quotes = False self.reporter = document.reporter self.env = document.settings.env # type: BuildEnvironment @@ -102,12 +102,11 @@ def nb_to_tokens( """Parse the notebook content to a list of syntax tokens and an env, containing global data like reference definitions. """ - md = default_parser(config) # setup the markdown parser + md = create_md_parser(config, SphinxNBRenderer) # Note we disable front matter parsing, # because this is taken from the actual notebook metadata md.disable("front_matter", ignoreInvalid=True) - md.renderer = SphinxNBRenderer(md) # make a sandbox where all the parsing global data, # like reference definitions will be stored env: Dict[str, Any] = {} diff --git a/myst_nb/render_outputs.py b/myst_nb/render_outputs.py index 52319cb8..53aa71f6 100644 --- a/myst_nb/render_outputs.py +++ b/myst_nb/render_outputs.py @@ -10,7 +10,8 @@ from docutils.parsers.rst import directives from importlib_metadata import entry_points from myst_parser.docutils_renderer import make_document -from myst_parser.main import MdParserConfig, default_parser +from myst_parser.main import MdParserConfig, create_md_parser +from myst_parser.sphinx_renderer import SphinxRenderer from nbformat import NotebookNode from sphinx.environment import BuildEnvironment from sphinx.environment.collectors.asset import ImageCollector @@ -287,7 +288,7 @@ def parse_markdown( self, text: str, parent: Optional[nodes.Node] = None ) -> List[nodes.Node]: """Parse text as CommonMark, in a new document.""" - parser = default_parser(MdParserConfig(commonmark_only=True)) + parser = create_md_parser(MdParserConfig(commonmark_only=True), SphinxRenderer) # setup parent node if parent is None: diff --git a/setup.cfg b/setup.cfg index caa28f4c..c724f781 100644 --- a/setup.cfg +++ b/setup.cfg @@ -42,7 +42,7 @@ install_requires = importlib_metadata ipython jupyter-cache~=0.4.1 - myst-parser~=0.16.1 + myst-parser @ git+git://github.com/executablebooks/MyST-Parser.git@master nbconvert>=5.6,<7 nbformat~=5.0 pyyaml diff --git a/tests/nb_fixtures/basic.txt 
b/tests/nb_fixtures/basic.txt index e01bfab7..dc35b4fc 100644 --- a/tests/nb_fixtures/basic.txt +++ b/tests/nb_fixtures/basic.txt @@ -49,7 +49,9 @@ cells: language_info - {"pygments_lexer": "mylexer"} + + + {"pygments_lexer": "mylexer"} diff --git a/tests/test_execute/test_complex_outputs_unrun_auto.xml b/tests/test_execute/test_complex_outputs_unrun_auto.xml index 8e835129..b64ca34f 100644 --- a/tests/test_execute/test_complex_outputs_unrun_auto.xml +++ b/tests/test_execute/test_complex_outputs_unrun_auto.xml @@ -19,7 +19,7 @@ Some markdown text. A list: - + something @@ -28,7 +28,7 @@ something else A numbered list - + something @@ -59,7 +59,7 @@ some more text This is an abbreviated section of the document text, which we only want in a presentation - + summary of document text diff --git a/tests/test_execute/test_complex_outputs_unrun_cache.xml b/tests/test_execute/test_complex_outputs_unrun_cache.xml index 17ee2a52..58c571dd 100644 --- a/tests/test_execute/test_complex_outputs_unrun_cache.xml +++ b/tests/test_execute/test_complex_outputs_unrun_cache.xml @@ -19,7 +19,7 @@ Some markdown text. A list: - + something @@ -28,7 +28,7 @@ something else A numbered list - + something @@ -59,7 +59,7 @@ some more text This is an abbreviated section of the document text, which we only want in a presentation - + summary of document text diff --git a/tests/test_parser/test_complex_outputs.xml b/tests/test_parser/test_complex_outputs.xml index 833fcd60..951e4824 100644 --- a/tests/test_parser/test_complex_outputs.xml +++ b/tests/test_parser/test_complex_outputs.xml @@ -19,7 +19,7 @@ Some markdown text. A list: - + something @@ -28,7 +28,7 @@ something else A numbered list - + something @@ -59,7 +59,7 @@ some more text This is an abbreviated section of the document text, which we only want in a presentation - + summary of document text diff --git a/tests/test_render_outputs/test_complex_outputs.xml b/tests/test_render_outputs/test_complex_outputs.xml index 8c48e499..a64174b2 100644 --- a/tests/test_render_outputs/test_complex_outputs.xml +++ b/tests/test_render_outputs/test_complex_outputs.xml @@ -19,7 +19,7 @@ Some markdown text. A list: - + something @@ -28,7 +28,7 @@ something else A numbered list - + something @@ -59,7 +59,7 @@ some more text This is an abbreviated section of the document text, which we only want in a presentation - + summary of document text diff --git a/tests/test_render_outputs/test_complex_outputs_latex.xml b/tests/test_render_outputs/test_complex_outputs_latex.xml index 5f81f3a4..0be1d57d 100644 --- a/tests/test_render_outputs/test_complex_outputs_latex.xml +++ b/tests/test_render_outputs/test_complex_outputs_latex.xml @@ -19,7 +19,7 @@ Some markdown text. 
A list: - + something @@ -28,7 +28,7 @@ something else A numbered list - + something @@ -59,7 +59,7 @@ some more text This is an abbreviated section of the document text, which we only want in a presentation - + summary of document text From b33ea1eb1320b5ae36b900c7de6eb99a978e6102 Mon Sep 17 00:00:00 2001 From: Chris Sewell Date: Sun, 2 Jan 2022 03:09:53 +0100 Subject: [PATCH 07/75] Module ansi_lexer -> lexer, and add entry points --- myst_nb/extension.py | 2 +- myst_nb/{ansi_lexer.py => lexers.py} | 13 +++++++++---- setup.cfg | 5 +++-- tests/test_ansi_lexer.py | 6 +++--- 4 files changed, 16 insertions(+), 10 deletions(-) rename myst_nb/{ansi_lexer.py => lexers.py} (94%) diff --git a/myst_nb/extension.py b/myst_nb/extension.py index 8886c44c..18abefe9 100644 --- a/myst_nb/extension.py +++ b/myst_nb/extension.py @@ -17,7 +17,6 @@ from sphinx.util.docutils import ReferenceRole, SphinxDirective from myst_nb import __version__ -from myst_nb.ansi_lexer import AnsiColorLexer from myst_nb.exec_table import setup_exec_table from myst_nb.execution import update_execution_cache from myst_nb.jsphinx import ( @@ -26,6 +25,7 @@ REQUIRE_URL_DEFAULT, sphinx_abs_dir, ) +from myst_nb.lexers import AnsiColorLexer from myst_nb.nb_glue import glue # noqa: F401 from myst_nb.nb_glue.domain import ( NbGlueDomain, diff --git a/myst_nb/ansi_lexer.py b/myst_nb/lexers.py similarity index 94% rename from myst_nb/ansi_lexer.py rename to myst_nb/lexers.py index 57ad76a0..ea2c2bf3 100644 --- a/myst_nb/ansi_lexer.py +++ b/myst_nb/lexers.py @@ -1,13 +1,13 @@ # -*- coding: utf-8 -*- -"""Pygments lexer for text containing ANSI color codes. - -Adapted from https://github.com/chriskuehl/pygments-ansi-color -""" +"""Pygments lexers""" import re import pygments.lexer import pygments.token +# this is not added as an entry point in ipython, so we add it in this package +from IPython.lib.lexers import IPythonTracebackLexer # noqa: F401 + _ansi_code_to_color = { 0: "Black", 1: "Red", @@ -50,6 +50,11 @@ def _token_from_lexer_state(bold, faint, fg_color, bg_color): class AnsiColorLexer(pygments.lexer.RegexLexer): + """Pygments lexer for text containing ANSI color codes. 
+ + Adapted from https://github.com/chriskuehl/pygments-ansi-color + """ + name = "ANSI Color" aliases = ("myst-ansi",) flags = re.DOTALL | re.MULTILINE diff --git a/setup.cfg b/setup.cfg index c724f781..8378f682 100644 --- a/setup.cfg +++ b/setup.cfg @@ -60,8 +60,9 @@ exclude = myst_nb.mime_render = default = myst_nb.render_outputs:CellOutputRenderer inline = myst_nb.render_outputs:CellOutputRendererInline -# pygments.lexers = -# myst_ansi = myst_nb.ansi_lexer:AnsiColorLexer +pygments.lexers = + myst-ansi = myst_nb.lexers:AnsiColorLexer + ipythontb = myst_nb.lexers:IPythonTracebackLexer [options.extras_require] code_style = diff --git a/tests/test_ansi_lexer.py b/tests/test_ansi_lexer.py index 9b2af460..838e984d 100644 --- a/tests/test_ansi_lexer.py +++ b/tests/test_ansi_lexer.py @@ -1,7 +1,7 @@ import pytest from pygments.token import Text, Token -from myst_nb import ansi_lexer +from myst_nb import lexers @pytest.mark.parametrize( @@ -15,12 +15,12 @@ ), ) def test_token_from_lexer_state(bold, faint, fg_color, bg_color, expected): - ret = ansi_lexer._token_from_lexer_state(bold, faint, fg_color, bg_color) + ret = lexers._token_from_lexer_state(bold, faint, fg_color, bg_color) assert ret == expected def _highlight(text): - return tuple(ansi_lexer.AnsiColorLexer().get_tokens(text)) + return tuple(lexers.AnsiColorLexer().get_tokens(text)) def test_plain_text(): From 02c34947b3e72bfb1a6b148faeec3650b3140863 Mon Sep 17 00:00:00 2001 From: Chris Sewell Date: Sun, 2 Jan 2022 03:10:54 +0100 Subject: [PATCH 08/75] Initial implementation of docutils parser --- myst_nb/configuration.py | 206 +++++++++ myst_nb/docutils_.py | 920 +++++++++++++++++++++++++++++++++++++++ setup.cfg | 9 + 3 files changed, 1135 insertions(+) create mode 100644 myst_nb/configuration.py create mode 100644 myst_nb/docutils_.py diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py new file mode 100644 index 00000000..8fa291a3 --- /dev/null +++ b/myst_nb/configuration.py @@ -0,0 +1,206 @@ +"""Configuration for myst-nb.""" +from typing import Any, Dict, Iterable, Sequence, Tuple + +import attr +from attr.validators import deep_iterable, in_, instance_of, optional +from typing_extensions import Literal + + +def custom_formats_converter(value: dict) -> dict: + """Convert the custom format dict.""" + if not isinstance(value, dict): + raise TypeError(f"`nb_custom_formats` must be a dict: {value}") + output = {} + for suffix, reader in value.items(): + if not isinstance(suffix, str): + raise TypeError(f"`nb_custom_formats` keys must be a string: {suffix}") + if isinstance(reader, str): + output[suffix] = (reader, {}, False) + elif not isinstance(reader, Sequence): + raise TypeError( + f"`nb_custom_formats` values must be a string or sequence: {reader}" + ) + elif len(reader) == 2: + output[suffix] = (reader[0], reader[1], False) + elif len(reader) == 3: + output[suffix] = (reader[0], reader[1], reader[2]) + else: + raise TypeError( + f"`nb_custom_formats` values must be a string, of sequence of length " + f"2 or 3: {reader}" + ) + if not isinstance(output[suffix][0], str): + raise TypeError( + f"`nb_custom_formats` values[0] must be a string: {output[suffix][0]}" + ) + if not isinstance(output[suffix][1], dict): + raise TypeError( + f"`nb_custom_formats` values[1] must be a dict: {output[suffix][1]}" + ) + if not isinstance(output[suffix][2], bool): + raise TypeError( + f"`nb_custom_formats` values[2] must be a bool: {output[suffix][2]}" + ) + return output + + +@attr.s() +class NbParserConfig: + """Global configuration 
options for the MyST-NB parser. + + Note: in the sphinx configuration these option names are prepended with ``nb_`` + """ + + # TODO: nb_render_key, execution_show_tb, + # execution_excludepatterns, jupyter_cache + # jupyter_sphinx_require_url, jupyter_sphinx_embed_url + + # TODO handle old names; put in metadata, then auto generate warnings + + # file read options + + custom_formats: Dict[str, Tuple[str, dict, bool]] = attr.ib( + factory=dict, + converter=custom_formats_converter, + # TODO check can be loaded from string? + metadata={"help": "Custom formats for reading notebook; suffix -> reader"}, + ) + + # notebook execution options + + execution_mode: Literal["off", "force", "cache"] = attr.ib( + default="off", # TODO different default for docutils (off) and sphinx (cache)? + validator=in_( + [ + "off", + "force", + "cache", + ] + ), + metadata={"help": "Execution mode for notebooks"}, + ) + execution_cache_path: str = attr.ib( + default="", + validator=instance_of(str), + metadata={"help": "Path to folder for caching notebooks"}, + ) + execution_timeout: int = attr.ib( + default=30, + validator=instance_of(int), + metadata={"help": "Execution timeout (seconds)"}, + ) + execution_in_temp: bool = attr.ib( + default=False, + validator=instance_of(bool), + metadata={ + "help": "Use a temporary folder for the execution current working directory" + }, + ) + execution_allow_errors: bool = attr.ib( + default=False, + validator=instance_of(bool), + metadata={"help": "Allow errors during execution"}, + ) + + # render options + + output_folder: str = attr.ib( + default="build", + validator=instance_of(str), + metadata={ + "help": "Output folder for external outputs", + "docutils_only": True, # in sphinx we output to the build folder + }, + ) + remove_code_source: bool = attr.ib( + default=False, + validator=instance_of(bool), + metadata={"help": "Remove code cell source"}, + ) + remove_code_outputs: bool = attr.ib( + default=False, + validator=instance_of(bool), + metadata={"help": "Remove code cell outputs"}, + ) + number_source_lines: bool = attr.ib( + default=False, + validator=instance_of(bool), + metadata={"help": "Number code cell source lines"}, + ) + merge_streams: bool = attr.ib( + default=False, + validator=instance_of(bool), + metadata={"help": "Merge stdout/stderr execution output streams"}, + ) + output_stderr: Literal[ + "show", "remove", "remove-warn", "warn", "error", "severe" + ] = attr.ib( + default="show", + validator=in_( + [ + "show", + "remove", + "remove-warn", + "warn", + "error", + "severe", + ] + ), + metadata={"help": "Behaviour for stderr output"}, + ) + embed_markdown_outputs: bool = attr.ib( + default=False, + validator=instance_of(bool), + metadata={"help": "Embed markdown outputs"}, # TODO better help text + ) + # TODO this would be for docutils but not for sphinx + render_priority: Iterable[str] = attr.ib( + default=( + "application/vnd.jupyter.widget-view+json", + "application/javascript", + "text/html", + "image/svg+xml", + "image/png", + "image/jpeg", + "text/markdown", + "text/latex", + "text/plain", + ), + validator=deep_iterable(instance_of(str)), + metadata={"help": "Render priority for mime types"}, + ) + render_text_lexer: str = attr.ib( + default="myst-ansi", + # TODO allow None -> "none"? + validator=optional(instance_of(str)), # TODO check it can be loaded? + metadata={ + "help": "Pygments lexer applied to stdout/stderr and text/plain outputs" + }, + ) + render_error_lexer: str = attr.ib( + default="ipythontb", + # TODO allow None -> "none"? 
+ validator=optional(instance_of(str)), # TODO check it can be loaded? + metadata={"help": "Pygments lexer applied to error/traceback outputs"}, + ) + render_plugin: str = attr.ib( + default="default", + validator=instance_of(str), # TODO check it can be loaded? + metadata={ + "help": "The entry point for the execution output render class " + "(in group `myst_nb.output_renderer`)" + }, + ) + + @classmethod + def get_fields(cls) -> Tuple[attr.Attribute, ...]: + return attr.fields(cls) + + def as_dict(self, dict_factory=dict) -> dict: + return attr.asdict(self, dict_factory=dict_factory) + + def as_triple(self) -> Iterable[Tuple[str, Any, attr.Attribute]]: + """Yield triples of (name, value, field).""" + fields = attr.fields_dict(self.__class__) + for name, value in attr.asdict(self).items(): + yield name, value, fields[name] diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py new file mode 100644 index 00000000..3bb7db5c --- /dev/null +++ b/myst_nb/docutils_.py @@ -0,0 +1,920 @@ +"""A parser for docutils.""" +import hashlib +import json +import logging +import os +import re +from binascii import a2b_base64 +from contextlib import nullcontext +from functools import lru_cache +from mimetypes import guess_extension +from pathlib import Path +from tempfile import TemporaryDirectory +from typing import Any, Dict, List, Optional, Tuple, Union + +from docutils import nodes +from docutils.core import default_description, publish_cmdline +from importlib_metadata import entry_points +from jupyter_cache import get_cache +from jupyter_cache.executors import load_executor +from jupyter_cache.executors.utils import single_nb_execution +from markdown_it.main import MarkdownIt +from markdown_it.rules_core import StateCore +from markdown_it.token import Token +from markdown_it.tree import SyntaxTreeNode +from myst_parser.docutils_ import DOCUTILS_EXCLUDED_ARGS as DOCUTILS_EXCLUDED_ARGS_MYST +from myst_parser.docutils_ import Parser as MystParser +from myst_parser.docutils_ import create_myst_config, create_myst_settings_spec +from myst_parser.docutils_renderer import DocutilsRenderer, token_line +from myst_parser.main import MdParserConfig, create_md_parser +from nbformat import NotebookNode +from nbformat import reads as read_nb +from typing_extensions import Literal + +from myst_nb.configuration import NbParserConfig +from myst_nb.render_outputs import coalesce_streams + +NOTEBOOK_VERSION = 4 +WIDGET_STATE_MIMETYPE = "application/vnd.jupyter.widget-state+json" +WIDGET_VIEW_MIMETYPE = "application/vnd.jupyter.widget-view+json" +_ANSI_RE = re.compile("\x1b\\[(.*?)([@-~])") + + +DOCUTILS_EXCLUDED_ARGS = { + # docutils.conf can't represent dicts + # TODO can we make this work? 
+ "custom_formats", +} + + +# mapping of standard logger level names to their docutils equivalent +_LOGNAME_TO_DOCUTILS_LEVEL = { + "DEBUG": 0, + "INFO": 1, + "WARN": 2, + "WARNING": 2, + "ERROR": 3, + "CRITICAL": 4, + "FATAL": 4, +} + + +class DocutilsFormatter(logging.Formatter): + """A formatter that formats log messages for docutils.""" + + def __init__(self, source: str): + """Initialize a new formatter.""" + self._source = source + super().__init__() + + def format(self, record: logging.LogRecord) -> str: + """Format a log record for docutils.""" + levelname = record.levelname.upper() + level = _LOGNAME_TO_DOCUTILS_LEVEL.get(levelname, 0) + node = nodes.system_message( + record.msg, source=self._source, type=levelname, level=level + ) + return node.astext() + + +class DocutilsLogHandler(logging.Handler): + """Bridge from a Python logger to a docutils reporter.""" + + def __init__(self, document: nodes.document) -> None: + """Initialize a new handler.""" + super().__init__() + self._document = document + reporter = self._document.reporter + self._name_to_level = { + "DEBUG": reporter.DEBUG_LEVEL, + "INFO": reporter.INFO_LEVEL, + "WARN": reporter.WARNING_LEVEL, + "WARNING": reporter.WARNING_LEVEL, + "ERROR": reporter.ERROR_LEVEL, + "CRITICAL": reporter.SEVERE_LEVEL, + "FATAL": reporter.SEVERE_LEVEL, + } + + def emit(self, record: logging.LogRecord) -> None: + """Handle a log record.""" + levelname = record.levelname.upper() + level = self._name_to_level.get(levelname, self._document.reporter.DEBUG_LEVEL) + self._document.reporter.system_message(level, record.msg) + + +class Parser(MystParser): + """Docutils parser for Jupyter Notebooks, containing MyST Markdown.""" + + supported: Tuple[str, ...] = ("mystnb", "ipynb") + """Aliases this parser supports.""" + + settings_spec = ( + "MyST-NB options", + None, + create_myst_settings_spec(DOCUTILS_EXCLUDED_ARGS, NbParserConfig, "nb_"), + *MystParser.settings_spec, + ) + """Runtime settings specification.""" + + config_section = "myst-nb parser" + + @staticmethod + def get_logger(document: nodes.document) -> logging.Logger: + """Get or create a logger for a docutils document.""" + logger = logging.getLogger(document["source"]) + logger.setLevel(logging.DEBUG) + if not logger.handlers: + logger.addHandler(DocutilsLogHandler(document)) + return logger + + def parse(self, inputstring: str, document: nodes.document) -> None: + """Parse source text. 
+ + :param inputstring: The source string to parse + :param document: The root docutils node to add AST elements to + """ + # create a logger for this document + logger = self.get_logger(document) + + # get markdown parsing configuration + try: + md_config = create_myst_config( + document.settings, DOCUTILS_EXCLUDED_ARGS_MYST + ) + except (TypeError, ValueError) as error: + logger.error(f"myst configuration invalid: {error.args[0]}") + md_config = MdParserConfig() + + # get notebook rendering configuration + try: + nb_config = create_myst_config( + document.settings, DOCUTILS_EXCLUDED_ARGS, NbParserConfig, "nb_" + ) + except (TypeError, ValueError) as error: + logger.error(f"myst-nb configuration invalid: {error.args[0]}") + nb_config = NbParserConfig() + + # convert inputstring to notebook + # TODO handle converters + notebook: NotebookNode = read_nb(inputstring, as_version=NOTEBOOK_VERSION) + + # execute notebook if necessary + # TODO also look at notebook metadata + if nb_config.execution_mode == "force": + path = str(Path(document["source"]).parent) + cwd_context = ( + TemporaryDirectory() + if nb_config.execution_in_temp + else nullcontext(path) + ) + with cwd_context as cwd: + cwd = os.path.abspath(cwd) + logger.info(f"Executing notebook in {cwd}") + result = single_nb_execution( + notebook, + cwd=cwd, + allow_errors=nb_config.execution_allow_errors, + timeout=nb_config.execution_timeout, + ) + logger.info(f"Executed notebook in {result.time:.2f} seconds") + # TODO save execution data on document (and environment if sphinx) + # TODO handle errors + elif nb_config.execution_mode == "cache": + # TODO for sphinx, the default would be in the output directory + cache = get_cache(nb_config.execution_cache_path or ".cache") + stage_record = cache.stage_notebook_file(document["source"]) + # TODO handle converters + if cache.get_cache_record_of_staged(stage_record.pk) is None: + executor = load_executor("basic", cache, logger=logger) + executor.run_and_cache( + filter_pks=[stage_record.pk], + allow_errors=nb_config.execution_allow_errors, + timeout=nb_config.execution_timeout, + run_in_temp=nb_config.execution_in_temp, + ) + else: + logger.info("Using cached notebook outputs") + # TODO handle errors + _, notebook = cache.merge_match_into_notebook(notebook) + + # TODO write executed notebook to output folder + # always for sphinx, but maybe docutils option on whether to do this? + # only on successful parse? + + # Setup parser + mdit_parser = create_md_parser(md_config, DocutilsNbRenderer) + mdit_parser.options["document"] = document + mdit_parser.options["notebook"] = notebook + mdit_parser.options["nb_config"] = nb_config.as_dict() + mdit_env: Dict[str, Any] = {} + # parse to tokens + mdit_tokens = notebook_to_tokens(notebook, mdit_parser, mdit_env) + # convert to docutils AST, which is added to the document + mdit_parser.renderer.render(mdit_tokens, mdit_parser.options, mdit_env) + + +def notebook_to_tokens( + notebook: NotebookNode, mdit_parser: MarkdownIt, mdit_env: Dict[str, Any] +) -> List[Token]: + # disable front-matter, since this is taken from the notebook + mdit_parser.disable("front_matter", ignoreInvalid=True) + # this stores global state, such as reference definitions + + # Parse block tokens only first, leaving inline parsing to a second phase + # (required to collect all reference definitions, before assessing references). 
+ metadata = dict(notebook.metadata.items()) + # save these keys on the document, rather than as docinfo + spec_data = { + key: metadata.pop(key, None) for key in ("kernelspec", "language_info") + } + + # get language lexer name + langinfo = spec_data.get("language_info", {}) + lexer = langinfo.get("pygments_lexer", langinfo.get("name", None)) + if lexer is None: + lexer = spec_data.get("kernelspec", {}).get("language", None) + # TODO warning if no lexer + + # extract widgets + widgets = metadata.pop("widgets", None) + block_tokens = [ + Token("nb_spec_data", "", 0, meta=spec_data), + Token( + "front_matter", + "", + 0, + map=[0, 0], + content=metadata, # type: ignore[arg-type] + ), + ] + for cell_index, nb_cell in enumerate(notebook.cells): + + # skip empty cells + if len(nb_cell["source"].strip()) == 0: + continue + + # skip cells tagged for removal + # TODO make configurable + tags = nb_cell.metadata.get("tags", []) + if ("remove_cell" in tags) or ("remove-cell" in tags): + continue + + # generate tokens + tokens: List[Token] + if nb_cell["cell_type"] == "markdown": + # TODO if cell has tag output-caption, then use as caption for next/preceding cell? + tokens = [ + Token( + "nb_cell_markdown_open", + "", + 1, + hidden=True, + meta={ + "index": cell_index, + "metadata": dict(nb_cell["metadata"].items()), + }, + map=[0, len(nb_cell["source"].splitlines()) - 1], + ), + ] + with mdit_parser.reset_rules(): + # enable only rules up to block + rules = mdit_parser.core.ruler.get_active_rules() + mdit_parser.core.ruler.enableOnly(rules[: rules.index("inline")]) + tokens.extend(mdit_parser.parse(nb_cell["source"], mdit_env)) + tokens.append( + Token( + "nb_cell_markdown_close", + "", + -1, + hidden=True, + ), + ) + elif nb_cell["cell_type"] == "raw": + tokens = [ + Token( + "nb_cell_raw", + "code", + 0, + content=nb_cell["source"], + meta={ + "index": cell_index, + "metadata": dict(nb_cell["metadata"].items()), + }, + map=[0, 0], + ) + ] + elif nb_cell["cell_type"] == "code": + # we don't copy the outputs here, since this would + # greatly increase the memory consumption, + # instead they will referenced by the cell index + tokens = [ + Token( + "nb_cell_code", + "code", + 0, + content=nb_cell["source"], + meta={ + "index": cell_index, + "execution_count": nb_cell.get("execution_count", None), + "lexer": lexer, + # TODO add notebook node to dict function and apply here etc + "metadata": dict(nb_cell["metadata"].items()), + }, + map=[0, 0], + ) + ] + else: + pass # TODO create warning + + # update token's source lines, using either a source_map (index -> line), + # set when converting to a notebook, or a pseudo base of the cell index + smap = notebook.metadata.get("source_map", None) + start_line = smap[cell_index] if smap else (cell_index + 1) * 10000 + start_line += 1 # use base 1 rather than 0 + for token in tokens: + if token.map: + token.map = [start_line + token.map[0], start_line + token.map[1]] + # also update the source lines for duplicate references + for dup_ref in mdit_env.get("duplicate_refs", []): + if "fixed" not in dup_ref: + dup_ref["map"] = [ + start_line + dup_ref["map"][0], + start_line + dup_ref["map"][1], + ] + dup_ref["fixed"] = True + + # add tokens to list + block_tokens.extend(tokens) + + # The widget state will be embedded as a script, at the end of HTML output + widget_state = (widgets or {}).get(WIDGET_STATE_MIMETYPE, None) + if widget_state and widget_state.get("state", None): + block_tokens.append( + Token( + "nb_widget_state", + "script", + 0, + attrs={"type": 
WIDGET_STATE_MIMETYPE}, + meta={"state": widget_state}, + map=[0, 0], + ) + ) + + # Now all definitions have been gathered, run the inline parsing phase + state = StateCore("", mdit_parser, mdit_env, block_tokens) + with mdit_parser.reset_rules(): + rules = mdit_parser.core.ruler.get_active_rules() + mdit_parser.core.ruler.enableOnly(rules[rules.index("inline") :]) + mdit_parser.core.process(state) + + return state.tokens + + +@lru_cache(maxsize=10) +def load_renderer(name: str) -> "NbOutputRenderer": + """Load a renderer, + given a name within the ``myst_nb.output_renderer`` entry point group + """ + all_eps = entry_points() + if hasattr(all_eps, "select"): + # importlib_metadata >= 3.6 or importlib.metadata in python >=3.10 + eps = all_eps.select(group="myst_nb.output_renderer", name=name) + found = name in eps.names + else: + eps = {ep.name: ep for ep in all_eps.get("myst_nb.output_renderer", [])} + found = name in eps + if found: + klass = eps[name].load() + if not issubclass(klass, NbOutputRenderer): + raise Exception( + f"Entry Point for myst_nb.output_renderer:{name} " + f"is not a subclass of `NbOutputRenderer`: {klass}" + ) + return klass + + raise Exception(f"No Entry Point found for myst_nb.output_renderer:{name}") + + +def strip_ansi(text: str) -> str: + """Strip ANSI escape sequences from a string""" + return _ANSI_RE.sub("", text) + + +def strip_latex_delimiters(source): + r"""Remove LaTeX math delimiters that would be rendered by the math block. + + These are: ``\(…\)``, ``\[…\]``, ``$…$``, and ``$$…$$``. + This is necessary because sphinx does not have a dedicated role for + generic LaTeX, while Jupyter only defines generic LaTeX output, see + https://github.com/jupyter/jupyter-sphinx/issues/90 for discussion. + """ + source = source.strip() + delimiter_pairs = (pair.split() for pair in r"\( \),\[ \],$$ $$,$ $".split(",")) + for start, end in delimiter_pairs: + if source.startswith(start) and source.endswith(end): + return source[len(start) : -len(end)] + + return source + + +class DocutilsNbRenderer(DocutilsRenderer): + """ "A docutils-only renderer for Jupyter Notebooks.""" + + # TODO upstream + def add_line_and_source_path_r( + self, nodes: List[nodes.Node], token: SyntaxTreeNode + ) -> None: + """Add the source and line recursively to all nodes.""" + for node in nodes: + self.add_line_and_source_path(node, token) + for child in node.traverse(): + self.add_line_and_source_path(child, token) + + # TODO maybe move more things to NbOutputRenderer? + # and change name to e.g. NbElementRenderer + + def get_nb_config(self, key: str, cell_index: int) -> Any: + # TODO selection between config/notebook/cell level + # TODO handle KeyError better + return self.config["nb_config"][key] + + def render_nb_spec_data(self, token: SyntaxTreeNode) -> None: + """Add a notebook spec data to the document attributes.""" + self.document["nb_kernelspec"] = token.meta["kernelspec"] + self.document["nb_language_info"] = token.meta["language_info"] + + def render_nb_cell_markdown(self, token: SyntaxTreeNode) -> None: + """Render a notebook markdown cell.""" + # TODO this is currently just a "pass-through", but we could utilise the metadata + # it would be nice to "wrap" this in a container that included the metadata, + # but unfortunately this would break the heading structure of docutils/sphinx. 
+ # perhaps we add an "invisible" (non-rendered) marker node to the document tree, + self.render_children(token) + + def render_nb_cell_raw(self, token: SyntaxTreeNode) -> None: + """Render a notebook raw cell.""" + # TODO + + def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: + """Render a notebook code cell.""" + cell_index = token.meta["index"] + exec_count = token.meta["execution_count"] + tags = token.meta["metadata"].get("tags", []) + # create a container for all the output + classes = ["cell"] + for tag in tags: + classes.append(f"tag_{tag.replace(' ', '_')}") + cell_container = nodes.container( + nb_type="cell_code", # TODO maybe nb_cell="code"/"markdown"/"raw" + cell_index=cell_index, + # TODO some way to use this to output cell indexes in HTML? + exec_count=exec_count, + cell_metadata=token.meta["metadata"], + classes=classes, + ) + self.add_line_and_source_path(cell_container, token) + with self.current_node_context(cell_container, append=True): + + # TODO do we need this -/_ duplication of tag names, or can deprecate one? + # TODO it would be nice if remove_input/remove_output were also config + + # render the code source code + if ( + (not self.get_nb_config("remove_code_source", cell_index)) + and ("remove_input" not in tags) + and ("remove-input" not in tags) + ): + cell_input = nodes.container( + nb_type="cell_code_source", classes=["cell_input"] + ) + self.add_line_and_source_path(cell_input, token) + with self.current_node_context(cell_input, append=True): + self.render_nb_cell_code_source(token) + # render the execution output, if any + has_outputs = self.config["notebook"]["cells"][cell_index].get( + "outputs", [] + ) + if ( + has_outputs + and (not self.get_nb_config("remove_code_outputs", cell_index)) + and ("remove_output" not in tags) + and ("remove-output" not in tags) + ): + cell_output = nodes.container( + nb_type="cell_code_output", classes=["cell_output"] + ) + self.add_line_and_source_path(cell_output, token) + with self.current_node_context(cell_output, append=True): + self.render_nb_cell_code_outputs(token) + + def render_nb_cell_code_source(self, token: SyntaxTreeNode) -> None: + """Render a notebook code cell's source.""" + cell_index = token.meta["index"] + lexer = token.meta.get("lexer", None) + node = self.create_highlighted_code_block( + token.content, + lexer, + number_lines=self.get_nb_config("number_source_lines", cell_index), + source=self.document["source"], + line=token_line(token), + ) + self.add_line_and_source_path(node, token) + self.current_node.append(node) + + def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: + """Render a notebook code cell's outputs.""" + cell_index = token.meta["index"] + line = token_line(token) + # metadata = token.meta["metadata"] + outputs: List[NotebookNode] = self.config["notebook"]["cells"][cell_index].get( + "outputs", [] + ) + if self.get_nb_config("merge_streams", cell_index): + outputs = coalesce_streams(outputs) + render_priority = self.get_nb_config("render_priority", cell_index) + renderer_name = self.get_nb_config("render_plugin", cell_index) + # get folder path for external outputs (like images) + # TODO for sphinx we use a set output folder + output_folder = self.get_nb_config("output_folder", cell_index) + # load renderer class from name + renderer: NbOutputRenderer = load_renderer(renderer_name)(self, output_folder) + for output in outputs: + if output.output_type == "stream": + if output.name == "stdout": + _nodes = renderer.render_stdout(output, cell_index, line) + 
self.add_line_and_source_path_r(_nodes, token) + self.current_node.extend(_nodes) + elif output.name == "stderr": + _nodes = renderer.render_stderr(output, cell_index, line) + self.add_line_and_source_path_r(_nodes, token) + self.current_node.extend(_nodes) + else: + pass # TODO warning + elif output.output_type == "error": + _nodes = renderer.render_error(output, cell_index, line) + self.add_line_and_source_path_r(_nodes, token) + self.current_node.extend(_nodes) + elif output.output_type in ("display_data", "execute_result"): + # TODO how to handle figures and other means of wrapping an output: + # TODO unwrapped Markdown (so you can output headers) + # maybe in a transform, we grab the containers and move them + # "below" the code cell container? + # if embed_markdown_outputs is True, + # this should be top priority and we "mark" the container for the transform + try: + mime_type = next(x for x in render_priority if x in output["data"]) + except StopIteration: + self.create_warning( + "No output mime type found from render_priority", + line=line, + append_to=self.current_node, + subtype="nb_mime_type", + ) + else: + container = nodes.container(mime_type=mime_type) + with self.current_node_context(container, append=True): + _nodes = renderer.render_mime_type( + mime_type, output["data"][mime_type], cell_index, line + ) + self.add_line_and_source_path_r(_nodes, token) + self.current_node.extend(_nodes) + else: + self.create_warning( + f"Unsupported output type: {output.output_type}", + line=line, + append_to=self.current_node, + subtype="nb_output_type", + ) + + def render_nb_widget_state(self, token: SyntaxTreeNode) -> None: + """Render the HTML defining the ipywidget state.""" + # The JSON inside the script tag is identified and parsed by: + # https://github.com/jupyter-widgets/ipywidgets/blob/32f59acbc63c3ff0acf6afa86399cb563d3a9a86/packages/html-manager/src/libembed.ts#L36 + # TODO we also need to load JS URLs if widgets are present and HTML + html = ( + f'" + ) + node = nodes.raw("", html, format="html", nb_type="widget_state") + self.add_line_and_source_path(node, token) + # always append to bottom of the document + self.document.append(node) + + +def sanitize_script_content(content: str) -> str: + """Sanitize the content of a ``", r"<\/script>") + + +class NbOutputRenderer: + """A class for rendering notebook outputs.""" + + def __init__(self, renderer: DocutilsNbRenderer, output_folder: str) -> None: + """Initialize the renderer. + + :params output_folder: the folder path for external outputs (like images) + """ + self._renderer = renderer + self._output_folder = output_folder + + @property + def renderer(self) -> DocutilsNbRenderer: + """The renderer this output renderer is associated with.""" + return self._renderer + + def write_file( + self, path: List[str], content: bytes, overwrite=False, exists_ok=False + ) -> Path: + """Write a file to the external output folder. + + :param path: the path to write the file to, relative to the output folder + :param content: the content to write to the file + :param overwrite: whether to overwrite an existing file + :param exists_ok: whether to ignore an existing file if overwrite is False + """ + folder = Path(self._output_folder) + filepath = folder.joinpath(*path) + if filepath.exists(): + if overwrite: + filepath.write_bytes(content) + elif not exists_ok: + # TODO raise or just report? 
+ raise FileExistsError(f"File already exists: {filepath}") + else: + filepath.parent.mkdir(parents=True, exist_ok=True) + filepath.write_bytes(content) + + return filepath + + @property + def source(self): + """The source of the notebook.""" + return self.renderer.document["source"] + + def report( + self, level: Literal["warning", "error", "severe"], message: str, line: int + ) -> nodes.system_message: + """Report an issue.""" + # TODO add cell index to message + # TODO handle for sphinx (including type/subtype) + reporter = self.renderer.document.reporter + levels = { + "warning": reporter.WARNING_LEVEL, + "error": reporter.ERROR_LEVEL, + "severe": reporter.SEVERE_LEVEL, + } + return reporter.system_message( + levels.get(level, reporter.WARNING_LEVEL), message, line=line + ) + + def get_cell_metadata(self, cell_index: int) -> NotebookNode: + # TODO handle key/index error + return self._renderer.config["notebook"]["cells"][cell_index]["metadata"] + + # TODO add support for specifying inline types (for glue etc) + + def render_stdout( + self, output: NotebookNode, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook stdout output.""" + metadata = self.get_cell_metadata(cell_index) + if "remove-stdout" in metadata.get("tags", []): + return [] + lexer = self.renderer.get_nb_config("render_text_lexer", cell_index) + node = self.renderer.create_highlighted_code_block( + output["text"], lexer, source=self.source, line=source_line + ) + node["classes"] += ["output", "stream"] + return [node] + + def render_stderr( + self, output: NotebookNode, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook stderr output.""" + metadata = self.get_cell_metadata(cell_index) + if "remove-stdout" in metadata.get("tags", []): + return [] + output_stderr = self.renderer.get_nb_config("output_stderr", cell_index) + msg = "output render: stderr was found in the cell outputs" + outputs = [] + if output_stderr == "remove": + return [] + elif output_stderr == "remove-warn": + return [self.report("warning", msg, line=source_line)] + elif output_stderr == "warn": + outputs.append(self.report("warning", msg, line=source_line)) + elif output_stderr == "error": + outputs.append(self.report("error", msg, line=source_line)) + elif output_stderr == "severe": + outputs.append(self.report("severe", msg, line=source_line)) + lexer = self.renderer.get_nb_config("render_text_lexer", cell_index) + node = self.renderer.create_highlighted_code_block( + output["text"], lexer, source=self.source, line=source_line + ) + node["classes"] += ["output", "stderr"] + outputs.append(node) + return outputs + + def render_error( + self, output: NotebookNode, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook error output.""" + traceback = strip_ansi("\n".join(output["traceback"])) + lexer = self.renderer.get_nb_config("render_error_lexer", cell_index) + node = self.renderer.create_highlighted_code_block( + traceback, lexer, source=self.source, line=source_line + ) + node["classes"] += ["output", "traceback"] + return [node] + + def render_mime_type( + self, mime_type: str, data: Union[str, bytes], cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook mime output.""" + if mime_type == "text/plain": + return self.render_text_plain(data, cell_index, source_line) + if mime_type in {"image/png", "image/jpeg", "application/pdf"}: + # TODO move a2b_base64 to method? 
(but need to handle render_svg) + return self.render_image( + mime_type, a2b_base64(data), cell_index, source_line + ) + if mime_type == "image/svg+xml": + return self.render_svg(data, cell_index, source_line) + if mime_type == "text/html": + return self.render_text_html(data, cell_index, source_line) + if mime_type == "text/latex": + return self.render_text_latex(data, cell_index, source_line) + if mime_type == "application/javascript": + return self.render_javascript(data, cell_index, source_line) + if mime_type == WIDGET_VIEW_MIMETYPE: + return self.render_widget_view(data, cell_index, source_line) + if mime_type == "text/markdown": + return self.render_markdown(data, cell_index, source_line) + + return self.render_unknown(mime_type, data, cell_index, source_line) + + def render_unknown( + self, mime_type: str, data: Union[str, bytes], cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook output of unknown mime type.""" + return self.report( + "warning", + f"skipping unknown output mime type: {mime_type}", + line=source_line, + ) + + def render_markdown( + self, data: str, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook text/markdown output.""" + # create a container to parse the markdown into + temp_container = nodes.container() + + # setup temporary renderer config + md = self.renderer.md + match_titles = self.renderer.md_env.get("match_titles", None) + if self.renderer.get_nb_config("embed_markdown_outputs", cell_index): + # this configuration is used in conjunction with a transform, + # which move this content outside & below the output container + # in this way the Markdown output can contain headings, + # and not break the structure of the docutils AST + # TODO create transform and for sphinx prioritise this output for all output formats + self.renderer.md_env["match_titles"] = True + else: + # otherwise we render as simple Markdown and heading are not allowed + self.renderer.md_env["match_titles"] = False + self.renderer.md = create_md_parser( + MdParserConfig(commonmark_only=True), self.renderer.__class__ + ) + + # parse markdown + with self.renderer.current_node_context(temp_container): + self.renderer.nested_render_text(data, source_line) + + # restore renderer config + self.renderer.md = md + self.renderer.md_env["match_titles"] = match_titles + + return temp_container.children + + def render_text_plain( + self, data: str, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook text/plain output.""" + lexer = self.renderer.get_nb_config("render_text_lexer", cell_index) + node = self.renderer.create_highlighted_code_block( + data, lexer, source=self.source, line=source_line + ) + node["classes"] += ["output", "text_plain"] + return [node] + + def render_text_html( + self, data: str, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook text/html output.""" + return [nodes.raw(text=data, format="html", classes=["output", "text_html"])] + + def render_text_latex( + self, data: str, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook text/latex output.""" + # TODO should we always assume this is math? 
+ return [ + nodes.math_block( + text=strip_latex_delimiters(data), + nowrap=False, + number=None, + classes=["output", "text_latex"], + ) + ] + + def render_svg( + self, data: bytes, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook image/svg+xml output.""" + data = data if isinstance(data, str) else data.decode("utf-8") + data = os.linesep.join(data.splitlines()).encode("utf-8") + return self.render_image("image/svg+xml", data, source_line) + + def render_image( + self, mime_type: str, data: bytes, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook image output.""" + # Adapted from ``nbconvert.ExtractOutputPreprocessor`` + # TODO add additional attributes + # create filename + extension = guess_extension(mime_type) or "." + mime_type.rsplit("/")[-1] + # latex does not read the '.jpe' extension + extension = ".jpeg" if extension == ".jpe" else extension + # ensure de-duplication of outputs by using hash as filename + # TODO note this is a change to the current implementation, + # which names by {notbook_name}-{cell_index}-{output-index}.{extension} + data_hash = hashlib.sha256(data).hexdigest() + filename = f"{data_hash}{extension}" + path = self.write_file([filename], data, overwrite=False, exists_ok=True) + return [nodes.image(uri=str(path))] + + def render_javascript( + self, data: str, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook application/javascript output.""" + content = sanitize_script_content(data) + mime_type = "application/javascript" + return [ + nodes.raw( + text=f'', + format="html", + ) + ] + + def render_widget_view( + self, data: str, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook application/vnd.jupyter.widget-view+json output.""" + content = json.dumps(sanitize_script_content(data)) + return [ + nodes.raw( + text=f'', + format="html", + ) + ] + + +def _run_cli(writer_name: str, writer_description: str, argv: Optional[List[str]]): + """Run the command line interface for a particular writer.""" + publish_cmdline( + parser=Parser(), + writer_name=writer_name, + description=( + f"Generates {writer_description} from standalone MyST Notebook sources.\n" + f"{default_description}" + ), + argv=argv, + ) + + +def cli_html(argv: Optional[List[str]] = None) -> None: + """Cmdline entrypoint for converting MyST to HTML.""" + _run_cli("html", "(X)HTML documents", argv) + + +def cli_html5(argv: Optional[List[str]] = None): + """Cmdline entrypoint for converting MyST to HTML5.""" + _run_cli("html5", "HTML5 documents", argv) + + +def cli_latex(argv: Optional[List[str]] = None): + """Cmdline entrypoint for converting MyST to LaTeX.""" + _run_cli("latex", "LaTeX documents", argv) + + +def cli_xml(argv: Optional[List[str]] = None): + """Cmdline entrypoint for converting MyST to XML.""" + _run_cli("xml", "Docutils-native XML", argv) + + +def cli_pseudoxml(argv: Optional[List[str]] = None): + """Cmdline entrypoint for converting MyST to pseudo-XML.""" + _run_cli("pseudoxml", "pseudo-XML", argv) diff --git a/setup.cfg b/setup.cfg index 8378f682..ca5f91a3 100644 --- a/setup.cfg +++ b/setup.cfg @@ -48,6 +48,7 @@ install_requires = pyyaml sphinx>=3.1,<5 sphinx-togglebutton~=0.2.2 + typing-extensions python_requires = >=3.6 include_package_data = True zip_safe = True @@ -57,9 +58,17 @@ exclude = test* [options.entry_points] +console_scripts = + mystnb-docutils-html = myst_nb.docutils_:cli_html + mystnb-docutils-html5 = myst_nb.docutils_:cli_html5 + 
mystnb-docutils-latex = myst_nb.docutils_:cli_latex + mystnb-docutils-xml = myst_nb.docutils_:cli_xml + mystnb-docutils-pseudoxml = myst_nb.docutils_:cli_pseudoxml myst_nb.mime_render = default = myst_nb.render_outputs:CellOutputRenderer inline = myst_nb.render_outputs:CellOutputRendererInline +myst_nb.output_renderer = + default = myst_nb.docutils_:NbOutputRenderer pygments.lexers = myst-ansi = myst_nb.lexers:AnsiColorLexer ipythontb = myst_nb.lexers:IPythonTracebackLexer From 11188c2aa62dfe2949b4e8de0f3320f7efe8e208 Mon Sep 17 00:00:00 2001 From: Chris Sewell Date: Sun, 2 Jan 2022 16:36:22 +0100 Subject: [PATCH 09/75] Document render methods --- myst_nb/docutils_.py | 154 ++++++++++++++++++++++++++++++------------- 1 file changed, 109 insertions(+), 45 deletions(-) diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 3bb7db5c..d94e0e04 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -194,6 +194,7 @@ def parse(self, inputstring: str, document: nodes.document) -> None: ) else: logger.info("Using cached notebook outputs") + # TODO save execution data on document (and environment if sphinx) # TODO handle errors _, notebook = cache.merge_match_into_notebook(notebook) @@ -262,6 +263,7 @@ def notebook_to_tokens( # generate tokens tokens: List[Token] if nb_cell["cell_type"] == "markdown": + # https://nbformat.readthedocs.io/en/5.1.3/format_description.html#markdown-cells # TODO if cell has tag output-caption, then use as caption for next/preceding cell? tokens = [ Token( @@ -290,6 +292,7 @@ def notebook_to_tokens( ), ) elif nb_cell["cell_type"] == "raw": + # https://nbformat.readthedocs.io/en/5.1.3/format_description.html#raw-nbconvert-cells tokens = [ Token( "nb_cell_raw", @@ -304,6 +307,7 @@ def notebook_to_tokens( ) ] elif nb_cell["cell_type"] == "code": + # https://nbformat.readthedocs.io/en/5.1.3/format_description.html#code-cells # we don't copy the outputs here, since this would # greatly increase the memory consumption, # instead they will referenced by the cell index @@ -420,16 +424,6 @@ def strip_latex_delimiters(source): class DocutilsNbRenderer(DocutilsRenderer): """ "A docutils-only renderer for Jupyter Notebooks.""" - # TODO upstream - def add_line_and_source_path_r( - self, nodes: List[nodes.Node], token: SyntaxTreeNode - ) -> None: - """Add the source and line recursively to all nodes.""" - for node in nodes: - self.add_line_and_source_path(node, token) - for child in node.traverse(): - self.add_line_and_source_path(child, token) - # TODO maybe move more things to NbOutputRenderer? # and change name to e.g. NbElementRenderer @@ -673,12 +667,17 @@ def get_cell_metadata(self, cell_index: int) -> NotebookNode: # TODO handle key/index error return self._renderer.config["notebook"]["cells"][cell_index]["metadata"] - # TODO add support for specifying inline types (for glue etc) - def render_stdout( self, output: NotebookNode, cell_index: int, source_line: int ) -> List[nodes.Element]: - """Render a notebook stdout output.""" + """Render a notebook stdout output. 
+ + https://nbformat.readthedocs.io/en/5.1.3/format_description.html#stream-output + + :param output: the output node + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ metadata = self.get_cell_metadata(cell_index) if "remove-stdout" in metadata.get("tags", []): return [] @@ -692,7 +691,14 @@ def render_stdout( def render_stderr( self, output: NotebookNode, cell_index: int, source_line: int ) -> List[nodes.Element]: - """Render a notebook stderr output.""" + """Render a notebook stderr output. + + https://nbformat.readthedocs.io/en/5.1.3/format_description.html#stream-output + + :param output: the output node + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ metadata = self.get_cell_metadata(cell_index) if "remove-stdout" in metadata.get("tags", []): return [] @@ -720,7 +726,14 @@ def render_stderr( def render_error( self, output: NotebookNode, cell_index: int, source_line: int ) -> List[nodes.Element]: - """Render a notebook error output.""" + """Render a notebook error output. + + https://nbformat.readthedocs.io/en/5.1.3/format_description.html#error + + :param output: the output node + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ traceback = strip_ansi("\n".join(output["traceback"])) lexer = self.renderer.get_nb_config("render_error_lexer", cell_index) node = self.renderer.create_highlighted_code_block( @@ -732,16 +745,19 @@ def render_error( def render_mime_type( self, mime_type: str, data: Union[str, bytes], cell_index: int, source_line: int ) -> List[nodes.Element]: - """Render a notebook mime output.""" + """Render a notebook mime output. + + https://nbformat.readthedocs.io/en/5.1.3/format_description.html#display-data + + :param mime_type: the key from the "data" dict + :param data: the value from the "data" dict + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ if mime_type == "text/plain": return self.render_text_plain(data, cell_index, source_line) - if mime_type in {"image/png", "image/jpeg", "application/pdf"}: - # TODO move a2b_base64 to method? (but need to handle render_svg) - return self.render_image( - mime_type, a2b_base64(data), cell_index, source_line - ) - if mime_type == "image/svg+xml": - return self.render_svg(data, cell_index, source_line) + if mime_type in {"image/png", "image/jpeg", "application/pdf", "image/svg+xml"}: + return self.render_image(mime_type, data, cell_index, source_line) if mime_type == "text/html": return self.render_text_html(data, cell_index, source_line) if mime_type == "text/latex": @@ -758,7 +774,13 @@ def render_mime_type( def render_unknown( self, mime_type: str, data: Union[str, bytes], cell_index: int, source_line: int ) -> List[nodes.Element]: - """Render a notebook output of unknown mime type.""" + """Render a notebook output of unknown mime type. 
+ + :param mime_type: the key from the "data" dict + :param data: the value from the "data" dict + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ return self.report( "warning", f"skipping unknown output mime type: {mime_type}", @@ -768,7 +790,12 @@ def render_unknown( def render_markdown( self, data: str, cell_index: int, source_line: int ) -> List[nodes.Element]: - """Render a notebook text/markdown output.""" + """Render a notebook text/markdown mime data output. + + :param data: the value from the "data" dict + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ # create a container to parse the markdown into temp_container = nodes.container() @@ -802,7 +829,12 @@ def render_markdown( def render_text_plain( self, data: str, cell_index: int, source_line: int ) -> List[nodes.Element]: - """Render a notebook text/plain output.""" + """Render a notebook text/plain mime data output. + + :param data: the value from the "data" dict + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ lexer = self.renderer.get_nb_config("render_text_lexer", cell_index) node = self.renderer.create_highlighted_code_block( data, lexer, source=self.source, line=source_line @@ -813,13 +845,24 @@ def render_text_plain( def render_text_html( self, data: str, cell_index: int, source_line: int ) -> List[nodes.Element]: - """Render a notebook text/html output.""" + """Render a notebook text/html mime data output. + + :param data: the value from the "data" dict + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + :param inline: create inline nodes instead of block nodes + """ return [nodes.raw(text=data, format="html", classes=["output", "text_html"])] def render_text_latex( self, data: str, cell_index: int, source_line: int ) -> List[nodes.Element]: - """Render a notebook text/latex output.""" + """Render a notebook text/latex mime data output. + + :param data: the value from the "data" dict + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ # TODO should we always assume this is math? return [ nodes.math_block( @@ -830,36 +873,52 @@ def render_text_latex( ) ] - def render_svg( - self, data: bytes, cell_index: int, source_line: int - ) -> List[nodes.Element]: - """Render a notebook image/svg+xml output.""" - data = data if isinstance(data, str) else data.decode("utf-8") - data = os.linesep.join(data.splitlines()).encode("utf-8") - return self.render_image("image/svg+xml", data, source_line) - def render_image( - self, mime_type: str, data: bytes, cell_index: int, source_line: int + self, + mime_type: Union[str, bytes], + data: bytes, + cell_index: int, + source_line: int, ) -> List[nodes.Element]: - """Render a notebook image output.""" - # Adapted from ``nbconvert.ExtractOutputPreprocessor`` - # TODO add additional attributes + """Render a notebook image mime data output. 
+ + :param mime_type: the key from the "data" dict + :param data: the value from the "data" dict + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ + # Adapted from: + # https://github.com/jupyter/nbconvert/blob/45df4b6089b3bbab4b9c504f9e6a892f5b8692e3/nbconvert/preprocessors/extractoutput.py#L43 + + # ensure that the data is a bytestring + if mime_type in {"image/png", "image/jpeg", "application/pdf"}: + # data is b64-encoded as text + data_bytes = a2b_base64(data) + elif isinstance(data, str): + # ensure corrent line separator + data_bytes = os.linesep.join(data.splitlines()).encode("utf-8") # create filename extension = guess_extension(mime_type) or "." + mime_type.rsplit("/")[-1] - # latex does not read the '.jpe' extension + # latex does not recognize the '.jpe' extension extension = ".jpeg" if extension == ".jpe" else extension # ensure de-duplication of outputs by using hash as filename # TODO note this is a change to the current implementation, # which names by {notbook_name}-{cell_index}-{output-index}.{extension} - data_hash = hashlib.sha256(data).hexdigest() + data_hash = hashlib.sha256(data_bytes).hexdigest() filename = f"{data_hash}{extension}" - path = self.write_file([filename], data, overwrite=False, exists_ok=True) + path = self.write_file([filename], data_bytes, overwrite=False, exists_ok=True) + # TODO add additional attributes return [nodes.image(uri=str(path))] def render_javascript( self, data: str, cell_index: int, source_line: int ) -> List[nodes.Element]: - """Render a notebook application/javascript output.""" + """Render a notebook application/javascript mime data output. + + :param data: the value from the "data" dict + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ content = sanitize_script_content(data) mime_type = "application/javascript" return [ @@ -872,7 +931,12 @@ def render_javascript( def render_widget_view( self, data: str, cell_index: int, source_line: int ) -> List[nodes.Element]: - """Render a notebook application/vnd.jupyter.widget-view+json output.""" + """Render a notebook application/vnd.jupyter.widget-view+json mime output. 
+ + :param data: the value from the "data" dict + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ content = json.dumps(sanitize_script_content(data)) return [ nodes.raw( From d39d2f48c2c0cdf35c0a8faecaf771e870b0e64f Mon Sep 17 00:00:00 2001 From: Chris Sewell Date: Sun, 2 Jan 2022 21:10:53 +0100 Subject: [PATCH 10/75] modularise docutils code --- myst_nb/docutils_.py | 642 +--------------------------------------- myst_nb/new/__init__.py | 0 myst_nb/new/execute.py | 71 +++++ myst_nb/new/parse.py | 180 +++++++++++ myst_nb/new/read.py | 27 ++ myst_nb/new/render.py | 422 ++++++++++++++++++++++++++ setup.cfg | 4 +- 7 files changed, 717 insertions(+), 629 deletions(-) create mode 100644 myst_nb/new/__init__.py create mode 100644 myst_nb/new/execute.py create mode 100644 myst_nb/new/parse.py create mode 100644 myst_nb/new/read.py create mode 100644 myst_nb/new/render.py diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index d94e0e04..849516f8 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -1,26 +1,10 @@ """A parser for docutils.""" -import hashlib import json import logging -import os -import re -from binascii import a2b_base64 -from contextlib import nullcontext -from functools import lru_cache -from mimetypes import guess_extension -from pathlib import Path -from tempfile import TemporaryDirectory -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, List, Optional, Tuple from docutils import nodes from docutils.core import default_description, publish_cmdline -from importlib_metadata import entry_points -from jupyter_cache import get_cache -from jupyter_cache.executors import load_executor -from jupyter_cache.executors.utils import single_nb_execution -from markdown_it.main import MarkdownIt -from markdown_it.rules_core import StateCore -from markdown_it.token import Token from markdown_it.tree import SyntaxTreeNode from myst_parser.docutils_ import DOCUTILS_EXCLUDED_ARGS as DOCUTILS_EXCLUDED_ARGS_MYST from myst_parser.docutils_ import Parser as MystParser @@ -28,18 +12,14 @@ from myst_parser.docutils_renderer import DocutilsRenderer, token_line from myst_parser.main import MdParserConfig, create_md_parser from nbformat import NotebookNode -from nbformat import reads as read_nb -from typing_extensions import Literal from myst_nb.configuration import NbParserConfig +from myst_nb.new.execute import update_notebook +from myst_nb.new.parse import notebook_to_tokens +from myst_nb.new.read import create_nb_reader +from myst_nb.new.render import NbElementRenderer, load_renderer, sanitize_script_content from myst_nb.render_outputs import coalesce_streams -NOTEBOOK_VERSION = 4 -WIDGET_STATE_MIMETYPE = "application/vnd.jupyter.widget-state+json" -WIDGET_VIEW_MIMETYPE = "application/vnd.jupyter.widget-view+json" -_ANSI_RE = re.compile("\x1b\\[(.*?)([@-~])") - - DOCUTILS_EXCLUDED_ARGS = { # docutils.conf can't represent dicts # TODO can we make this work? 
@@ -155,48 +135,13 @@ def parse(self, inputstring: str, document: nodes.document) -> None: nb_config = NbParserConfig() # convert inputstring to notebook - # TODO handle converters - notebook: NotebookNode = read_nb(inputstring, as_version=NOTEBOOK_VERSION) - - # execute notebook if necessary - # TODO also look at notebook metadata - if nb_config.execution_mode == "force": - path = str(Path(document["source"]).parent) - cwd_context = ( - TemporaryDirectory() - if nb_config.execution_in_temp - else nullcontext(path) - ) - with cwd_context as cwd: - cwd = os.path.abspath(cwd) - logger.info(f"Executing notebook in {cwd}") - result = single_nb_execution( - notebook, - cwd=cwd, - allow_errors=nb_config.execution_allow_errors, - timeout=nb_config.execution_timeout, - ) - logger.info(f"Executed notebook in {result.time:.2f} seconds") - # TODO save execution data on document (and environment if sphinx) - # TODO handle errors - elif nb_config.execution_mode == "cache": - # TODO for sphinx, the default would be in the output directory - cache = get_cache(nb_config.execution_cache_path or ".cache") - stage_record = cache.stage_notebook_file(document["source"]) - # TODO handle converters - if cache.get_cache_record_of_staged(stage_record.pk) is None: - executor = load_executor("basic", cache, logger=logger) - executor.run_and_cache( - filter_pks=[stage_record.pk], - allow_errors=nb_config.execution_allow_errors, - timeout=nb_config.execution_timeout, - run_in_temp=nb_config.execution_in_temp, - ) - else: - logger.info("Using cached notebook outputs") - # TODO save execution data on document (and environment if sphinx) - # TODO handle errors - _, notebook = cache.merge_match_into_notebook(notebook) + nb_reader, md_config = create_nb_reader( + inputstring, document["source"], md_config, nb_config + ) + notebook = nb_reader(inputstring) + + # potentially execute notebook and/or populate outputs from cache + notebook = update_notebook(notebook, document["source"], nb_config, logger) # TODO write executed notebook to output folder # always for sphinx, but maybe docutils option on whether to do this? @@ -214,220 +159,13 @@ def parse(self, inputstring: str, document: nodes.document) -> None: mdit_parser.renderer.render(mdit_tokens, mdit_parser.options, mdit_env) -def notebook_to_tokens( - notebook: NotebookNode, mdit_parser: MarkdownIt, mdit_env: Dict[str, Any] -) -> List[Token]: - # disable front-matter, since this is taken from the notebook - mdit_parser.disable("front_matter", ignoreInvalid=True) - # this stores global state, such as reference definitions - - # Parse block tokens only first, leaving inline parsing to a second phase - # (required to collect all reference definitions, before assessing references). 
- metadata = dict(notebook.metadata.items()) - # save these keys on the document, rather than as docinfo - spec_data = { - key: metadata.pop(key, None) for key in ("kernelspec", "language_info") - } - - # get language lexer name - langinfo = spec_data.get("language_info", {}) - lexer = langinfo.get("pygments_lexer", langinfo.get("name", None)) - if lexer is None: - lexer = spec_data.get("kernelspec", {}).get("language", None) - # TODO warning if no lexer - - # extract widgets - widgets = metadata.pop("widgets", None) - block_tokens = [ - Token("nb_spec_data", "", 0, meta=spec_data), - Token( - "front_matter", - "", - 0, - map=[0, 0], - content=metadata, # type: ignore[arg-type] - ), - ] - for cell_index, nb_cell in enumerate(notebook.cells): - - # skip empty cells - if len(nb_cell["source"].strip()) == 0: - continue - - # skip cells tagged for removal - # TODO make configurable - tags = nb_cell.metadata.get("tags", []) - if ("remove_cell" in tags) or ("remove-cell" in tags): - continue - - # generate tokens - tokens: List[Token] - if nb_cell["cell_type"] == "markdown": - # https://nbformat.readthedocs.io/en/5.1.3/format_description.html#markdown-cells - # TODO if cell has tag output-caption, then use as caption for next/preceding cell? - tokens = [ - Token( - "nb_cell_markdown_open", - "", - 1, - hidden=True, - meta={ - "index": cell_index, - "metadata": dict(nb_cell["metadata"].items()), - }, - map=[0, len(nb_cell["source"].splitlines()) - 1], - ), - ] - with mdit_parser.reset_rules(): - # enable only rules up to block - rules = mdit_parser.core.ruler.get_active_rules() - mdit_parser.core.ruler.enableOnly(rules[: rules.index("inline")]) - tokens.extend(mdit_parser.parse(nb_cell["source"], mdit_env)) - tokens.append( - Token( - "nb_cell_markdown_close", - "", - -1, - hidden=True, - ), - ) - elif nb_cell["cell_type"] == "raw": - # https://nbformat.readthedocs.io/en/5.1.3/format_description.html#raw-nbconvert-cells - tokens = [ - Token( - "nb_cell_raw", - "code", - 0, - content=nb_cell["source"], - meta={ - "index": cell_index, - "metadata": dict(nb_cell["metadata"].items()), - }, - map=[0, 0], - ) - ] - elif nb_cell["cell_type"] == "code": - # https://nbformat.readthedocs.io/en/5.1.3/format_description.html#code-cells - # we don't copy the outputs here, since this would - # greatly increase the memory consumption, - # instead they will referenced by the cell index - tokens = [ - Token( - "nb_cell_code", - "code", - 0, - content=nb_cell["source"], - meta={ - "index": cell_index, - "execution_count": nb_cell.get("execution_count", None), - "lexer": lexer, - # TODO add notebook node to dict function and apply here etc - "metadata": dict(nb_cell["metadata"].items()), - }, - map=[0, 0], - ) - ] - else: - pass # TODO create warning - - # update token's source lines, using either a source_map (index -> line), - # set when converting to a notebook, or a pseudo base of the cell index - smap = notebook.metadata.get("source_map", None) - start_line = smap[cell_index] if smap else (cell_index + 1) * 10000 - start_line += 1 # use base 1 rather than 0 - for token in tokens: - if token.map: - token.map = [start_line + token.map[0], start_line + token.map[1]] - # also update the source lines for duplicate references - for dup_ref in mdit_env.get("duplicate_refs", []): - if "fixed" not in dup_ref: - dup_ref["map"] = [ - start_line + dup_ref["map"][0], - start_line + dup_ref["map"][1], - ] - dup_ref["fixed"] = True - - # add tokens to list - block_tokens.extend(tokens) - - # The widget state will be embedded 
as a script, at the end of HTML output - widget_state = (widgets or {}).get(WIDGET_STATE_MIMETYPE, None) - if widget_state and widget_state.get("state", None): - block_tokens.append( - Token( - "nb_widget_state", - "script", - 0, - attrs={"type": WIDGET_STATE_MIMETYPE}, - meta={"state": widget_state}, - map=[0, 0], - ) - ) - - # Now all definitions have been gathered, run the inline parsing phase - state = StateCore("", mdit_parser, mdit_env, block_tokens) - with mdit_parser.reset_rules(): - rules = mdit_parser.core.ruler.get_active_rules() - mdit_parser.core.ruler.enableOnly(rules[rules.index("inline") :]) - mdit_parser.core.process(state) - - return state.tokens - - -@lru_cache(maxsize=10) -def load_renderer(name: str) -> "NbOutputRenderer": - """Load a renderer, - given a name within the ``myst_nb.output_renderer`` entry point group - """ - all_eps = entry_points() - if hasattr(all_eps, "select"): - # importlib_metadata >= 3.6 or importlib.metadata in python >=3.10 - eps = all_eps.select(group="myst_nb.output_renderer", name=name) - found = name in eps.names - else: - eps = {ep.name: ep for ep in all_eps.get("myst_nb.output_renderer", [])} - found = name in eps - if found: - klass = eps[name].load() - if not issubclass(klass, NbOutputRenderer): - raise Exception( - f"Entry Point for myst_nb.output_renderer:{name} " - f"is not a subclass of `NbOutputRenderer`: {klass}" - ) - return klass - - raise Exception(f"No Entry Point found for myst_nb.output_renderer:{name}") - - -def strip_ansi(text: str) -> str: - """Strip ANSI escape sequences from a string""" - return _ANSI_RE.sub("", text) - - -def strip_latex_delimiters(source): - r"""Remove LaTeX math delimiters that would be rendered by the math block. - - These are: ``\(…\)``, ``\[…\]``, ``$…$``, and ``$$…$$``. - This is necessary because sphinx does not have a dedicated role for - generic LaTeX, while Jupyter only defines generic LaTeX output, see - https://github.com/jupyter/jupyter-sphinx/issues/90 for discussion. - """ - source = source.strip() - delimiter_pairs = (pair.split() for pair in r"\( \),\[ \],$$ $$,$ $".split(",")) - for start, end in delimiter_pairs: - if source.startswith(start) and source.endswith(end): - return source[len(start) : -len(end)] - - return source - - class DocutilsNbRenderer(DocutilsRenderer): """ "A docutils-only renderer for Jupyter Notebooks.""" # TODO maybe move more things to NbOutputRenderer? # and change name to e.g. NbElementRenderer - def get_nb_config(self, key: str, cell_index: int) -> Any: + def get_nb_config(self, key: str, cell_index: Optional[int]) -> Any: # TODO selection between config/notebook/cell level # TODO handle KeyError better return self.config["nb_config"][key] @@ -519,11 +257,11 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: """Render a notebook code cell's outputs.""" cell_index = token.meta["index"] line = token_line(token) - # metadata = token.meta["metadata"] outputs: List[NotebookNode] = self.config["notebook"]["cells"][cell_index].get( "outputs", [] ) if self.get_nb_config("merge_streams", cell_index): + # TODO should this be moved to the parsing phase? 
outputs = coalesce_streams(outputs) render_priority = self.get_nb_config("render_priority", cell_index) renderer_name = self.get_nb_config("render_plugin", cell_index) @@ -531,7 +269,7 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: # TODO for sphinx we use a set output folder output_folder = self.get_nb_config("output_folder", cell_index) # load renderer class from name - renderer: NbOutputRenderer = load_renderer(renderer_name)(self, output_folder) + renderer: NbElementRenderer = load_renderer(renderer_name)(self, output_folder) for output in outputs: if output.output_type == "stream": if output.name == "stdout": @@ -596,356 +334,6 @@ def render_nb_widget_state(self, token: SyntaxTreeNode) -> None: self.document.append(node) -def sanitize_script_content(content: str) -> str: - """Sanitize the content of a ``", r"<\/script>") - - -class NbOutputRenderer: - """A class for rendering notebook outputs.""" - - def __init__(self, renderer: DocutilsNbRenderer, output_folder: str) -> None: - """Initialize the renderer. - - :params output_folder: the folder path for external outputs (like images) - """ - self._renderer = renderer - self._output_folder = output_folder - - @property - def renderer(self) -> DocutilsNbRenderer: - """The renderer this output renderer is associated with.""" - return self._renderer - - def write_file( - self, path: List[str], content: bytes, overwrite=False, exists_ok=False - ) -> Path: - """Write a file to the external output folder. - - :param path: the path to write the file to, relative to the output folder - :param content: the content to write to the file - :param overwrite: whether to overwrite an existing file - :param exists_ok: whether to ignore an existing file if overwrite is False - """ - folder = Path(self._output_folder) - filepath = folder.joinpath(*path) - if filepath.exists(): - if overwrite: - filepath.write_bytes(content) - elif not exists_ok: - # TODO raise or just report? - raise FileExistsError(f"File already exists: {filepath}") - else: - filepath.parent.mkdir(parents=True, exist_ok=True) - filepath.write_bytes(content) - - return filepath - - @property - def source(self): - """The source of the notebook.""" - return self.renderer.document["source"] - - def report( - self, level: Literal["warning", "error", "severe"], message: str, line: int - ) -> nodes.system_message: - """Report an issue.""" - # TODO add cell index to message - # TODO handle for sphinx (including type/subtype) - reporter = self.renderer.document.reporter - levels = { - "warning": reporter.WARNING_LEVEL, - "error": reporter.ERROR_LEVEL, - "severe": reporter.SEVERE_LEVEL, - } - return reporter.system_message( - levels.get(level, reporter.WARNING_LEVEL), message, line=line - ) - - def get_cell_metadata(self, cell_index: int) -> NotebookNode: - # TODO handle key/index error - return self._renderer.config["notebook"]["cells"][cell_index]["metadata"] - - def render_stdout( - self, output: NotebookNode, cell_index: int, source_line: int - ) -> List[nodes.Element]: - """Render a notebook stdout output. 
- - https://nbformat.readthedocs.io/en/5.1.3/format_description.html#stream-output - - :param output: the output node - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ - metadata = self.get_cell_metadata(cell_index) - if "remove-stdout" in metadata.get("tags", []): - return [] - lexer = self.renderer.get_nb_config("render_text_lexer", cell_index) - node = self.renderer.create_highlighted_code_block( - output["text"], lexer, source=self.source, line=source_line - ) - node["classes"] += ["output", "stream"] - return [node] - - def render_stderr( - self, output: NotebookNode, cell_index: int, source_line: int - ) -> List[nodes.Element]: - """Render a notebook stderr output. - - https://nbformat.readthedocs.io/en/5.1.3/format_description.html#stream-output - - :param output: the output node - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ - metadata = self.get_cell_metadata(cell_index) - if "remove-stdout" in metadata.get("tags", []): - return [] - output_stderr = self.renderer.get_nb_config("output_stderr", cell_index) - msg = "output render: stderr was found in the cell outputs" - outputs = [] - if output_stderr == "remove": - return [] - elif output_stderr == "remove-warn": - return [self.report("warning", msg, line=source_line)] - elif output_stderr == "warn": - outputs.append(self.report("warning", msg, line=source_line)) - elif output_stderr == "error": - outputs.append(self.report("error", msg, line=source_line)) - elif output_stderr == "severe": - outputs.append(self.report("severe", msg, line=source_line)) - lexer = self.renderer.get_nb_config("render_text_lexer", cell_index) - node = self.renderer.create_highlighted_code_block( - output["text"], lexer, source=self.source, line=source_line - ) - node["classes"] += ["output", "stderr"] - outputs.append(node) - return outputs - - def render_error( - self, output: NotebookNode, cell_index: int, source_line: int - ) -> List[nodes.Element]: - """Render a notebook error output. - - https://nbformat.readthedocs.io/en/5.1.3/format_description.html#error - - :param output: the output node - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ - traceback = strip_ansi("\n".join(output["traceback"])) - lexer = self.renderer.get_nb_config("render_error_lexer", cell_index) - node = self.renderer.create_highlighted_code_block( - traceback, lexer, source=self.source, line=source_line - ) - node["classes"] += ["output", "traceback"] - return [node] - - def render_mime_type( - self, mime_type: str, data: Union[str, bytes], cell_index: int, source_line: int - ) -> List[nodes.Element]: - """Render a notebook mime output. 
- - https://nbformat.readthedocs.io/en/5.1.3/format_description.html#display-data - - :param mime_type: the key from the "data" dict - :param data: the value from the "data" dict - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ - if mime_type == "text/plain": - return self.render_text_plain(data, cell_index, source_line) - if mime_type in {"image/png", "image/jpeg", "application/pdf", "image/svg+xml"}: - return self.render_image(mime_type, data, cell_index, source_line) - if mime_type == "text/html": - return self.render_text_html(data, cell_index, source_line) - if mime_type == "text/latex": - return self.render_text_latex(data, cell_index, source_line) - if mime_type == "application/javascript": - return self.render_javascript(data, cell_index, source_line) - if mime_type == WIDGET_VIEW_MIMETYPE: - return self.render_widget_view(data, cell_index, source_line) - if mime_type == "text/markdown": - return self.render_markdown(data, cell_index, source_line) - - return self.render_unknown(mime_type, data, cell_index, source_line) - - def render_unknown( - self, mime_type: str, data: Union[str, bytes], cell_index: int, source_line: int - ) -> List[nodes.Element]: - """Render a notebook output of unknown mime type. - - :param mime_type: the key from the "data" dict - :param data: the value from the "data" dict - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ - return self.report( - "warning", - f"skipping unknown output mime type: {mime_type}", - line=source_line, - ) - - def render_markdown( - self, data: str, cell_index: int, source_line: int - ) -> List[nodes.Element]: - """Render a notebook text/markdown mime data output. - - :param data: the value from the "data" dict - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ - # create a container to parse the markdown into - temp_container = nodes.container() - - # setup temporary renderer config - md = self.renderer.md - match_titles = self.renderer.md_env.get("match_titles", None) - if self.renderer.get_nb_config("embed_markdown_outputs", cell_index): - # this configuration is used in conjunction with a transform, - # which move this content outside & below the output container - # in this way the Markdown output can contain headings, - # and not break the structure of the docutils AST - # TODO create transform and for sphinx prioritise this output for all output formats - self.renderer.md_env["match_titles"] = True - else: - # otherwise we render as simple Markdown and heading are not allowed - self.renderer.md_env["match_titles"] = False - self.renderer.md = create_md_parser( - MdParserConfig(commonmark_only=True), self.renderer.__class__ - ) - - # parse markdown - with self.renderer.current_node_context(temp_container): - self.renderer.nested_render_text(data, source_line) - - # restore renderer config - self.renderer.md = md - self.renderer.md_env["match_titles"] = match_titles - - return temp_container.children - - def render_text_plain( - self, data: str, cell_index: int, source_line: int - ) -> List[nodes.Element]: - """Render a notebook text/plain mime data output. 
- - :param data: the value from the "data" dict - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ - lexer = self.renderer.get_nb_config("render_text_lexer", cell_index) - node = self.renderer.create_highlighted_code_block( - data, lexer, source=self.source, line=source_line - ) - node["classes"] += ["output", "text_plain"] - return [node] - - def render_text_html( - self, data: str, cell_index: int, source_line: int - ) -> List[nodes.Element]: - """Render a notebook text/html mime data output. - - :param data: the value from the "data" dict - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - :param inline: create inline nodes instead of block nodes - """ - return [nodes.raw(text=data, format="html", classes=["output", "text_html"])] - - def render_text_latex( - self, data: str, cell_index: int, source_line: int - ) -> List[nodes.Element]: - """Render a notebook text/latex mime data output. - - :param data: the value from the "data" dict - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ - # TODO should we always assume this is math? - return [ - nodes.math_block( - text=strip_latex_delimiters(data), - nowrap=False, - number=None, - classes=["output", "text_latex"], - ) - ] - - def render_image( - self, - mime_type: Union[str, bytes], - data: bytes, - cell_index: int, - source_line: int, - ) -> List[nodes.Element]: - """Render a notebook image mime data output. - - :param mime_type: the key from the "data" dict - :param data: the value from the "data" dict - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ - # Adapted from: - # https://github.com/jupyter/nbconvert/blob/45df4b6089b3bbab4b9c504f9e6a892f5b8692e3/nbconvert/preprocessors/extractoutput.py#L43 - - # ensure that the data is a bytestring - if mime_type in {"image/png", "image/jpeg", "application/pdf"}: - # data is b64-encoded as text - data_bytes = a2b_base64(data) - elif isinstance(data, str): - # ensure corrent line separator - data_bytes = os.linesep.join(data.splitlines()).encode("utf-8") - # create filename - extension = guess_extension(mime_type) or "." + mime_type.rsplit("/")[-1] - # latex does not recognize the '.jpe' extension - extension = ".jpeg" if extension == ".jpe" else extension - # ensure de-duplication of outputs by using hash as filename - # TODO note this is a change to the current implementation, - # which names by {notbook_name}-{cell_index}-{output-index}.{extension} - data_hash = hashlib.sha256(data_bytes).hexdigest() - filename = f"{data_hash}{extension}" - path = self.write_file([filename], data_bytes, overwrite=False, exists_ok=True) - # TODO add additional attributes - return [nodes.image(uri=str(path))] - - def render_javascript( - self, data: str, cell_index: int, source_line: int - ) -> List[nodes.Element]: - """Render a notebook application/javascript mime data output. 
- - :param data: the value from the "data" dict - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ - content = sanitize_script_content(data) - mime_type = "application/javascript" - return [ - nodes.raw( - text=f'', - format="html", - ) - ] - - def render_widget_view( - self, data: str, cell_index: int, source_line: int - ) -> List[nodes.Element]: - """Render a notebook application/vnd.jupyter.widget-view+json mime output. - - :param data: the value from the "data" dict - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ - content = json.dumps(sanitize_script_content(data)) - return [ - nodes.raw( - text=f'', - format="html", - ) - ] - - def _run_cli(writer_name: str, writer_description: str, argv: Optional[List[str]]): """Run the command line interface for a particular writer.""" publish_cmdline( diff --git a/myst_nb/new/__init__.py b/myst_nb/new/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/myst_nb/new/execute.py b/myst_nb/new/execute.py new file mode 100644 index 00000000..aa826867 --- /dev/null +++ b/myst_nb/new/execute.py @@ -0,0 +1,71 @@ +"""Module for executing notebooks.""" +import os +from contextlib import nullcontext +from logging import Logger +from pathlib import Path +from tempfile import TemporaryDirectory + +from jupyter_cache import get_cache +from jupyter_cache.executors import load_executor +from jupyter_cache.executors.utils import single_nb_execution +from nbformat import NotebookNode + +from myst_nb.configuration import NbParserConfig + + +def update_notebook( + notebook: NotebookNode, + source: str, + nb_config: NbParserConfig, + logger: Logger, +) -> NotebookNode: + """Update a notebook using the given configuration. + + This function may execute the notebook if necessary. + + :param notebook: The notebook to update. + :param source: Path to or description of the input source being processed. + :param nb_config: The configuration for the notebook parser. + :param logger: The logger to use. + + :returns: The updated notebook. + """ + # TODO also look at notebook metadata + if nb_config.execution_mode == "force": + # TODO what if source is a descriptor? 
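        # (illustrative aside, not part of the patch: once the Sphinx layer is added
        #  later in this series, these execution options are exposed with an ``nb_``
        #  prefix, e.g. in conf.py:
        #
        #      extensions = ["myst_nb.new.sphinx_"]
        #      nb_execution_mode = "cache"        # or "force"
        #      nb_execution_allow_errors = False
        #      nb_execution_timeout = 60          # example value only
        #  )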
+ path = str(Path(source).parent) + cwd_context = ( + TemporaryDirectory() if nb_config.execution_in_temp else nullcontext(path) + ) + with cwd_context as cwd: + cwd = os.path.abspath(cwd) + logger.info(f"Executing notebook in {cwd}") + result = single_nb_execution( + notebook, + cwd=cwd, + allow_errors=nb_config.execution_allow_errors, + timeout=nb_config.execution_timeout, + ) + logger.info(f"Executed notebook in {result.time:.2f} seconds") + # TODO save execution data on document (and environment if sphinx) + # TODO handle errors + elif nb_config.execution_mode == "cache": + # TODO for sphinx, the default would be in the output directory + cache = get_cache(nb_config.execution_cache_path or ".cache") + stage_record = cache.stage_notebook_file(source) + # TODO handle converters + if cache.get_cache_record_of_staged(stage_record.pk) is None: + executor = load_executor("basic", cache, logger=logger) + executor.run_and_cache( + filter_pks=[stage_record.pk], + allow_errors=nb_config.execution_allow_errors, + timeout=nb_config.execution_timeout, + run_in_temp=nb_config.execution_in_temp, + ) + else: + logger.info("Using cached notebook outputs") + # TODO save execution data on document (and environment if sphinx) + # TODO handle errors + _, notebook = cache.merge_match_into_notebook(notebook) + + return notebook diff --git a/myst_nb/new/parse.py b/myst_nb/new/parse.py new file mode 100644 index 00000000..d8ea6a72 --- /dev/null +++ b/myst_nb/new/parse.py @@ -0,0 +1,180 @@ +"""Module for parsing notebooks to Markdown-it tokens.""" +from typing import Any, Dict, List + +from markdown_it.main import MarkdownIt +from markdown_it.rules_core import StateCore +from markdown_it.token import Token +from nbformat import NotebookNode + +from myst_nb.new.render import WIDGET_STATE_MIMETYPE + + +def nb_node_to_dict(node: NotebookNode) -> Dict[str, Any]: + """Recursively convert a notebook node to a dict.""" + return _nb_node_to_dict(node) + + +def _nb_node_to_dict(item: Any) -> Any: + """Recursively convert any notebook nodes to dict.""" + if isinstance(item, NotebookNode): + return {k: _nb_node_to_dict(v) for k, v in item.items()} + return item + + +def notebook_to_tokens( + notebook: NotebookNode, mdit_parser: MarkdownIt, mdit_env: Dict[str, Any] +) -> List[Token]: + # disable front-matter, since this is taken from the notebook + mdit_parser.disable("front_matter", ignoreInvalid=True) + # this stores global state, such as reference definitions + + # Parse block tokens only first, leaving inline parsing to a second phase + # (required to collect all reference definitions, before assessing references). 
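    # (illustrative aside, not part of the patch: for example, one markdown cell may
    #  use a reference such as ``[text][label]`` whose definition
    #  ``[label]: https://example.com`` only appears in a later cell; block-parsing
    #  every cell first gathers all definitions before the inline phase resolves them.)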
+ metadata = nb_node_to_dict(notebook.metadata) + # save these keys on the document, rather than as docinfo + spec_data = { + key: metadata.pop(key, None) for key in ("kernelspec", "language_info") + } + + # get language lexer name + langinfo = spec_data.get("language_info", {}) + lexer = langinfo.get("pygments_lexer", langinfo.get("name", None)) + if lexer is None: + lexer = spec_data.get("kernelspec", {}).get("language", None) + # TODO warning if no lexer + + # extract widgets + widgets = metadata.pop("widgets", None) + block_tokens = [ + Token( + "front_matter", + "", + 0, + map=[0, 0], + content=metadata, # type: ignore[arg-type] + ), + Token("nb_spec_data", "", 0, meta=spec_data), + ] + for cell_index, nb_cell in enumerate(notebook.cells): + + # skip empty cells + if len(nb_cell["source"].strip()) == 0: + continue + + # skip cells tagged for removal + # TODO make configurable + tags = nb_cell.metadata.get("tags", []) + if ("remove_cell" in tags) or ("remove-cell" in tags): + continue + + # generate tokens + tokens: List[Token] + if nb_cell["cell_type"] == "markdown": + # https://nbformat.readthedocs.io/en/5.1.3/format_description.html#markdown-cells + # TODO if cell has tag output-caption, then use as caption for next/preceding cell? + tokens = [ + Token( + "nb_cell_markdown_open", + "", + 1, + hidden=True, + meta={ + "index": cell_index, + "metadata": nb_node_to_dict(nb_cell["metadata"]), + }, + map=[0, len(nb_cell["source"].splitlines()) - 1], + ), + ] + with mdit_parser.reset_rules(): + # enable only rules up to block + rules = mdit_parser.core.ruler.get_active_rules() + mdit_parser.core.ruler.enableOnly(rules[: rules.index("inline")]) + tokens.extend(mdit_parser.parse(nb_cell["source"], mdit_env)) + tokens.append( + Token( + "nb_cell_markdown_close", + "", + -1, + hidden=True, + ), + ) + elif nb_cell["cell_type"] == "raw": + # https://nbformat.readthedocs.io/en/5.1.3/format_description.html#raw-nbconvert-cells + tokens = [ + Token( + "nb_cell_raw", + "code", + 0, + content=nb_cell["source"], + meta={ + "index": cell_index, + "metadata": nb_node_to_dict(nb_cell["metadata"]), + }, + map=[0, 0], + ) + ] + elif nb_cell["cell_type"] == "code": + # https://nbformat.readthedocs.io/en/5.1.3/format_description.html#code-cells + # we don't copy the outputs here, since this would + # greatly increase the memory consumption, + # instead they will referenced by the cell index + tokens = [ + Token( + "nb_cell_code", + "code", + 0, + content=nb_cell["source"], + meta={ + "index": cell_index, + "execution_count": nb_cell.get("execution_count", None), + "lexer": lexer, + "metadata": nb_node_to_dict(nb_cell["metadata"]), + }, + map=[0, 0], + ) + ] + else: + pass # TODO create warning + + # update token's source lines, using either a source_map (index -> line), + # set when converting to a notebook, or a pseudo base of the cell index + smap = notebook.metadata.get("source_map", None) + start_line = smap[cell_index] if smap else (cell_index + 1) * 10000 + start_line += 1 # use base 1 rather than 0 + for token in tokens: + if token.map: + token.map = [start_line + token.map[0], start_line + token.map[1]] + # also update the source lines for duplicate references + for dup_ref in mdit_env.get("duplicate_refs", []): + if "fixed" not in dup_ref: + dup_ref["map"] = [ + start_line + dup_ref["map"][0], + start_line + dup_ref["map"][1], + ] + dup_ref["fixed"] = True + + # add tokens to list + block_tokens.extend(tokens) + + # The widget state will be embedded as a script, at the end of HTML output + 
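    # (illustrative aside, not part of the patch: in HTML output this state is
    #  ultimately embedded as something like
    #  ``<script type="application/vnd.jupyter.widget-state+json">{"state": {...}}</script>``,
    #  the mimetype that client-side widget embedding looks for.)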
widget_state = (widgets or {}).get(WIDGET_STATE_MIMETYPE, None) + if widget_state and widget_state.get("state", None): + block_tokens.append( + Token( + "nb_widget_state", + "script", + 0, + attrs={"type": WIDGET_STATE_MIMETYPE}, + meta={"state": widget_state}, + map=[0, 0], + ) + ) + + # Now all definitions have been gathered, run the inline parsing phase + state = StateCore("", mdit_parser, mdit_env, block_tokens) + with mdit_parser.reset_rules(): + rules = mdit_parser.core.ruler.get_active_rules() + mdit_parser.core.ruler.enableOnly(rules[rules.index("inline") :]) + mdit_parser.core.process(state) + + return state.tokens diff --git a/myst_nb/new/read.py b/myst_nb/new/read.py new file mode 100644 index 00000000..9d9547ef --- /dev/null +++ b/myst_nb/new/read.py @@ -0,0 +1,27 @@ +"""Module for reading notebooks from a string input.""" +from typing import Callable, Tuple + +from myst_parser.main import MdParserConfig +from nbformat import NotebookNode +from nbformat import reads as read_ipynb + +from myst_nb.configuration import NbParserConfig + +NOTEBOOK_VERSION = 4 + + +def create_nb_reader( + string: str, source: str, md_config: MdParserConfig, nb_config: NbParserConfig +) -> Tuple[Callable[[str], NotebookNode], MdParserConfig]: + """Create a notebook reader, given a string, source and configuration. + + Note, we do not directly parse to a notebook, since jupyter-cache functionality + requires the reader. + + :param string: The input string. + :param source: Path to or description of the input source being processed. + + :returns: the notebook reader, and the (potentially modified) MdParserConfig. + """ + # TODO handle converters + return lambda text: read_ipynb(text, as_version=NOTEBOOK_VERSION), md_config diff --git a/myst_nb/new/render.py b/myst_nb/new/render.py new file mode 100644 index 00000000..8048e05d --- /dev/null +++ b/myst_nb/new/render.py @@ -0,0 +1,422 @@ +"""Module for rendering notebook components to docutils nodes.""" +import hashlib +import json +import os +import re +from binascii import a2b_base64 +from functools import lru_cache +from mimetypes import guess_extension +from pathlib import Path +from typing import TYPE_CHECKING, List, Union + +from docutils import nodes +from importlib_metadata import entry_points +from myst_parser.main import MdParserConfig, create_md_parser +from nbformat import NotebookNode +from typing_extensions import Literal + +if TYPE_CHECKING: + from myst_nb.docutils_ import DocutilsNbRenderer + + +WIDGET_STATE_MIMETYPE = "application/vnd.jupyter.widget-state+json" +WIDGET_VIEW_MIMETYPE = "application/vnd.jupyter.widget-view+json" +RENDER_ENTRY_GROUP = "myst_nb.renderers" +_ANSI_RE = re.compile("\x1b\\[(.*?)([@-~])") + + +def strip_ansi(text: str) -> str: + """Strip ANSI escape sequences from a string""" + return _ANSI_RE.sub("", text) + + +def sanitize_script_content(content: str) -> str: + """Sanitize the content of a ``", r"<\/script>") + + +def strip_latex_delimiters(source): + r"""Remove LaTeX math delimiters that would be rendered by the math block. + + These are: ``\(…\)``, ``\[…\]``, ``$…$``, and ``$$…$$``. + This is necessary because sphinx does not have a dedicated role for + generic LaTeX, while Jupyter only defines generic LaTeX output, see + https://github.com/jupyter/jupyter-sphinx/issues/90 for discussion. 
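    (Illustrative examples, not part of the patch; behaviour follows from the
    delimiter pairs below)::

        strip_latex_delimiters(r"\(a = 1\)")   # -> "a = 1"
        strip_latex_delimiters("$$x^{2}$$")    # -> "x^{2}"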
+ """ + source = source.strip() + delimiter_pairs = (pair.split() for pair in r"\( \),\[ \],$$ $$,$ $".split(",")) + for start, end in delimiter_pairs: + if source.startswith(start) and source.endswith(end): + return source[len(start) : -len(end)] + + return source + + +class NbElementRenderer: + """A class for rendering notebook elements.""" + + def __init__(self, renderer: "DocutilsNbRenderer", output_folder: str) -> None: + """Initialize the renderer. + + :params output_folder: the folder path for external outputs (like images) + """ + self._renderer = renderer + self._output_folder = output_folder + + @property + def renderer(self) -> "DocutilsNbRenderer": + """The renderer this output renderer is associated with.""" + return self._renderer + + def write_file( + self, path: List[str], content: bytes, overwrite=False, exists_ok=False + ) -> Path: + """Write a file to the external output folder. + + :param path: the path to write the file to, relative to the output folder + :param content: the content to write to the file + :param overwrite: whether to overwrite an existing file + :param exists_ok: whether to ignore an existing file if overwrite is False + """ + folder = Path(self._output_folder) + filepath = folder.joinpath(*path) + if filepath.exists(): + if overwrite: + filepath.write_bytes(content) + elif not exists_ok: + # TODO raise or just report? + raise FileExistsError(f"File already exists: {filepath}") + else: + filepath.parent.mkdir(parents=True, exist_ok=True) + filepath.write_bytes(content) + + return filepath + + @property + def source(self): + """The source of the notebook.""" + return self.renderer.document["source"] + + def report( + self, level: Literal["warning", "error", "severe"], message: str, line: int + ) -> nodes.system_message: + """Report an issue.""" + # TODO add cell index to message + # TODO handle for sphinx (including type/subtype) + reporter = self.renderer.document.reporter + levels = { + "warning": reporter.WARNING_LEVEL, + "error": reporter.ERROR_LEVEL, + "severe": reporter.SEVERE_LEVEL, + } + return reporter.system_message( + levels.get(level, reporter.WARNING_LEVEL), message, line=line + ) + + def get_cell_metadata(self, cell_index: int) -> NotebookNode: + # TODO handle key/index error + return self._renderer.config["notebook"]["cells"][cell_index]["metadata"] + + def render_stdout( + self, output: NotebookNode, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook stdout output. + + https://nbformat.readthedocs.io/en/5.1.3/format_description.html#stream-output + + :param output: the output node + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ + metadata = self.get_cell_metadata(cell_index) + if "remove-stdout" in metadata.get("tags", []): + return [] + lexer = self.renderer.get_nb_config("render_text_lexer", cell_index) + node = self.renderer.create_highlighted_code_block( + output["text"], lexer, source=self.source, line=source_line + ) + node["classes"] += ["output", "stream"] + return [node] + + def render_stderr( + self, output: NotebookNode, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook stderr output. 
+ + https://nbformat.readthedocs.io/en/5.1.3/format_description.html#stream-output + + :param output: the output node + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ + metadata = self.get_cell_metadata(cell_index) + if "remove-stdout" in metadata.get("tags", []): + return [] + output_stderr = self.renderer.get_nb_config("output_stderr", cell_index) + msg = "output render: stderr was found in the cell outputs" + outputs = [] + if output_stderr == "remove": + return [] + elif output_stderr == "remove-warn": + return [self.report("warning", msg, line=source_line)] + elif output_stderr == "warn": + outputs.append(self.report("warning", msg, line=source_line)) + elif output_stderr == "error": + outputs.append(self.report("error", msg, line=source_line)) + elif output_stderr == "severe": + outputs.append(self.report("severe", msg, line=source_line)) + lexer = self.renderer.get_nb_config("render_text_lexer", cell_index) + node = self.renderer.create_highlighted_code_block( + output["text"], lexer, source=self.source, line=source_line + ) + node["classes"] += ["output", "stderr"] + outputs.append(node) + return outputs + + def render_error( + self, output: NotebookNode, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook error output. + + https://nbformat.readthedocs.io/en/5.1.3/format_description.html#error + + :param output: the output node + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ + traceback = strip_ansi("\n".join(output["traceback"])) + lexer = self.renderer.get_nb_config("render_error_lexer", cell_index) + node = self.renderer.create_highlighted_code_block( + traceback, lexer, source=self.source, line=source_line + ) + node["classes"] += ["output", "traceback"] + return [node] + + def render_mime_type( + self, mime_type: str, data: Union[str, bytes], cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook mime output. + + https://nbformat.readthedocs.io/en/5.1.3/format_description.html#display-data + + :param mime_type: the key from the "data" dict + :param data: the value from the "data" dict + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ + if mime_type == "text/plain": + return self.render_text_plain(data, cell_index, source_line) + if mime_type in {"image/png", "image/jpeg", "application/pdf", "image/svg+xml"}: + return self.render_image(mime_type, data, cell_index, source_line) + if mime_type == "text/html": + return self.render_text_html(data, cell_index, source_line) + if mime_type == "text/latex": + return self.render_text_latex(data, cell_index, source_line) + if mime_type == "application/javascript": + return self.render_javascript(data, cell_index, source_line) + if mime_type == WIDGET_VIEW_MIMETYPE: + return self.render_widget_view(data, cell_index, source_line) + if mime_type == "text/markdown": + return self.render_markdown(data, cell_index, source_line) + + return self.render_unknown(mime_type, data, cell_index, source_line) + + def render_unknown( + self, mime_type: str, data: Union[str, bytes], cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook output of unknown mime type. 
+ + :param mime_type: the key from the "data" dict + :param data: the value from the "data" dict + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ + return self.report( + "warning", + f"skipping unknown output mime type: {mime_type}", + line=source_line, + ) + + def render_markdown( + self, data: str, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook text/markdown mime data output. + + :param data: the value from the "data" dict + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ + # create a container to parse the markdown into + temp_container = nodes.container() + + # setup temporary renderer config + md = self.renderer.md + match_titles = self.renderer.md_env.get("match_titles", None) + if self.renderer.get_nb_config("embed_markdown_outputs", cell_index): + # this configuration is used in conjunction with a transform, + # which move this content outside & below the output container + # in this way the Markdown output can contain headings, + # and not break the structure of the docutils AST + # TODO create transform and for sphinx prioritise this output for all output formats + self.renderer.md_env["match_titles"] = True + else: + # otherwise we render as simple Markdown and heading are not allowed + self.renderer.md_env["match_titles"] = False + self.renderer.md = create_md_parser( + MdParserConfig(commonmark_only=True), self.renderer.__class__ + ) + + # parse markdown + with self.renderer.current_node_context(temp_container): + self.renderer.nested_render_text(data, source_line) + + # restore renderer config + self.renderer.md = md + self.renderer.md_env["match_titles"] = match_titles + + return temp_container.children + + def render_text_plain( + self, data: str, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook text/plain mime data output. + + :param data: the value from the "data" dict + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ + lexer = self.renderer.get_nb_config("render_text_lexer", cell_index) + node = self.renderer.create_highlighted_code_block( + data, lexer, source=self.source, line=source_line + ) + node["classes"] += ["output", "text_plain"] + return [node] + + def render_text_html( + self, data: str, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook text/html mime data output. + + :param data: the value from the "data" dict + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + :param inline: create inline nodes instead of block nodes + """ + return [nodes.raw(text=data, format="html", classes=["output", "text_html"])] + + def render_text_latex( + self, data: str, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook text/latex mime data output. + + :param data: the value from the "data" dict + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ + # TODO should we always assume this is math? 
+ return [ + nodes.math_block( + text=strip_latex_delimiters(data), + nowrap=False, + number=None, + classes=["output", "text_latex"], + ) + ] + + def render_image( + self, + mime_type: Union[str, bytes], + data: bytes, + cell_index: int, + source_line: int, + ) -> List[nodes.Element]: + """Render a notebook image mime data output. + + :param mime_type: the key from the "data" dict + :param data: the value from the "data" dict + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ + # Adapted from: + # https://github.com/jupyter/nbconvert/blob/45df4b6089b3bbab4b9c504f9e6a892f5b8692e3/nbconvert/preprocessors/extractoutput.py#L43 + + # ensure that the data is a bytestring + if mime_type in {"image/png", "image/jpeg", "application/pdf"}: + # data is b64-encoded as text + data_bytes = a2b_base64(data) + elif isinstance(data, str): + # ensure corrent line separator + data_bytes = os.linesep.join(data.splitlines()).encode("utf-8") + # create filename + extension = guess_extension(mime_type) or "." + mime_type.rsplit("/")[-1] + # latex does not recognize the '.jpe' extension + extension = ".jpeg" if extension == ".jpe" else extension + # ensure de-duplication of outputs by using hash as filename + # TODO note this is a change to the current implementation, + # which names by {notbook_name}-{cell_index}-{output-index}.{extension} + data_hash = hashlib.sha256(data_bytes).hexdigest() + filename = f"{data_hash}{extension}" + path = self.write_file([filename], data_bytes, overwrite=False, exists_ok=True) + # TODO add additional attributes + return [nodes.image(uri=str(path))] + + def render_javascript( + self, data: str, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook application/javascript mime data output. + + :param data: the value from the "data" dict + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ + content = sanitize_script_content(data) + mime_type = "application/javascript" + return [ + nodes.raw( + text=f'', + format="html", + ) + ] + + def render_widget_view( + self, data: str, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a notebook application/vnd.jupyter.widget-view+json mime output. 
+ + :param data: the value from the "data" dict + :param cell_index: the index of the cell containing the output + :param source_line: the line number of the cell in the source document + """ + content = json.dumps(sanitize_script_content(data)) + return [ + nodes.raw( + text=f'', + format="html", + ) + ] + + +@lru_cache(maxsize=10) +def load_renderer(name: str) -> NbElementRenderer: + """Load a renderer, + given a name within the ``RENDER_ENTRY_GROUP`` entry point group + """ + all_eps = entry_points() + if hasattr(all_eps, "select"): + # importlib_metadata >= 3.6 or importlib.metadata in python >=3.10 + eps = all_eps.select(group=RENDER_ENTRY_GROUP, name=name) + found = name in eps.names + else: + eps = {ep.name: ep for ep in all_eps.get(RENDER_ENTRY_GROUP, [])} + found = name in eps + if found: + klass = eps[name].load() + if not issubclass(klass, NbElementRenderer): + raise Exception( + f"Entry Point for {RENDER_ENTRY_GROUP}:{name} " + f"is not a subclass of `NbElementRenderer`: {klass}" + ) + return klass + + raise Exception(f"No Entry Point found for {RENDER_ENTRY_GROUP}:{name}") diff --git a/setup.cfg b/setup.cfg index ca5f91a3..3c2ef799 100644 --- a/setup.cfg +++ b/setup.cfg @@ -67,8 +67,8 @@ console_scripts = myst_nb.mime_render = default = myst_nb.render_outputs:CellOutputRenderer inline = myst_nb.render_outputs:CellOutputRendererInline -myst_nb.output_renderer = - default = myst_nb.docutils_:NbOutputRenderer +myst_nb.renderers = + default = myst_nb.new.render:NbElementRenderer pygments.lexers = myst-ansi = myst_nb.lexers:AnsiColorLexer ipythontb = myst_nb.lexers:IPythonTracebackLexer From 27ec5f7b204b3d50ec6f3c2be4ec1d67bcffa911 Mon Sep 17 00:00:00 2001 From: Chris Sewell Date: Sun, 2 Jan 2022 21:42:36 +0100 Subject: [PATCH 11/75] Add execution data to the document --- myst_nb/docutils_.py | 14 +++++++++--- myst_nb/new/execute.py | 51 ++++++++++++++++++++++++++++++++++-------- 2 files changed, 53 insertions(+), 12 deletions(-) diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 849516f8..c80e629f 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -140,8 +140,15 @@ def parse(self, inputstring: str, document: nodes.document) -> None: ) notebook = nb_reader(inputstring) + # TODO update nb_config from notebook metadata + # potentially execute notebook and/or populate outputs from cache - notebook = update_notebook(notebook, document["source"], nb_config, logger) + notebook, exec_data = update_notebook( + notebook, document["source"], nb_config, logger + ) + if exec_data: + document["nb_exec_data"] = exec_data + # TODO store/print error traceback? # TODO write executed notebook to output folder # always for sphinx, but maybe docutils option on whether to do this? @@ -167,6 +174,7 @@ class DocutilsNbRenderer(DocutilsRenderer): def get_nb_config(self, key: str, cell_index: Optional[int]) -> Any: # TODO selection between config/notebook/cell level + # (we can maybe update the nb_config with notebook level metadata in parser) # TODO handle KeyError better return self.config["nb_config"][key] @@ -266,8 +274,8 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: render_priority = self.get_nb_config("render_priority", cell_index) renderer_name = self.get_nb_config("render_plugin", cell_index) # get folder path for external outputs (like images) - # TODO for sphinx we use a set output folder - output_folder = self.get_nb_config("output_folder", cell_index) + # TODO for sphinx we use a set output folder (set this in parser?) 
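        # (illustrative aside, not part of the patch: a third-party package could
        #  register its own element renderer under the new entry-point group, e.g.
        #  in its setup.cfg:
        #
        #      [options.entry_points]
        #      myst_nb.renderers =
        #          custom = my_pkg.renderers:CustomRenderer   # hypothetical names
        #
        #  and select it through the ``render_plugin`` option; the class must
        #  subclass ``NbElementRenderer``.)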
+ output_folder = self.get_nb_config("output_folder", None) # load renderer class from name renderer: NbElementRenderer = load_renderer(renderer_name)(self, output_folder) for output in outputs: diff --git a/myst_nb/new/execute.py b/myst_nb/new/execute.py index aa826867..bf85a9d7 100644 --- a/myst_nb/new/execute.py +++ b/myst_nb/new/execute.py @@ -1,24 +1,41 @@ """Module for executing notebooks.""" import os from contextlib import nullcontext +from datetime import datetime from logging import Logger from pathlib import Path from tempfile import TemporaryDirectory +from typing import Any, Dict, Optional, Tuple from jupyter_cache import get_cache from jupyter_cache.executors import load_executor from jupyter_cache.executors.utils import single_nb_execution from nbformat import NotebookNode +from typing_extensions import TypedDict from myst_nb.configuration import NbParserConfig +class ExecutionResult(TypedDict): + """Result of executing a notebook.""" + + mtime: float + """POSIX timestamp of the execution time""" + runtime: Optional[float] + """runtime in seconds""" + method: str + """method used to execute the notebook""" + succeeded: bool + """True if the notebook executed successfully""" + # TODO error_log: str + + def update_notebook( notebook: NotebookNode, source: str, nb_config: NbParserConfig, logger: Logger, -) -> NotebookNode: +) -> Tuple[NotebookNode, Optional[ExecutionResult]]: """Update a notebook using the given configuration. This function may execute the notebook if necessary. @@ -28,9 +45,10 @@ def update_notebook( :param nb_config: The configuration for the notebook parser. :param logger: The logger to use. - :returns: The updated notebook. + :returns: The updated notebook, and the (optional) execution metadata. """ - # TODO also look at notebook metadata + exec_metadata: Optional[ExecutionResult] = None + if nb_config.execution_mode == "force": # TODO what if source is a descriptor? 
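        # (illustrative aside, not part of the patch: the ``exec_metadata`` assembled
        #  below is what the parsers store on the document, roughly:
        #
        #      document["nb_exec_data"] == {
        #          "mtime": 1641168000.0,   # example value
        #          "runtime": 1.2,          # example value
        #          "method": "force",
        #          "succeeded": True,
        #      }
        #  )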
path = str(Path(source).parent) @@ -47,9 +65,18 @@ def update_notebook( timeout=nb_config.execution_timeout, ) logger.info(f"Executed notebook in {result.time:.2f} seconds") - # TODO save execution data on document (and environment if sphinx) - # TODO handle errors + + exec_metadata = { + "mtime": datetime.now().timestamp(), + "runtime": result.time, + "method": nb_config.execution_mode, + "succeeded": False if result.err else True, + } + + # TODO handle errors + elif nb_config.execution_mode == "cache": + # TODO for sphinx, the default would be in the output directory cache = get_cache(nb_config.execution_cache_path or ".cache") stage_record = cache.stage_notebook_file(source) @@ -57,15 +84,21 @@ def update_notebook( if cache.get_cache_record_of_staged(stage_record.pk) is None: executor = load_executor("basic", cache, logger=logger) executor.run_and_cache( - filter_pks=[stage_record.pk], + filter_pks=[stage_record.pk], # TODO specitfy, rather than filter allow_errors=nb_config.execution_allow_errors, timeout=nb_config.execution_timeout, run_in_temp=nb_config.execution_in_temp, ) else: logger.info("Using cached notebook outputs") - # TODO save execution data on document (and environment if sphinx) - # TODO handle errors + _, notebook = cache.merge_match_into_notebook(notebook) - return notebook + exec_metadata = { + "mtime": datetime.now().timestamp(), + "runtime": None, # TODO get runtime from cache + "method": nb_config.execution_mode, + "succeeded": True, # TODO handle errors + } + + return notebook, exec_metadata From 248e7af2f93a0d6b3f878b2beae5fdf398fc5b22 Mon Sep 17 00:00:00 2001 From: Chris Sewell Date: Mon, 3 Jan 2022 02:55:37 +0100 Subject: [PATCH 12/75] Initial implementation of new sphinx parser --- myst_nb/configuration.py | 2 +- myst_nb/new/execute.py | 2 +- myst_nb/new/sphinx_.py | 148 ++++++++++++++++++++ tests/test_sphinx_builds.py | 16 +++ tests/test_sphinx_builds/test_basic_run.xml | 14 ++ 5 files changed, 180 insertions(+), 2 deletions(-) create mode 100644 myst_nb/new/sphinx_.py create mode 100644 tests/test_sphinx_builds.py create mode 100644 tests/test_sphinx_builds/test_basic_run.xml diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index 8fa291a3..1e9435ca 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -109,7 +109,7 @@ class NbParserConfig: validator=instance_of(str), metadata={ "help": "Output folder for external outputs", - "docutils_only": True, # in sphinx we output to the build folder + "docutils_only": True, # in sphinx we always output to the build folder }, ) remove_code_source: bool = attr.ib( diff --git a/myst_nb/new/execute.py b/myst_nb/new/execute.py index bf85a9d7..11a0a606 100644 --- a/myst_nb/new/execute.py +++ b/myst_nb/new/execute.py @@ -5,7 +5,7 @@ from logging import Logger from pathlib import Path from tempfile import TemporaryDirectory -from typing import Any, Dict, Optional, Tuple +from typing import Optional, Tuple from jupyter_cache import get_cache from jupyter_cache.executors import load_executor diff --git a/myst_nb/new/sphinx_.py b/myst_nb/new/sphinx_.py new file mode 100644 index 00000000..b5f732b7 --- /dev/null +++ b/myst_nb/new/sphinx_.py @@ -0,0 +1,148 @@ +"""An extension for sphinx""" +import logging +from pathlib import Path +from typing import Any, Dict + +from docutils import nodes +from myst_parser import setup_sphinx as setup_myst_parser +from myst_parser.main import MdParserConfig, create_md_parser +from myst_parser.sphinx_parser import MystParser +from sphinx.application import Sphinx 
+from sphinx.util import logging as sphinx_logging + +from myst_nb import __version__ +from myst_nb.configuration import NbParserConfig +from myst_nb.docutils_ import DocutilsNbRenderer +from myst_nb.new.execute import update_notebook +from myst_nb.new.parse import notebook_to_tokens +from myst_nb.new.read import create_nb_reader + + +def setup(app): + return sphinx_setup(app) + + +def sphinx_setup(app: Sphinx): + """Initialize Sphinx extension.""" + # TODO perhaps there should be a way to turn this off, + # app.add_source_suffix(".md", "myst-nb") + app.add_source_suffix(".ipynb", "myst-nb") + app.add_source_parser(MystNbParser) + + # Add myst-parser transforms and configuration + setup_myst_parser(app) + + for name, default, field in NbParserConfig().as_triple(): + if not field.metadata.get("docutils_only", False): + # TODO add types? + app.add_config_value(f"nb_{name}", default, "env") + # TODO add deprecated names + + # generate notebook configuration from Sphinx configuration + app.connect("builder-inited", create_mystnb_config) + + # ensure notebook checkpoints are excluded + app.connect("config-inited", add_exclude_patterns) + # add HTML resources + app.connect("builder-inited", add_static_path) + app.add_css_file("mystnb.css") + + # TODO do we need to add lexers, if they are anyhow added via entry-points? + + return {"version": __version__, "parallel_read_safe": True} + + +def create_mystnb_config(app): + """Generate notebook configuration from Sphinx configuration""" + + # Ignore type checkers because the attribute is dynamically assigned + from sphinx.util.console import bold # type: ignore[attr-defined] + + logger = sphinx_logging.getLogger(__name__) + + # TODO deal with deprecated names + values = { + name: app.config[f"nb_{name}"] + for name, _, field in NbParserConfig().as_triple() + if not field.metadata.get("docutils_only", False) + } + + try: + app.env.mystnb_config = NbParserConfig(**values) + logger.info(bold("myst-nb v%s:") + " %s", __version__, app.env.mystnb_config) + except (TypeError, ValueError) as error: + logger.error("myst-nb configuration invalid: %s", error.args[0]) + app.env.mystnb_config = NbParserConfig() + + +def add_exclude_patterns(app: Sphinx, config): + """Add default exclude patterns (if not already present).""" + if "**.ipynb_checkpoints" not in config.exclude_patterns: + config.exclude_patterns.append("**.ipynb_checkpoints") + + +def add_static_path(app: Sphinx): + """Add static path for myst-nb.""" + static_path = Path(__file__).absolute().with_name("_static") + app.config.html_static_path.append(str(static_path)) + + +class MystNbParser(MystParser): + """Sphinx parser for Jupyter Notebook formats, containing MyST Markdown.""" + + supported = ("myst-nb",) + translate_section_name = None + + config_section = "myst-nb parser" + config_section_dependencies = ("parsers",) + + @staticmethod + def get_logger(document: nodes.document) -> logging.Logger: + """Get or create a logger for a docutils document.""" + # TODO load with document + return sphinx_logging.getLogger(__name__) + + def parse(self, inputstring: str, document: nodes.document) -> None: + """Parse source text. 
+ + :param inputstring: The source string to parse + :param document: The root docutils node to add AST elements to + """ + # create a logger for this document + logger = self.get_logger(document) + + # get markdown parsing configuration + md_config: MdParserConfig = document.settings.env.myst_config + # get notebook rendering configuration + nb_config: NbParserConfig = document.settings.env.mystnb_config + + # convert inputstring to notebook + nb_reader, md_config = create_nb_reader( + inputstring, document["source"], md_config, nb_config + ) + notebook = nb_reader(inputstring) + + # TODO update nb_config from notebook metadata + + # potentially execute notebook and/or populate outputs from cache + notebook, exec_data = update_notebook( + notebook, document["source"], nb_config, logger + ) + if exec_data: + document["nb_exec_data"] = exec_data + # TODO store/print error traceback? + + # TODO write executed notebook to output folder + # always for sphinx, but maybe docutils option on whether to do this? + # only on successful parse? + + # Setup parser + mdit_parser = create_md_parser(md_config, DocutilsNbRenderer) + mdit_parser.options["document"] = document + mdit_parser.options["notebook"] = notebook + mdit_parser.options["nb_config"] = nb_config.as_dict() + mdit_env: Dict[str, Any] = {} + # parse to tokens + mdit_tokens = notebook_to_tokens(notebook, mdit_parser, mdit_env) + # convert to docutils AST, which is added to the document + mdit_parser.renderer.render(mdit_tokens, mdit_parser.options, mdit_env) diff --git a/tests/test_sphinx_builds.py b/tests/test_sphinx_builds.py new file mode 100644 index 00000000..ef8a1ad6 --- /dev/null +++ b/tests/test_sphinx_builds.py @@ -0,0 +1,16 @@ +"""Test full sphinx builds.""" +import pytest + + +@pytest.mark.sphinx_params( + "basic_run.ipynb", conf={"extensions": ["myst_nb.new.sphinx_"]} +) +def test_basic_run(sphinx_run, file_regression): + sphinx_run.build() + # print(sphinx_run.status()) + assert sphinx_run.warnings() == "" + # TODO implement "cleaning" of doctree["nb_language_info"] this on SphinxFixture + # e.g. remove/replace the python 'version' key + file_regression.check( + sphinx_run.get_doctree().pformat(), extension=".xml", encoding="utf8" + ) diff --git a/tests/test_sphinx_builds/test_basic_run.xml b/tests/test_sphinx_builds/test_basic_run.xml new file mode 100644 index 00000000..2e26f613 --- /dev/null +++ b/tests/test_sphinx_builds/test_basic_run.xml @@ -0,0 +1,14 @@ + +
+ + a title + <paragraph> + some text + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="1" nb_type="cell_code"> + <container classes="cell_input" nb_type="cell_code_source"> + <literal_block language="ipython3" xml:space="preserve"> + a=1 + print(a) + <container classes="cell_output" nb_type="cell_code_output"> + <literal_block classes="output stream" language="myst-ansi" xml:space="preserve"> + 1 From 73ea5ed362488be084550bcbb41f0c46f8bb2e3b Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Mon, 3 Jan 2022 17:20:59 +0100 Subject: [PATCH 13/75] Improve logging mechanism --- myst_nb/configuration.py | 6 ++ myst_nb/docutils_.py | 134 ++++++++++++++------------------------- myst_nb/new/execute.py | 1 + myst_nb/new/loggers.py | 108 +++++++++++++++++++++++++++++++ myst_nb/new/parse.py | 13 +++- myst_nb/new/render.py | 29 ++++++--- myst_nb/new/sphinx_.py | 69 ++++++++++++-------- 7 files changed, 236 insertions(+), 124 deletions(-) create mode 100644 myst_nb/new/loggers.py diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index 1e9435ca..72c58d5e 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -57,6 +57,8 @@ class NbParserConfig: # TODO handle old names; put in metadata, then auto generate warnings + # TODO mark which config are allowed per notebook/cell + # file read options custom_formats: Dict[str, Tuple[str, dict, bool]] = attr.ib( @@ -204,3 +206,7 @@ def as_triple(self) -> Iterable[Tuple[str, Any, attr.Attribute]]: fields = attr.fields_dict(self.__class__) for name, value in attr.asdict(self).items(): yield name, value, fields[name] + + def copy(self, **changes) -> "NbParserConfig": + """Return a copy of the configuration with optional changes applied.""" + return attr.evolve(self, **changes) diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index c80e629f..92dff711 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -1,8 +1,8 @@ """A parser for docutils.""" import json -import logging from typing import Any, Dict, List, Optional, Tuple +import nbformat from docutils import nodes from docutils.core import default_description, publish_cmdline from markdown_it.tree import SyntaxTreeNode @@ -15,6 +15,7 @@ from myst_nb.configuration import NbParserConfig from myst_nb.new.execute import update_notebook +from myst_nb.new.loggers import DocutilsLogger from myst_nb.new.parse import notebook_to_tokens from myst_nb.new.read import create_nb_reader from myst_nb.new.render import NbElementRenderer, load_renderer, sanitize_script_content @@ -27,61 +28,6 @@ } -# mapping of standard logger level names to their docutils equivalent -_LOGNAME_TO_DOCUTILS_LEVEL = { - "DEBUG": 0, - "INFO": 1, - "WARN": 2, - "WARNING": 2, - "ERROR": 3, - "CRITICAL": 4, - "FATAL": 4, -} - - -class DocutilsFormatter(logging.Formatter): - """A formatter that formats log messages for docutils.""" - - def __init__(self, source: str): - """Initialize a new formatter.""" - self._source = source - super().__init__() - - def format(self, record: logging.LogRecord) -> str: - """Format a log record for docutils.""" - levelname = record.levelname.upper() - level = _LOGNAME_TO_DOCUTILS_LEVEL.get(levelname, 0) - node = nodes.system_message( - record.msg, source=self._source, type=levelname, level=level - ) - return node.astext() - - -class DocutilsLogHandler(logging.Handler): - """Bridge from a Python logger to a docutils reporter.""" - - def __init__(self, document: nodes.document) -> None: - """Initialize a new handler.""" - 
super().__init__() - self._document = document - reporter = self._document.reporter - self._name_to_level = { - "DEBUG": reporter.DEBUG_LEVEL, - "INFO": reporter.INFO_LEVEL, - "WARN": reporter.WARNING_LEVEL, - "WARNING": reporter.WARNING_LEVEL, - "ERROR": reporter.ERROR_LEVEL, - "CRITICAL": reporter.SEVERE_LEVEL, - "FATAL": reporter.SEVERE_LEVEL, - } - - def emit(self, record: logging.LogRecord) -> None: - """Handle a log record.""" - levelname = record.levelname.upper() - level = self._name_to_level.get(levelname, self._document.reporter.DEBUG_LEVEL) - self._document.reporter.system_message(level, record.msg) - - class Parser(MystParser): """Docutils parser for Jupyter Notebooks, containing MyST Markdown.""" @@ -98,23 +44,16 @@ class Parser(MystParser): config_section = "myst-nb parser" - @staticmethod - def get_logger(document: nodes.document) -> logging.Logger: - """Get or create a logger for a docutils document.""" - logger = logging.getLogger(document["source"]) - logger.setLevel(logging.DEBUG) - if not logger.handlers: - logger.addHandler(DocutilsLogHandler(document)) - return logger - def parse(self, inputstring: str, document: nodes.document) -> None: """Parse source text. :param inputstring: The source string to parse :param document: The root docutils node to add AST elements to """ - # create a logger for this document - logger = self.get_logger(document) + document_source = document["source"] + + # get a logger for this document + logger = DocutilsLogger(document) # get markdown parsing configuration try: @@ -136,32 +75,39 @@ def parse(self, inputstring: str, document: nodes.document) -> None: # convert inputstring to notebook nb_reader, md_config = create_nb_reader( - inputstring, document["source"], md_config, nb_config + inputstring, document_source, md_config, nb_config ) notebook = nb_reader(inputstring) # TODO update nb_config from notebook metadata + # Setup the markdown parser + mdit_parser = create_md_parser(md_config, DocutilsNbRenderer) + mdit_parser.options["document"] = document + mdit_parser.options["notebook"] = notebook + mdit_parser.options["nb_config"] = nb_config.as_dict() + mdit_env: Dict[str, Any] = {} + + # load notebook element renderer class from entry-point name + # this is separate from DocutilsNbRenderer, so that users can override it + renderer_name = nb_config.render_plugin + nb_renderer: NbElementRenderer = load_renderer(renderer_name)( + mdit_parser.renderer + ) + mdit_parser.options["nb_renderer"] = nb_renderer + # potentially execute notebook and/or populate outputs from cache notebook, exec_data = update_notebook( - notebook, document["source"], nb_config, logger + notebook, document_source, nb_config, logger ) if exec_data: document["nb_exec_data"] = exec_data # TODO store/print error traceback? - # TODO write executed notebook to output folder - # always for sphinx, but maybe docutils option on whether to do this? - # only on successful parse? + # TODO also write CSS to output folder if necessary or always? 
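
# -- Editor's note: illustrative sketch, not part of the patch ----------------
# load_renderer() above resolves nb_config.render_plugin to an NbElementRenderer
# subclass via entry points, so a third-party package could swap in its own
# output renderer. A minimal sketch; the "quiet" plugin, its package and the
# entry-point group name shown are assumptions for illustration only:
from myst_nb.new.render import NbElementRenderer

class QuietRenderer(NbElementRenderer):
    """A hypothetical renderer that drops stderr streams entirely."""

    def render_stderr(self, output, cell_index, source_line):
        return []  # no docutils nodes -> stderr is not rendered

# the providing package would register it, e.g. in its setup.cfg:
#   [options.entry_points]
#   myst_nb.renderers =
#       quiet = my_package.renderers:QuietRenderer
# and users would select it with: nb_render_plugin = "quiet"
# ------------------------------------------------------------------------------
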
- # Setup parser - mdit_parser = create_md_parser(md_config, DocutilsNbRenderer) - mdit_parser.options["document"] = document - mdit_parser.options["notebook"] = notebook - mdit_parser.options["nb_config"] = nb_config.as_dict() - mdit_env: Dict[str, Any] = {} # parse to tokens - mdit_tokens = notebook_to_tokens(notebook, mdit_parser, mdit_env) + mdit_tokens = notebook_to_tokens(notebook, mdit_parser, mdit_env, logger) # convert to docutils AST, which is added to the document mdit_parser.renderer.render(mdit_tokens, mdit_parser.options, mdit_env) @@ -169,6 +115,20 @@ def parse(self, inputstring: str, document: nodes.document) -> None: class DocutilsNbRenderer(DocutilsRenderer): """ "A docutils-only renderer for Jupyter Notebooks.""" + def render(self, tokens, options, md_env) -> nodes.document: + document = super().render(tokens, options, md_env) + # write executed notebook to output folder + # utf-8 is the de-facto standard encoding for notebooks. + content = nbformat.writes(self.config["notebook"]).encode("utf-8") + if self.sphinx_env: + path = self.sphinx_env.docname.split("/") + path[-1] += ".ipynb" + else: + # TODO maybe docutils should be optional on whether to do this? + path = ["rendered.ipynb"] + self.config["nb_renderer"].write_file(path, content, overwrite=True) + return document + # TODO maybe move more things to NbOutputRenderer? # and change name to e.g. NbElementRenderer @@ -271,13 +231,11 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: if self.get_nb_config("merge_streams", cell_index): # TODO should this be moved to the parsing phase? outputs = coalesce_streams(outputs) + + renderer: NbElementRenderer = self.config["nb_renderer"] render_priority = self.get_nb_config("render_priority", cell_index) - renderer_name = self.get_nb_config("render_plugin", cell_index) - # get folder path for external outputs (like images) - # TODO for sphinx we use a set output folder (set this in parser?) 
- output_folder = self.get_nb_config("output_folder", None) - # load renderer class from name - renderer: NbElementRenderer = load_renderer(renderer_name)(self, output_folder) + + # render the outputs for output in outputs: if output.output_type == "stream": if output.name == "stdout": @@ -308,7 +266,8 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: "No output mime type found from render_priority", line=line, append_to=self.current_node, - subtype="nb_mime_type", + wtype="mystnb", + subtype="mime_type", ) else: container = nodes.container(mime_type=mime_type) @@ -323,7 +282,8 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: f"Unsupported output type: {output.output_type}", line=line, append_to=self.current_node, - subtype="nb_output_type", + wtype="mystnb", + subtype="output_type", ) def render_nb_widget_state(self, token: SyntaxTreeNode) -> None: diff --git a/myst_nb/new/execute.py b/myst_nb/new/execute.py index 11a0a606..75801200 100644 --- a/myst_nb/new/execute.py +++ b/myst_nb/new/execute.py @@ -78,6 +78,7 @@ def update_notebook( elif nb_config.execution_mode == "cache": # TODO for sphinx, the default would be in the output directory + # also in sphinx we run and cache up front cache = get_cache(nb_config.execution_cache_path or ".cache") stage_record = cache.stage_notebook_file(source) # TODO handle converters diff --git a/myst_nb/new/loggers.py b/myst_nb/new/loggers.py new file mode 100644 index 00000000..f0b664fb --- /dev/null +++ b/myst_nb/new/loggers.py @@ -0,0 +1,108 @@ +"""This module provides equivalent loggers for both docutils and sphinx. + +These loggers act like standard Python logging.Logger objects, +but route messages via the docutils/sphinx reporting systems. + +They are initialised with a docutils document, +in order to provide the source location of the log message, +and can also both handle ``line`` and ``subtype`` keyword arguments: +``logger.warning("message", line=1, subtype="foo")`` + +""" +import logging + +from docutils import nodes + +DEFAULT_LOG_TYPE = "mystnb" + + +class SphinxLogger(logging.LoggerAdapter): + """Wraps a Sphinx logger, which routes messages to the docutils document reporter. + + The document path and message type are automatically included in the message, + and ``line`` is allowed as a keyword argument, + as well as the standard sphinx logger keywords: + ``subtype``, ``color``, ``once``, ``nonl``. + + As per the sphinx logger, warnings are suppressed, + if their ``type.subtype`` are included in the ``suppress_warnings`` configuration. + These are also appended to the end of messages. + """ + + def __init__(self, document: nodes.document, type_name: str = DEFAULT_LOG_TYPE): + from sphinx.util import logging as sphinx_logging + + docname = document.settings.env.docname + self.logger = sphinx_logging.getLogger(f"{type_name}-{docname}") + # default extras to parse to sphinx logger + # location can be: docname, (docname, lineno), or a node + self.extra = {"location": docname, "type": type_name} + + def process(self, msg, kwargs): + kwargs["extra"] = self.extra + if "type" in kwargs: # override type + self.extra["type"] = kwargs.pop("type") + subtype = ("." + kwargs["subtype"]) if "subtype" in kwargs else "" + if "line" in kwargs: # add line to location + self.extra["location"] = (self.extra["location"], kwargs.pop("line")) + return f"{msg} [{self.extra['type']}{subtype}]", kwargs + + +class DocutilsLogger(logging.LoggerAdapter): + """A logger which routes messages to the docutils document reporter. 
+ + The document path and message type are automatically included in the message, + and ``line`` is allowed as a keyword argument. + The standard sphinx logger keywords are allowed but ignored: + ``subtype``, ``color``, ``once``, ``nonl``. + + ``type.subtype`` are also appended to the end of messages. + """ + + KEYWORDS = ["type", "subtype", "location", "nonl", "color", "once", "line"] + + def __init__(self, document: nodes.document, type_name: str = DEFAULT_LOG_TYPE): + self.logger = logging.getLogger(f"{type_name}-{document.source}") + # docutils handles the level of output logging + self.logger.setLevel(logging.DEBUG) + if not self.logger.hasHandlers(): + self.logger.addHandler(DocutilsLogHandler(document)) + + # default extras to parse to sphinx logger + # location can be: docname, (docname, lineno), or a node + self.extra = {"type": type_name, "line": None} + + def process(self, msg, kwargs): + kwargs["extra"] = self.extra + subtype = ("." + kwargs["subtype"]) if "subtype" in kwargs else "" + for keyword in self.KEYWORDS: + if keyword in kwargs: + kwargs["extra"][keyword] = kwargs.pop(keyword) + return f"{msg} [{self.extra['type']}{subtype}]", kwargs + + +class DocutilsLogHandler(logging.Handler): + """Handle logging via a docutils reporter.""" + + def __init__(self, document: nodes.document) -> None: + """Initialize a new handler.""" + super().__init__() + self._document = document + reporter = self._document.reporter + self._name_to_level = { + "DEBUG": reporter.DEBUG_LEVEL, + "INFO": reporter.INFO_LEVEL, + "WARN": reporter.WARNING_LEVEL, + "WARNING": reporter.WARNING_LEVEL, + "ERROR": reporter.ERROR_LEVEL, + "CRITICAL": reporter.SEVERE_LEVEL, + "FATAL": reporter.SEVERE_LEVEL, + } + + def emit(self, record: logging.LogRecord) -> None: + """Handle a log record.""" + levelname = record.levelname.upper() + level = self._name_to_level.get(levelname, self._document.reporter.DEBUG_LEVEL) + self._document.reporter.system_message( + level, record.msg, **({"line": record.line} if record.line else {}) + ) diff --git a/myst_nb/new/parse.py b/myst_nb/new/parse.py index d8ea6a72..e100179c 100644 --- a/myst_nb/new/parse.py +++ b/myst_nb/new/parse.py @@ -1,4 +1,5 @@ """Module for parsing notebooks to Markdown-it tokens.""" +import logging from typing import Any, Dict, List from markdown_it.main import MarkdownIt @@ -22,7 +23,10 @@ def _nb_node_to_dict(item: Any) -> Any: def notebook_to_tokens( - notebook: NotebookNode, mdit_parser: MarkdownIt, mdit_env: Dict[str, Any] + notebook: NotebookNode, + mdit_parser: MarkdownIt, + mdit_env: Dict[str, Any], + logger: logging.Logger, ) -> List[Token]: # disable front-matter, since this is taken from the notebook mdit_parser.disable("front_matter", ignoreInvalid=True) @@ -36,12 +40,15 @@ def notebook_to_tokens( key: metadata.pop(key, None) for key in ("kernelspec", "language_info") } - # get language lexer name + # attempt to get language lexer name langinfo = spec_data.get("language_info", {}) lexer = langinfo.get("pygments_lexer", langinfo.get("name", None)) if lexer is None: lexer = spec_data.get("kernelspec", {}).get("language", None) - # TODO warning if no lexer + if lexer is None: + logger.warning( + "No source code lexer found in notebook metadata", subtype="lexer" + ) # extract widgets widgets = metadata.pop("widgets", None) diff --git a/myst_nb/new/render.py b/myst_nb/new/render.py index 8048e05d..0649de16 100644 --- a/myst_nb/new/render.py +++ b/myst_nb/new/render.py @@ -56,13 +56,12 @@ def strip_latex_delimiters(source): class NbElementRenderer: 
"""A class for rendering notebook elements.""" - def __init__(self, renderer: "DocutilsNbRenderer", output_folder: str) -> None: + def __init__(self, renderer: "DocutilsNbRenderer") -> None: """Initialize the renderer. :params output_folder: the folder path for external outputs (like images) """ self._renderer = renderer - self._output_folder = output_folder @property def renderer(self) -> "DocutilsNbRenderer": @@ -71,16 +70,18 @@ def renderer(self) -> "DocutilsNbRenderer": def write_file( self, path: List[str], content: bytes, overwrite=False, exists_ok=False - ) -> Path: + ) -> str: """Write a file to the external output folder. :param path: the path to write the file to, relative to the output folder :param content: the content to write to the file :param overwrite: whether to overwrite an existing file :param exists_ok: whether to ignore an existing file if overwrite is False + + :returns: URI to use for referencing the file """ - folder = Path(self._output_folder) - filepath = folder.joinpath(*path) + output_folder = Path(self.renderer.get_nb_config("output_folder", None)) + filepath = output_folder.joinpath(*path) if filepath.exists(): if overwrite: filepath.write_bytes(content) @@ -91,7 +92,17 @@ def write_file( filepath.parent.mkdir(parents=True, exist_ok=True) filepath.write_bytes(content) - return filepath + if self.renderer.sphinx_env: + # sphinx expects paths in POSIX format, relative to the documents path, + # or relative to the source folder if prepended with '/' + filepath = filepath.resolve() + if os.name == "nt": + # Can't get relative path between drives on Windows + return filepath.as_posix() + # Path().relative_to() doesn't work when not a direct subpath + return "/" + os.path.relpath(filepath, self.renderer.sphinx_env.app.srcdir) + else: + return str(filepath) @property def source(self): @@ -116,7 +127,7 @@ def report( def get_cell_metadata(self, cell_index: int) -> NotebookNode: # TODO handle key/index error - return self._renderer.config["notebook"]["cells"][cell_index]["metadata"] + return self.renderer.config["notebook"]["cells"][cell_index]["metadata"] def render_stdout( self, output: NotebookNode, cell_index: int, source_line: int @@ -357,9 +368,9 @@ def render_image( # which names by {notbook_name}-{cell_index}-{output-index}.{extension} data_hash = hashlib.sha256(data_bytes).hexdigest() filename = f"{data_hash}{extension}" - path = self.write_file([filename], data_bytes, overwrite=False, exists_ok=True) + uri = self.write_file([filename], data_bytes, overwrite=False, exists_ok=True) # TODO add additional attributes - return [nodes.image(uri=str(path))] + return [nodes.image(uri=uri)] def render_javascript( self, data: str, cell_index: int, source_line: int diff --git a/myst_nb/new/sphinx_.py b/myst_nb/new/sphinx_.py index b5f732b7..b2b44585 100644 --- a/myst_nb/new/sphinx_.py +++ b/myst_nb/new/sphinx_.py @@ -1,5 +1,4 @@ """An extension for sphinx""" -import logging from pathlib import Path from typing import Any, Dict @@ -14,8 +13,10 @@ from myst_nb.configuration import NbParserConfig from myst_nb.docutils_ import DocutilsNbRenderer from myst_nb.new.execute import update_notebook +from myst_nb.new.loggers import SphinxLogger from myst_nb.new.parse import notebook_to_tokens from myst_nb.new.read import create_nb_reader +from myst_nb.new.render import NbElementRenderer, load_renderer def setup(app): @@ -74,6 +75,11 @@ def create_mystnb_config(app): logger.error("myst-nb configuration invalid: %s", error.args[0]) app.env.mystnb_config = NbParserConfig() + # update 
the output_folder (for writing external files like images), + # to a set path within the sphinx build folder + output_folder = Path(app.outdir).parent.joinpath("jupyter_execute").resolve() + app.env.mystnb_config = app.env.mystnb_config.copy(output_folder=str(output_folder)) + def add_exclude_patterns(app: Sphinx, config): """Add default exclude patterns (if not already present).""" @@ -96,53 +102,66 @@ class MystNbParser(MystParser): config_section = "myst-nb parser" config_section_dependencies = ("parsers",) - @staticmethod - def get_logger(document: nodes.document) -> logging.Logger: - """Get or create a logger for a docutils document.""" - # TODO load with document - return sphinx_logging.getLogger(__name__) - def parse(self, inputstring: str, document: nodes.document) -> None: """Parse source text. :param inputstring: The source string to parse :param document: The root docutils node to add AST elements to """ - # create a logger for this document - logger = self.get_logger(document) + document_source = self.env.doc2path(self.env.docname) + + # get a logger for this document + logger = SphinxLogger(document) # get markdown parsing configuration - md_config: MdParserConfig = document.settings.env.myst_config + md_config: MdParserConfig = self.env.myst_config # get notebook rendering configuration - nb_config: NbParserConfig = document.settings.env.mystnb_config + nb_config: NbParserConfig = self.env.mystnb_config # convert inputstring to notebook + # TODO in sphinx, we also need to allow for the fact + # that the input could be a standard markdown file nb_reader, md_config = create_nb_reader( - inputstring, document["source"], md_config, nb_config + inputstring, document_source, md_config, nb_config ) notebook = nb_reader(inputstring) # TODO update nb_config from notebook metadata + # Setup the markdown parser + mdit_parser = create_md_parser(md_config, DocutilsNbRenderer) + mdit_parser.options["document"] = document + mdit_parser.options["notebook"] = notebook + mdit_parser.options["nb_config"] = nb_config.as_dict() + mdit_env: Dict[str, Any] = {} + + # load notebook element renderer class from entry-point name + # this is separate from DocutilsNbRenderer, so that users can override it + renderer_name = nb_config.render_plugin + nb_renderer: NbElementRenderer = load_renderer(renderer_name)( + mdit_parser.renderer + ) + mdit_parser.options["nb_renderer"] = nb_renderer + # potentially execute notebook and/or populate outputs from cache notebook, exec_data = update_notebook( - notebook, document["source"], nb_config, logger + notebook, document_source, nb_config, logger ) if exec_data: - document["nb_exec_data"] = exec_data - # TODO store/print error traceback? + # TODO note this is a different location to previous env.nb_execution_data + # but it is a more standard place, which will be merged on parallel builds + # (via MetadataCollector) + # Also to note, in docutils we store it on the document + # TODO should we deal with this getting overwritten by docinfo? + self.env.metadata[self.env.docname]["nb_exec_data"] = exec_data + # self.env.nb_exec_data_changed = True + # TODO how to do this in a "parallel friendly" way? perhaps we don't store + # this and just check the mtime of the exec_data instead, + # using that for the the exec_table extension - # TODO write executed notebook to output folder - # always for sphinx, but maybe docutils option on whether to do this? - # only on successful parse? + # TODO store/print error traceback? 
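
# -- Editor's note: illustrative sketch, not part of the patch ----------------
# The "nb_exec_data" entry stored in env.metadata above (keys: mtime, runtime,
# method, succeeded) can be read back by any other Sphinx extension. A minimal,
# hypothetical example that logs a summary once the build has finished:
from sphinx.util import logging as sphinx_logging

LOGGER = sphinx_logging.getLogger(__name__)

def report_nb_execution(app, exception):
    """Log one line per executed notebook at the end of the build."""
    if exception is not None:
        return
    for docname, metadata in app.env.metadata.items():
        exec_data = metadata.get("nb_exec_data")
        if exec_data:
            LOGGER.info(
                "%s: %s execution, succeeded=%s",
                docname, exec_data["method"], exec_data["succeeded"],
            )

def setup(app):
    app.connect("build-finished", report_nb_execution)
    return {"parallel_read_safe": True, "parallel_write_safe": True}
# ------------------------------------------------------------------------------
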
- # Setup parser - mdit_parser = create_md_parser(md_config, DocutilsNbRenderer) - mdit_parser.options["document"] = document - mdit_parser.options["notebook"] = notebook - mdit_parser.options["nb_config"] = nb_config.as_dict() - mdit_env: Dict[str, Any] = {} # parse to tokens - mdit_tokens = notebook_to_tokens(notebook, mdit_parser, mdit_env) + mdit_tokens = notebook_to_tokens(notebook, mdit_parser, mdit_env, logger) # convert to docutils AST, which is added to the document mdit_parser.renderer.render(mdit_tokens, mdit_parser.options, mdit_env) From 422f2e711f0255544e7b74a352afbea58c0c8161 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Mon, 3 Jan 2022 22:29:46 +0100 Subject: [PATCH 14/75] Handle mime type selection in sphinx --- myst_nb/configuration.py | 115 +++++++- myst_nb/docutils_.py | 87 +++--- myst_nb/new/loggers.py | 22 +- myst_nb/new/render.py | 80 +++--- myst_nb/new/sphinx_.py | 270 ++++++++++++++++- tests/conftest.py | 3 +- tests/test_sphinx_builds.py | 31 +- tests/test_sphinx_builds/test_basic_run.xml | 8 +- .../test_complex_outputs_run.resolved.xml | 229 +++++++++++++++ .../test_complex_outputs_run.xml | 272 ++++++++++++++++++ 10 files changed, 1010 insertions(+), 107 deletions(-) create mode 100644 tests/test_sphinx_builds/test_complex_outputs_run.resolved.xml create mode 100644 tests/test_sphinx_builds/test_complex_outputs_run.xml diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index 72c58d5e..77ad6665 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -2,7 +2,7 @@ from typing import Any, Dict, Iterable, Sequence, Tuple import attr -from attr.validators import deep_iterable, in_, instance_of, optional +from attr.validators import deep_iterable, deep_mapping, in_, instance_of, optional from typing_extensions import Literal @@ -44,6 +44,95 @@ def custom_formats_converter(value: dict) -> dict: return output +def render_priority_factory() -> Dict[str, Sequence[str]]: + """Create a default render priority dict: name -> priority list.""" + # See formats at https://www.sphinx-doc.org/en/master/usage/builders/index.html + # generated with: + # [(b.name, b.format, b.supported_image_types) + # for b in app.registry.builders.values()] + # TODO potentially could auto-generate + html_builders = [ + ("epub", "html", ["image/svg+xml", "image/png", "image/gif", "image/jpeg"]), + ("html", "html", ["image/svg+xml", "image/png", "image/gif", "image/jpeg"]), + ("dirhtml", "html", ["image/svg+xml", "image/png", "image/gif", "image/jpeg"]), + ( + "singlehtml", + "html", + ["image/svg+xml", "image/png", "image/gif", "image/jpeg"], + ), + ( + "applehelp", + "html", + [ + "image/png", + "image/gif", + "image/jpeg", + "image/tiff", + "image/jp2", + "image/svg+xml", + ], + ), + ("devhelp", "html", ["image/png", "image/gif", "image/jpeg"]), + ("htmlhelp", "html", ["image/png", "image/gif", "image/jpeg"]), + ("json", "html", ["image/svg+xml", "image/png", "image/gif", "image/jpeg"]), + ("pickle", "html", ["image/svg+xml", "image/png", "image/gif", "image/jpeg"]), + ("qthelp", "html", ["image/svg+xml", "image/png", "image/gif", "image/jpeg"]), + # deprecated RTD builders + # https://github.com/readthedocs/readthedocs-sphinx-ext/blob/master/readthedocs_ext/readthedocs.py + ( + "readthedocs", + "html", + ["image/svg+xml", "image/png", "image/gif", "image/jpeg"], + ), + ( + "readthedocsdirhtml", + "html", + ["image/svg+xml", "image/png", "image/gif", "image/jpeg"], + ), + ( + "readthedocssinglehtml", + "html", + ["image/svg+xml", 
"image/png", "image/gif", "image/jpeg"], + ), + ( + "readthedocssinglehtmllocalmedia", + "html", + ["image/svg+xml", "image/png", "image/gif", "image/jpeg"], + ), + ] + other_builders = [ + ("changes", "", []), + ("dummy", "", []), + ("gettext", "", []), + ("latex", "latex", ["application/pdf", "image/png", "image/jpeg"]), + ("linkcheck", "", []), + ("man", "man", []), + ("texinfo", "texinfo", ["image/png", "image/jpeg", "image/gif"]), + ("text", "text", []), + ("xml", "xml", []), + ("pseudoxml", "pseudoxml", []), + ] + output = {} + for name, _, supported_images in html_builders: + output[name] = ( + "application/vnd.jupyter.widget-view+json", + "application/javascript", + "text/html", + *supported_images, + "text/markdown", + "text/latex", + "text/plain", + ) + for name, _, supported_images in other_builders: + output[name] = ( + *supported_images, + "text/latex", + "text/markdown", + "text/plain", + ) + return output + + @attr.s() class NbParserConfig: """Global configuration options for the MyST-NB parser. @@ -65,7 +154,11 @@ class NbParserConfig: factory=dict, converter=custom_formats_converter, # TODO check can be loaded from string? - metadata={"help": "Custom formats for reading notebook; suffix -> reader"}, + metadata={ + "help": "Custom formats for reading notebook; suffix -> reader", + # TODO can we make this work for docutils? + "docutils_exclude": True, + }, ) # notebook execution options @@ -111,7 +204,7 @@ class NbParserConfig: validator=instance_of(str), metadata={ "help": "Output folder for external outputs", - "docutils_only": True, # in sphinx we always output to the build folder + "sphinx_exclude": True, # in sphinx we always output to the build folder }, ) remove_code_source: bool = attr.ib( @@ -155,8 +248,10 @@ class NbParserConfig: validator=instance_of(bool), metadata={"help": "Embed markdown outputs"}, # TODO better help text ) - # TODO this would be for docutils but not for sphinx - render_priority: Iterable[str] = attr.ib( + # docutils does not allow for the dictionaries in its configuration, + # and also there is no API for the parser to know the output format, so + # we use two different options for docutils(mime_priority)/sphinx(render_priority) + mime_priority: Sequence[str] = attr.ib( default=( "application/vnd.jupyter.widget-view+json", "application/javascript", @@ -169,7 +264,15 @@ class NbParserConfig: "text/plain", ), validator=deep_iterable(instance_of(str)), - metadata={"help": "Render priority for mime types"}, + metadata={"help": "Render priority for mime types", "sphinx_exclude": True}, + ) + render_priority: Dict[str, Sequence[str]] = attr.ib( + factory=render_priority_factory, + validator=deep_mapping(instance_of(str), deep_iterable(instance_of(str))), + metadata={ + "help": "Render priority for mime types, by builder name", + "docutils_exclude": True, + }, ) render_text_lexer: str = attr.ib( default="myst-ansi", diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 92dff711..1ec8576b 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -1,5 +1,4 @@ """A parser for docutils.""" -import json from typing import Any, Dict, List, Optional, Tuple import nbformat @@ -15,16 +14,14 @@ from myst_nb.configuration import NbParserConfig from myst_nb.new.execute import update_notebook -from myst_nb.new.loggers import DocutilsLogger +from myst_nb.new.loggers import DEFAULT_LOG_TYPE, DocutilsLogger from myst_nb.new.parse import notebook_to_tokens from myst_nb.new.read import create_nb_reader -from myst_nb.new.render import NbElementRenderer, 
load_renderer, sanitize_script_content +from myst_nb.new.render import NbElementRenderer, load_renderer from myst_nb.render_outputs import coalesce_streams DOCUTILS_EXCLUDED_ARGS = { - # docutils.conf can't represent dicts - # TODO can we make this work? - "custom_formats", + f.name for f in NbParserConfig.get_fields() if f.metadata.get("docutils_exclude") } @@ -92,7 +89,7 @@ def parse(self, inputstring: str, document: nodes.document) -> None: # this is separate from DocutilsNbRenderer, so that users can override it renderer_name = nb_config.render_plugin nb_renderer: NbElementRenderer = load_renderer(renderer_name)( - mdit_parser.renderer + mdit_parser.renderer, logger ) mdit_parser.options["nb_renderer"] = nb_renderer @@ -102,32 +99,31 @@ def parse(self, inputstring: str, document: nodes.document) -> None: ) if exec_data: document["nb_exec_data"] = exec_data - # TODO store/print error traceback? - # TODO also write CSS to output folder if necessary or always? + # TODO store/print error traceback? # parse to tokens mdit_tokens = notebook_to_tokens(notebook, mdit_parser, mdit_env, logger) # convert to docutils AST, which is added to the document mdit_parser.renderer.render(mdit_tokens, mdit_parser.options, mdit_env) + # write updated notebook to output folder + # TODO currently this has to be done after the render has been called/setup + # TODO maybe docutils should be optional on whether to do this? + # utf-8 is the de-facto standard encoding for notebooks. + content = nbformat.writes(notebook).encode("utf-8") + path = ["rendered.ipynb"] + nb_renderer.write_file(path, content, overwrite=True) + # TODO also write CSS to output folder if necessary or always? + class DocutilsNbRenderer(DocutilsRenderer): - """ "A docutils-only renderer for Jupyter Notebooks.""" + """A docutils-only renderer for Jupyter Notebooks.""" - def render(self, tokens, options, md_env) -> nodes.document: - document = super().render(tokens, options, md_env) - # write executed notebook to output folder - # utf-8 is the de-facto standard encoding for notebooks. - content = nbformat.writes(self.config["notebook"]).encode("utf-8") - if self.sphinx_env: - path = self.sphinx_env.docname.split("/") - path[-1] += ".ipynb" - else: - # TODO maybe docutils should be optional on whether to do this? - path = ["rendered.ipynb"] - self.config["nb_renderer"].write_file(path, content, overwrite=True) - return document + @property + def nb_renderer(self) -> NbElementRenderer: + """Get the notebook element renderer.""" + return self.config["nb_renderer"] # TODO maybe move more things to NbOutputRenderer? # and change name to e.g. NbElementRenderer @@ -140,6 +136,7 @@ def get_nb_config(self, key: str, cell_index: Optional[int]) -> Any: def render_nb_spec_data(self, token: SyntaxTreeNode) -> None: """Add a notebook spec data to the document attributes.""" + # TODO in sphinx moves these to env metadata? 
self.document["nb_kernelspec"] = token.meta["kernelspec"] self.document["nb_language_info"] = token.meta["language_info"] @@ -158,17 +155,16 @@ def render_nb_cell_raw(self, token: SyntaxTreeNode) -> None: def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: """Render a notebook code cell.""" cell_index = token.meta["index"] - exec_count = token.meta["execution_count"] tags = token.meta["metadata"].get("tags", []) # create a container for all the output classes = ["cell"] for tag in tags: classes.append(f"tag_{tag.replace(' ', '_')}") cell_container = nodes.container( - nb_type="cell_code", # TODO maybe nb_cell="code"/"markdown"/"raw" + nb_element="cell_code", cell_index=cell_index, - # TODO some way to use this to output cell indexes in HTML? - exec_count=exec_count, + # TODO some way to use this to allow repr of count in outputs like HTML? + exec_count=token.meta["execution_count"], cell_metadata=token.meta["metadata"], classes=classes, ) @@ -185,7 +181,7 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: and ("remove-input" not in tags) ): cell_input = nodes.container( - nb_type="cell_code_source", classes=["cell_input"] + nb_element="cell_code_source", classes=["cell_input"] ) self.add_line_and_source_path(cell_input, token) with self.current_node_context(cell_input, append=True): @@ -201,7 +197,7 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: and ("remove-output" not in tags) ): cell_output = nodes.container( - nb_type="cell_code_output", classes=["cell_output"] + nb_element="cell_code_output", classes=["cell_output"] ) self.add_line_and_source_path(cell_output, token) with self.current_node_context(cell_output, append=True): @@ -232,24 +228,23 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: # TODO should this be moved to the parsing phase? 
outputs = coalesce_streams(outputs) - renderer: NbElementRenderer = self.config["nb_renderer"] - render_priority = self.get_nb_config("render_priority", cell_index) + mime_priority = self.get_nb_config("mime_priority", cell_index) # render the outputs for output in outputs: if output.output_type == "stream": if output.name == "stdout": - _nodes = renderer.render_stdout(output, cell_index, line) + _nodes = self.nb_renderer.render_stdout(output, cell_index, line) self.add_line_and_source_path_r(_nodes, token) self.current_node.extend(_nodes) elif output.name == "stderr": - _nodes = renderer.render_stderr(output, cell_index, line) + _nodes = self.nb_renderer.render_stderr(output, cell_index, line) self.add_line_and_source_path_r(_nodes, token) self.current_node.extend(_nodes) else: pass # TODO warning elif output.output_type == "error": - _nodes = renderer.render_error(output, cell_index, line) + _nodes = self.nb_renderer.render_error(output, cell_index, line) self.add_line_and_source_path_r(_nodes, token) self.current_node.extend(_nodes) elif output.output_type in ("display_data", "execute_result"): @@ -260,43 +255,41 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: # if embed_markdown_outputs is True, # this should be top priority and we "mark" the container for the transform try: - mime_type = next(x for x in render_priority if x in output["data"]) + mime_type = next(x for x in mime_priority if x in output["data"]) except StopIteration: self.create_warning( "No output mime type found from render_priority", line=line, append_to=self.current_node, - wtype="mystnb", + wtype=DEFAULT_LOG_TYPE, subtype="mime_type", ) else: container = nodes.container(mime_type=mime_type) with self.current_node_context(container, append=True): - _nodes = renderer.render_mime_type( + _nodes = self.nb_renderer.render_mime_type( mime_type, output["data"][mime_type], cell_index, line ) - self.add_line_and_source_path_r(_nodes, token) self.current_node.extend(_nodes) + self.add_line_and_source_path_r([container], token) else: self.create_warning( f"Unsupported output type: {output.output_type}", line=line, append_to=self.current_node, - wtype="mystnb", + wtype=DEFAULT_LOG_TYPE, subtype="output_type", ) def render_nb_widget_state(self, token: SyntaxTreeNode) -> None: """Render the HTML defining the ipywidget state.""" - # The JSON inside the script tag is identified and parsed by: - # https://github.com/jupyter-widgets/ipywidgets/blob/32f59acbc63c3ff0acf6afa86399cb563d3a9a86/packages/html-manager/src/libembed.ts#L36 - # TODO we also need to load JS URLs if widgets are present and HTML - html = ( - f'<script type="{token.attrGet("type")}">\n' - f"{sanitize_script_content(json.dumps(token.meta['state']))}\n" - "</script>" + # TODO handle this more generally, + # by just passing all notebook metadata to the nb_renderer + # TODO in docutils we also need to load JS URLs if widgets are present and HTML + node = self.nb_renderer.render_widget_state( + mime_type=token.attrGet("type"), data=token.meta ) - node = nodes.raw("", html, format="html", nb_type="widget_state") + node["nb_element"] = "widget_state" self.add_line_and_source_path(node, token) # always append to bottom of the document self.document.append(node) diff --git a/myst_nb/new/loggers.py b/myst_nb/new/loggers.py index f0b664fb..fc805aab 100644 --- a/myst_nb/new/loggers.py +++ b/myst_nb/new/loggers.py @@ -44,7 +44,12 @@ def process(self, msg, kwargs): self.extra["type"] = kwargs.pop("type") subtype = ("." 
+ kwargs["subtype"]) if "subtype" in kwargs else "" if "line" in kwargs: # add line to location + # note this will be overridden by the location keyword self.extra["location"] = (self.extra["location"], kwargs.pop("line")) + if "parent" in kwargs: + # TODO ideally here we would append a system_message to this node, + # then it could replace myst_parser.SphinxRenderer.create_warning + self.extra["parent"] = kwargs.pop("parent") return f"{msg} [{self.extra['type']}{subtype}]", kwargs @@ -59,7 +64,16 @@ class DocutilsLogger(logging.LoggerAdapter): ``type.subtype`` are also appended to the end of messages. """ - KEYWORDS = ["type", "subtype", "location", "nonl", "color", "once", "line"] + KEYWORDS = [ + "type", + "subtype", + "location", + "nonl", + "color", + "once", + "line", + "parent", + ] def __init__(self, document: nodes.document, type_name: str = DEFAULT_LOG_TYPE): self.logger = logging.getLogger(f"{type_name}-{document.source}") @@ -70,7 +84,7 @@ def __init__(self, document: nodes.document, type_name: str = DEFAULT_LOG_TYPE): # default extras to parse to sphinx logger # location can be: docname, (docname, lineno), or a node - self.extra = {"type": type_name, "line": None} + self.extra = {"type": type_name, "line": None, "parent": None} def process(self, msg, kwargs): kwargs["extra"] = self.extra @@ -103,6 +117,8 @@ def emit(self, record: logging.LogRecord) -> None: """Handle a log record.""" levelname = record.levelname.upper() level = self._name_to_level.get(levelname, self._document.reporter.DEBUG_LEVEL) - self._document.reporter.system_message( + node = self._document.reporter.system_message( level, record.msg, **({"line": record.line} if record.line else {}) ) + if record.parent is not None: + record.parent.append(node) diff --git a/myst_nb/new/render.py b/myst_nb/new/render.py index 0649de16..5172fead 100644 --- a/myst_nb/new/render.py +++ b/myst_nb/new/render.py @@ -1,6 +1,7 @@ """Module for rendering notebook components to docutils nodes.""" import hashlib import json +import logging import os import re from binascii import a2b_base64 @@ -13,7 +14,6 @@ from importlib_metadata import entry_points from myst_parser.main import MdParserConfig, create_md_parser from nbformat import NotebookNode -from typing_extensions import Literal if TYPE_CHECKING: from myst_nb.docutils_ import DocutilsNbRenderer @@ -56,18 +56,33 @@ def strip_latex_delimiters(source): class NbElementRenderer: """A class for rendering notebook elements.""" - def __init__(self, renderer: "DocutilsNbRenderer") -> None: + # TODO the type of renderer could be DocutilsNbRenderer or SphinxNbRenderer + + def __init__(self, renderer: "DocutilsNbRenderer", logger: logging.Logger) -> None: """Initialize the renderer. :params output_folder: the folder path for external outputs (like images) """ self._renderer = renderer + self._logger = logger @property def renderer(self) -> "DocutilsNbRenderer": """The renderer this output renderer is associated with.""" return self._renderer + @property + def logger(self) -> logging.Logger: + """The logger for this renderer.""" + # TODO the only problem with logging here, is that we cannot generate + # nodes.system_message to append to the document. 
+ return self._logger + + @property + def source(self): + """The source of the notebook.""" + return self.renderer.document["source"] + def write_file( self, path: List[str], content: bytes, overwrite=False, exists_ok=False ) -> str: @@ -104,27 +119,6 @@ def write_file( else: return str(filepath) - @property - def source(self): - """The source of the notebook.""" - return self.renderer.document["source"] - - def report( - self, level: Literal["warning", "error", "severe"], message: str, line: int - ) -> nodes.system_message: - """Report an issue.""" - # TODO add cell index to message - # TODO handle for sphinx (including type/subtype) - reporter = self.renderer.document.reporter - levels = { - "warning": reporter.WARNING_LEVEL, - "error": reporter.ERROR_LEVEL, - "severe": reporter.SEVERE_LEVEL, - } - return reporter.system_message( - levels.get(level, reporter.WARNING_LEVEL), message, line=line - ) - def get_cell_metadata(self, cell_index: int) -> NotebookNode: # TODO handle key/index error return self.renderer.config["notebook"]["cells"][cell_index]["metadata"] @@ -165,18 +159,19 @@ def render_stderr( if "remove-stdout" in metadata.get("tags", []): return [] output_stderr = self.renderer.get_nb_config("output_stderr", cell_index) - msg = "output render: stderr was found in the cell outputs" + msg = f"stderr was found in the cell outputs of cell {cell_index + 1}" outputs = [] if output_stderr == "remove": return [] elif output_stderr == "remove-warn": - return [self.report("warning", msg, line=source_line)] + self.logger.warning(msg, subtype="stderr", line=source_line) + return [] elif output_stderr == "warn": - outputs.append(self.report("warning", msg, line=source_line)) + self.logger.warning(msg, subtype="stderr", line=source_line) elif output_stderr == "error": - outputs.append(self.report("error", msg, line=source_line)) + self.logger.error(msg, subtype="stderr", line=source_line) elif output_stderr == "severe": - outputs.append(self.report("severe", msg, line=source_line)) + self.logger.critical(msg, subtype="stderr", line=source_line) lexer = self.renderer.get_nb_config("render_text_lexer", cell_index) node = self.renderer.create_highlighted_code_block( output["text"], lexer, source=self.source, line=source_line @@ -218,7 +213,13 @@ def render_mime_type( """ if mime_type == "text/plain": return self.render_text_plain(data, cell_index, source_line) - if mime_type in {"image/png", "image/jpeg", "application/pdf", "image/svg+xml"}: + if mime_type in { + "image/png", + "image/jpeg", + "application/pdf", + "image/svg+xml", + "image/gif", + }: return self.render_image(mime_type, data, cell_index, source_line) if mime_type == "text/html": return self.render_text_html(data, cell_index, source_line) @@ -243,11 +244,12 @@ def render_unknown( :param cell_index: the index of the cell containing the output :param source_line: the line number of the cell in the source document """ - return self.report( - "warning", + self.logger.warning( f"skipping unknown output mime type: {mime_type}", + subtype="unknown_mime_type", line=source_line, ) + return [] def render_markdown( self, data: str, cell_index: int, source_line: int @@ -353,7 +355,7 @@ def render_image( # https://github.com/jupyter/nbconvert/blob/45df4b6089b3bbab4b9c504f9e6a892f5b8692e3/nbconvert/preprocessors/extractoutput.py#L43 # ensure that the data is a bytestring - if mime_type in {"image/png", "image/jpeg", "application/pdf"}: + if mime_type in {"image/png", "image/jpeg", "image/gif", "application/pdf"}: # data is b64-encoded as text 
data_bytes = a2b_base64(data) elif isinstance(data, str): @@ -407,6 +409,20 @@ def render_widget_view( ) ] + def render_widget_state(self, mime_type: str, data: dict) -> nodes.Element: + """Render a notebook application/vnd.jupyter.widget-state+json mime output. + + :param mime_type: the key from the "notebook.metdata.widgets" dict + :param data: the value from the "notebook.metdata.widgets" dict + """ + # The JSON inside the script tag is identified and parsed by: + # https://github.com/jupyter-widgets/ipywidgets/blob/32f59acbc63c3ff0acf6afa86399cb563d3a9a86/packages/html-manager/src/libembed.ts#L36 + content = json.dumps(sanitize_script_content(data["state"])) + return nodes.raw( + text=f'<script type="{mime_type}">\n{content}\n</script>', + format="html", + ) + @lru_cache(maxsize=10) def load_renderer(name: str) -> NbElementRenderer: diff --git a/myst_nb/new/sphinx_.py b/myst_nb/new/sphinx_.py index b2b44585..eae7c474 100644 --- a/myst_nb/new/sphinx_.py +++ b/myst_nb/new/sphinx_.py @@ -1,22 +1,30 @@ """An extension for sphinx""" from pathlib import Path -from typing import Any, Dict +from typing import Any, Dict, List, Optional, Sequence +import nbformat from docutils import nodes +from markdown_it.tree import SyntaxTreeNode from myst_parser import setup_sphinx as setup_myst_parser +from myst_parser.docutils_renderer import token_line from myst_parser.main import MdParserConfig, create_md_parser from myst_parser.sphinx_parser import MystParser +from myst_parser.sphinx_renderer import SphinxRenderer +from nbformat import NotebookNode from sphinx.application import Sphinx +from sphinx.transforms.post_transforms import SphinxPostTransform from sphinx.util import logging as sphinx_logging from myst_nb import __version__ from myst_nb.configuration import NbParserConfig -from myst_nb.docutils_ import DocutilsNbRenderer from myst_nb.new.execute import update_notebook -from myst_nb.new.loggers import SphinxLogger +from myst_nb.new.loggers import DEFAULT_LOG_TYPE, SphinxLogger from myst_nb.new.parse import notebook_to_tokens from myst_nb.new.read import create_nb_reader from myst_nb.new.render import NbElementRenderer, load_renderer +from myst_nb.render_outputs import coalesce_streams + +SPHINX_LOGGER = sphinx_logging.getLogger(__name__) def setup(app): @@ -34,7 +42,7 @@ def sphinx_setup(app: Sphinx): setup_myst_parser(app) for name, default, field in NbParserConfig().as_triple(): - if not field.metadata.get("docutils_only", False): + if not field.metadata.get("sphinx_exclude"): # TODO add types? app.add_config_value(f"nb_{name}", default, "env") # TODO add deprecated names @@ -47,6 +55,8 @@ def sphinx_setup(app: Sphinx): # add HTML resources app.connect("builder-inited", add_static_path) app.add_css_file("mystnb.css") + # add post-transform for selecting mime type from a bundle + app.add_post_transform(SelectMimeType) # TODO do we need to add lexers, if they are anyhow added via entry-points? 
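
# -- Editor's note: illustrative conf.py sketch, not part of the patch --------
# sphinx_setup() above registers one "nb_"-prefixed option per NbParserConfig
# field and adds the SelectMimeType post-transform, so a project's conf.py can
# now look roughly like this (values are examples, not defaults; see
# NbParserConfig for the accepted execution modes, e.g. "off", "cache", "force"):
extensions = ["myst_nb.new.sphinx_"]

nb_execution_mode = "cache"
nb_execution_timeout = 60

# per-builder mime-type priority, consumed by the SelectMimeType post-transform;
# builders not listed here fall back to the "html" entry (with a warning)
nb_render_priority = {
    "html": (
        "application/vnd.jupyter.widget-view+json",
        "text/html",
        "image/svg+xml",
        "image/png",
        "text/plain",
    ),
    "latex": ("image/png", "text/latex", "text/plain"),
}
# ------------------------------------------------------------------------------
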
@@ -59,20 +69,20 @@ def create_mystnb_config(app): # Ignore type checkers because the attribute is dynamically assigned from sphinx.util.console import bold # type: ignore[attr-defined] - logger = sphinx_logging.getLogger(__name__) - # TODO deal with deprecated names values = { name: app.config[f"nb_{name}"] for name, _, field in NbParserConfig().as_triple() - if not field.metadata.get("docutils_only", False) + if not field.metadata.get("sphinx_exclude") } try: app.env.mystnb_config = NbParserConfig(**values) - logger.info(bold("myst-nb v%s:") + " %s", __version__, app.env.mystnb_config) + SPHINX_LOGGER.info( + bold("myst-nb v%s:") + " %s", __version__, app.env.mystnb_config + ) except (TypeError, ValueError) as error: - logger.error("myst-nb configuration invalid: %s", error.args[0]) + SPHINX_LOGGER.error("myst-nb configuration invalid: %s", error.args[0]) app.env.mystnb_config = NbParserConfig() # update the output_folder (for writing external files like images), @@ -129,7 +139,7 @@ def parse(self, inputstring: str, document: nodes.document) -> None: # TODO update nb_config from notebook metadata # Setup the markdown parser - mdit_parser = create_md_parser(md_config, DocutilsNbRenderer) + mdit_parser = create_md_parser(md_config, SphinxNbRenderer) mdit_parser.options["document"] = document mdit_parser.options["notebook"] = notebook mdit_parser.options["nb_config"] = nb_config.as_dict() @@ -139,7 +149,7 @@ def parse(self, inputstring: str, document: nodes.document) -> None: # this is separate from DocutilsNbRenderer, so that users can override it renderer_name = nb_config.render_plugin nb_renderer: NbElementRenderer = load_renderer(renderer_name)( - mdit_parser.renderer + mdit_parser.renderer, logger ) mdit_parser.options["nb_renderer"] = nb_renderer @@ -165,3 +175,241 @@ def parse(self, inputstring: str, document: nodes.document) -> None: mdit_tokens = notebook_to_tokens(notebook, mdit_parser, mdit_env, logger) # convert to docutils AST, which is added to the document mdit_parser.renderer.render(mdit_tokens, mdit_parser.options, mdit_env) + + # write updated notebook to output folder + # TODO currently this has to be done after the render has been called/setup + # utf-8 is the de-facto standard encoding for notebooks. + content = nbformat.writes(notebook).encode("utf-8") + path = self.env.docname.split("/") + path[-1] += ".ipynb" + nb_renderer.write_file(path, content, overwrite=True) + + +class SphinxNbRenderer(SphinxRenderer): + """A sphinx renderer for Jupyter Notebooks.""" + + # TODO de-duplication with DocutilsNbRenderer + + @property + def nb_renderer(self) -> NbElementRenderer: + """Get the notebook element renderer.""" + return self.config["nb_renderer"] + + # TODO maybe move more things to NbOutputRenderer? + # and change name to e.g. 
NbElementRenderer + + def get_nb_config(self, key: str, cell_index: Optional[int]) -> Any: + # TODO selection between config/notebook/cell level + # (we can maybe update the nb_config with notebook level metadata in parser) + # TODO handle KeyError better + return self.config["nb_config"][key] + + def render_nb_spec_data(self, token: SyntaxTreeNode) -> None: + """Add a notebook spec data to the document attributes.""" + # This is different to docutils-only, where we store it on the document + env = self.sphinx_env + env.metadata[env.docname]["kernelspec"] = token.meta["kernelspec"] + env.metadata[env.docname]["language_info"] = token.meta["language_info"] + + def render_nb_cell_markdown(self, token: SyntaxTreeNode) -> None: + """Render a notebook markdown cell.""" + # TODO this is currently just a "pass-through", but we could utilise the metadata + # it would be nice to "wrap" this in a container that included the metadata, + # but unfortunately this would break the heading structure of docutils/sphinx. + # perhaps we add an "invisible" (non-rendered) marker node to the document tree, + self.render_children(token) + + def render_nb_cell_raw(self, token: SyntaxTreeNode) -> None: + """Render a notebook raw cell.""" + # TODO + + def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: + """Render a notebook code cell.""" + cell_index = token.meta["index"] + tags = token.meta["metadata"].get("tags", []) + # create a container for all the output + classes = ["cell"] + for tag in tags: + classes.append(f"tag_{tag.replace(' ', '_')}") + cell_container = nodes.container( + nb_element="cell_code", + cell_index=cell_index, + # TODO some way to use this to allow repr of count in outputs like HTML? + exec_count=token.meta["execution_count"], + cell_metadata=token.meta["metadata"], + classes=classes, + ) + self.add_line_and_source_path(cell_container, token) + with self.current_node_context(cell_container, append=True): + + # TODO do we need this -/_ duplication of tag names, or can deprecate one? 
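
# -- Editor's note: illustrative sketch, not part of the patch ----------------
# The tag checks below honour both the hyphenated and underscored spellings
# ("remove-input"/"remove_input", "remove-output"/"remove_output"). A small,
# hypothetical example of authoring such a cell with nbformat:
import nbformat

nb = nbformat.v4.new_notebook()
cell = nbformat.v4.new_code_cell("print('the source of this cell is hidden')")
cell.metadata["tags"] = ["remove-input"]  # hide the source, keep the outputs
nb.cells.append(cell)
# nbformat.write(nb, "example.ipynb")
# ------------------------------------------------------------------------------
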
+ # TODO it would be nice if remove_input/remove_output were also config + + # render the code source code + if ( + (not self.get_nb_config("remove_code_source", cell_index)) + and ("remove_input" not in tags) + and ("remove-input" not in tags) + ): + cell_input = nodes.container( + nb_element="cell_code_source", classes=["cell_input"] + ) + self.add_line_and_source_path(cell_input, token) + with self.current_node_context(cell_input, append=True): + self.render_nb_cell_code_source(token) + # render the execution output, if any + has_outputs = self.config["notebook"]["cells"][cell_index].get( + "outputs", [] + ) + if ( + has_outputs + and (not self.get_nb_config("remove_code_outputs", cell_index)) + and ("remove_output" not in tags) + and ("remove-output" not in tags) + ): + cell_output = nodes.container( + nb_element="cell_code_output", classes=["cell_output"] + ) + self.add_line_and_source_path(cell_output, token) + with self.current_node_context(cell_output, append=True): + self.render_nb_cell_code_outputs(token) + + def render_nb_cell_code_source(self, token: SyntaxTreeNode) -> None: + """Render a notebook code cell's source.""" + cell_index = token.meta["index"] + lexer = token.meta.get("lexer", None) + node = self.create_highlighted_code_block( + token.content, + lexer, + number_lines=self.get_nb_config("number_source_lines", cell_index), + source=self.document["source"], + line=token_line(token), + ) + self.add_line_and_source_path(node, token) + self.current_node.append(node) + + def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: + """Render a notebook code cell's outputs.""" + cell_index = token.meta["index"] + line = token_line(token) + outputs: List[NotebookNode] = self.config["notebook"]["cells"][cell_index].get( + "outputs", [] + ) + if self.get_nb_config("merge_streams", cell_index): + # TODO should this be moved to the parsing phase? + outputs = coalesce_streams(outputs) + + # render the outputs + for output in outputs: + if output.output_type == "stream": + if output.name == "stdout": + _nodes = self.nb_renderer.render_stdout(output, cell_index, line) + self.add_line_and_source_path_r(_nodes, token) + self.current_node.extend(_nodes) + elif output.name == "stderr": + _nodes = self.nb_renderer.render_stderr(output, cell_index, line) + self.add_line_and_source_path_r(_nodes, token) + self.current_node.extend(_nodes) + else: + pass # TODO warning + elif output.output_type == "error": + _nodes = self.nb_renderer.render_error(output, cell_index, line) + self.add_line_and_source_path_r(_nodes, token) + self.current_node.extend(_nodes) + elif output.output_type in ("display_data", "execute_result"): + # TODO how to handle figures and other means of wrapping an output: + # TODO unwrapped Markdown (so you can output headers) + # maybe in a transform, we grab the containers and move them + # "below" the code cell container? + # if embed_markdown_outputs is True, + # this should be top priority and we "mark" the container for the transform + + # We differ from the docutils-only renderer here, because we need to + # cache all rendered outputs, then choose one from the priority list + # in a post-transform, once we know which builder is required. 
+ mime_bundle = nodes.container(nb_element="mime_bundle") + with self.current_node_context(mime_bundle, append=True): + for mime_type, data in output["data"].items(): + container = nodes.container(mime_type=mime_type) + with self.current_node_context(container, append=True): + _nodes = self.nb_renderer.render_mime_type( + mime_type, data, cell_index, line + ) + self.current_node.extend(_nodes) + self.add_line_and_source_path_r([mime_bundle], token) + else: + self.create_warning( + f"Unsupported output type: {output.output_type}", + line=line, + append_to=self.current_node, + wtype=DEFAULT_LOG_TYPE, + subtype="output_type", + ) + + def render_nb_widget_state(self, token: SyntaxTreeNode) -> None: + """Render the HTML defining the ipywidget state.""" + # TODO handle this more generally, + # by just passing all notebook metadata to the nb_renderer + # TODO in docutils we also need to load JS URLs if widgets are present and HTML + node = self.nb_renderer.render_widget_state( + mime_type=token.attrGet("type"), data=token.meta + ) + node["nb_element"] = "widget_state" + self.add_line_and_source_path(node, token) + # always append to bottom of the document + self.document.append(node) + + +class SelectMimeType(SphinxPostTransform): + """Select the mime type to render from mime bundles, + based on the builder and its associated priority list. + """ + + default_priority = 4 # TODO set correct priority + + def run(self, **kwargs: Any) -> None: + """Run the transform.""" + # get priority list for this builder + # TODO allow for per-notebook/cell priority dicts + priority_lookup: Dict[str, Sequence[str]] = self.config["nb_render_priority"] + name = self.app.builder.name + if name not in priority_lookup: + SPHINX_LOGGER.warning( + f"Builder name {name!r} " + "not available in 'nb_render_priority', defaulting to 'html'", + type=DEFAULT_LOG_TYPE, + subtype="mime_priority", + ) + priority_list = priority_lookup["html"] + else: + priority_list = priority_lookup[name] + + # findall replaces traverse in docutils v0.18 + iterator = getattr(self.document, "findall", self.document.traverse) + condition = ( + lambda node: isinstance(node, nodes.container) + and node.attributes.get("nb_element", "") == "mime_bundle" + ) + # remove/replace_self will not work with an iterator + for node in list(iterator(condition)): + # get available mime types + mime_types = [node["mime_type"] for node in node.children] + # select top priority + index = None + for mime_type in priority_list: + try: + index = mime_types.index(mime_type) + except ValueError: + continue + else: + break + if index is None: + SPHINX_LOGGER.warning( + f"No mime type available in priority list builder {name!r}", + type=DEFAULT_LOG_TYPE, + subtype="mime_priority", + location=node, + ) + node.parent.remove(node) + else: + node.replace_self(node.children[index]) diff --git a/tests/conftest.py b/tests/conftest.py index 12e8353b..c191501e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -90,8 +90,9 @@ def invalidate_files(self): for name, _ in self.files: self.env.all_docs.pop(name) - def get_resolved_doctree(self, docname): + def get_resolved_doctree(self, docname=None): """Load and return the built docutils.document, after post-transforms.""" + docname = docname or self.files[0][0] doctree = self.env.get_and_resolve_doctree(docname, self.app.builder) doctree["source"] = docname return doctree diff --git a/tests/test_sphinx_builds.py b/tests/test_sphinx_builds.py index ef8a1ad6..61e77604 100644 --- a/tests/test_sphinx_builds.py +++ 
b/tests/test_sphinx_builds.py @@ -3,14 +3,39 @@ @pytest.mark.sphinx_params( - "basic_run.ipynb", conf={"extensions": ["myst_nb.new.sphinx_"]} + "basic_run.ipynb", + conf={"extensions": ["myst_nb.new.sphinx_"], "nb_execution_mode": "off"}, ) def test_basic_run(sphinx_run, file_regression): sphinx_run.build() # print(sphinx_run.status()) assert sphinx_run.warnings() == "" - # TODO implement "cleaning" of doctree["nb_language_info"] this on SphinxFixture - # e.g. remove/replace the python 'version' key + assert set(sphinx_run.app.env.metadata["basic_run"].keys()) == { + "test_name", + "kernelspec", + "language_info", + "wordcount", + } file_regression.check( sphinx_run.get_doctree().pformat(), extension=".xml", encoding="utf8" ) + + +@pytest.mark.sphinx_params( + "complex_outputs.ipynb", + conf={"extensions": ["myst_nb.new.sphinx_"], "nb_execution_mode": "off"}, +) +def test_complex_outputs_run(sphinx_run, file_regression): + sphinx_run.build() + # print(sphinx_run.status()) + assert sphinx_run.warnings() == "" + try: + file_regression.check( + sphinx_run.get_doctree().pformat(), extension=".xml", encoding="utf8" + ) + finally: + file_regression.check( + sphinx_run.get_resolved_doctree().pformat(), + extension=".resolved.xml", + encoding="utf8", + ) diff --git a/tests/test_sphinx_builds/test_basic_run.xml b/tests/test_sphinx_builds/test_basic_run.xml index 2e26f613..668e5841 100644 --- a/tests/test_sphinx_builds/test_basic_run.xml +++ b/tests/test_sphinx_builds/test_basic_run.xml @@ -1,14 +1,14 @@ -<document nb_kernelspec="{'display_name': 'Python 3', 'language': 'python', 'name': 'python3'}" nb_language_info="{'codemirror_mode': {'name': 'ipython', 'version': 3}, 'file_extension': '.py', 'mimetype': 'text/x-python', 'name': 'python', 'nbconvert_exporter': 'python', 'pygments_lexer': 'ipython3', 'version': '3.6.1'}" source="basic_run"> +<document source="basic_run"> <section classes="tex2jax_ignore mathjax_ignore" ids="a-title" names="a\ title"> <title> a title <paragraph> some text - <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="1" nb_type="cell_code"> - <container classes="cell_input" nb_type="cell_code_source"> + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> a=1 print(a) - <container classes="cell_output" nb_type="cell_code_output"> + <container classes="cell_output" nb_element="cell_code_output"> <literal_block classes="output stream" language="myst-ansi" xml:space="preserve"> 1 diff --git a/tests/test_sphinx_builds/test_complex_outputs_run.resolved.xml b/tests/test_sphinx_builds/test_complex_outputs_run.resolved.xml new file mode 100644 index 00000000..16c177bd --- /dev/null +++ b/tests/test_sphinx_builds/test_complex_outputs_run.resolved.xml @@ -0,0 +1,229 @@ +<document source="complex_outputs"> + <container cell_index="0" cell_metadata="{'init_cell': True, 'slideshow': {'slide_type': 'skip'}}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" linenos="False" xml:space="preserve"> + import matplotlib.pyplot as plt + import pandas as pd + pd.set_option('display.latex.repr', True) + import sympy as sym + sym.init_printing(use_latex=True) + import numpy as np + from IPython.display import Image, Latex + <section classes="tex2jax_ignore mathjax_ignore" ids="markdown" names="markdown"> + 
<title> + Markdown + <section ids="general" names="general"> + <title> + General + <paragraph> + Some markdown text. + <paragraph> + A list: + <bullet_list bullet="-"> + <list_item> + <paragraph> + something + <list_item> + <paragraph> + something else + <paragraph> + A numbered list + <enumerated_list enumtype="arabic" prefix="" suffix="."> + <list_item> + <paragraph> + something + <list_item> + <paragraph> + something else + <paragraph> + non-ascii characters TODO + <paragraph> + This is a long section of text, which we only want in a document (not a presentation) + + some text + + some more text + + some more text + + some more text + + some more text + + some more text + + some more text + + some more text + + some more text + <paragraph> + This is an abbreviated section of the document text, which we only want in a presentation + <bullet_list bullet="-"> + <list_item> + <paragraph> + summary of document text + <section ids="references-and-citations" names="references\ and\ citations"> + <title> + References and Citations + <paragraph> + References to \cref{fig:example}, \cref{tbl:example}, =@eqn:example_sympy and \cref{code:example_mpl}. + <paragraph> + A latex citation.\cite{zelenyak_molecular_2016} + <paragraph> + A html citation. + <raw format="html" xml:space="preserve"> + <cite data-cite="kirkeminde_thermodynamic_2012"> + (Kirkeminde, 2012) + <raw format="html" xml:space="preserve"> + </cite> + <section ids="todo-notes" names="todo\ notes"> + <title> + Todo notes + <paragraph> + \todo[inline]{an inline todo} + <paragraph> + Some text.\todo{a todo in the margins} + <section classes="tex2jax_ignore mathjax_ignore" ids="text-output" names="text\ output"> + <title> + Text Output + <container cell_index="11" cell_metadata="{'ipub': {'text': {'format': {'backgroundcolor': '\\color{blue!10}'}}}}" classes="cell" exec_count="2" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" linenos="False" xml:space="preserve"> + print(""" + This is some printed text, + with a nicely formatted output. + """) + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output stream" language="myst-ansi" linenos="False" xml:space="preserve"> + + This is some printed text, + with a nicely formatted output. + + <section classes="tex2jax_ignore mathjax_ignore" ids="images-and-figures" names="images\ and\ figures"> + <title> + Images and Figures + <container cell_index="13" cell_metadata="{'ipub': {'figure': {'caption': 'A nice picture.', 'label': 'fig:example', 'placement': '!bh'}}}" classes="cell" exec_count="3" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" linenos="False" xml:space="preserve"> + Image('example.jpg',height=400) + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="image/jpeg"> + <image candidates="{'*': '_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg'}" uri="_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg"> + <section ids="displaying-a-plot-with-its-code" names="displaying\ a\ plot\ with\ its\ code"> + <title> + Displaying a plot with its code + <paragraph> + A matplotlib figure, with the caption set in the markdowncell above the figure. + <paragraph> + The plotting code for a matplotlib figure (\cref{fig:example_mpl}). 
+ <container cell_index="17" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': 'a', 'label': 'code:example_mpl', 'widefigure': False}, 'figure': {'caption': '', 'label': 'fig:example_mpl', 'widefigure': False}}}" classes="cell" exec_count="4" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" linenos="False" xml:space="preserve"> + plt.scatter(np.random.rand(10), np.random.rand(10), + label='data label') + plt.ylabel(r'a y label with latex $\alpha$') + plt.legend(); + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="image/png"> + <image candidates="{'*': '_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png'}" uri="_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png"> + <section classes="tex2jax_ignore mathjax_ignore" ids="tables-with-pandas" names="tables\ (with\ pandas)"> + <title> + Tables (with pandas) + <paragraph> + The plotting code for a pandas Dataframe table (\cref{tbl:example}). + <container cell_index="20" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_pd', 'placement': 'H', 'widefigure': False}, 'table': {'alternate': 'gray!20', 'caption': 'An example of a table created with pandas dataframe.', 'label': 'tbl:example', 'placement': 'H'}}}" classes="cell" exec_count="5" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" linenos="False" xml:space="preserve"> + df = pd.DataFrame(np.random.rand(3,4),columns=['a','b','c','d']) + df.a = ['$\delta$','x','y'] + df.b = ['l','m','n'] + df.set_index(['a','b']) + df.round(3) + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="text/html"> + <raw classes="output text_html" format="html" xml:space="preserve"> + <div> + <style scoped> + .dataframe tbody tr th:only-of-type { + vertical-align: middle; + } + + .dataframe tbody tr th { + vertical-align: top; + } + + .dataframe thead th { + text-align: right; + } + </style> + <table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>a</th> + <th>b</th> + <th>c</th> + <th>d</th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>$\delta$</td> + <td>l</td> + <td>0.391</td> + <td>0.607</td> + </tr> + <tr> + <th>1</th> + <td>x</td> + <td>m</td> + <td>0.132</td> + <td>0.205</td> + </tr> + <tr> + <th>2</th> + <td>y</td> + <td>n</td> + <td>0.969</td> + <td>0.726</td> + </tr> + </tbody> + </table> + </div> + <section classes="tex2jax_ignore mathjax_ignore" ids="equations-with-ipython-or-sympy" names="equations\ (with\ ipython\ or\ sympy)"> + <title> + Equations (with ipython or sympy) + <container cell_index="22" cell_metadata="{'ipub': {'equation': {'label': 'eqn:example_ipy'}}}" classes="cell" exec_count="6" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" linenos="False" xml:space="preserve"> + Latex('$$ a = b+c $$') + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="text/latex"> + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + a = b+c + <paragraph> + The plotting code for a sympy equation (=@eqn:example_sympy). 
+ <container cell_index="24" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_sym', 'placement': 'H', 'widefigure': False}, 'equation': {'environment': 'equation', 'label': 'eqn:example_sympy'}}}" classes="cell" exec_count="7" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" linenos="False" xml:space="preserve"> + y = sym.Function('y') + n = sym.symbols(r'\alpha') + f = y(n)-2*y(n-1/sym.pi)-5*y(n-2) + sym.rsolve(f,y(n),[1,4]) + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="image/png"> + <image candidates="{'*': '_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png'}" uri="_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png"> + <container cell_index="25" cell_metadata="{}" classes="cell" exec_count="7" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" linenos="False" xml:space="preserve"> + from IPython.display import display, Markdown + display(Markdown('**_some_ markdown**')) + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="text/markdown"> + <paragraph> + <strong> + <emphasis> + some + markdown diff --git a/tests/test_sphinx_builds/test_complex_outputs_run.xml b/tests/test_sphinx_builds/test_complex_outputs_run.xml new file mode 100644 index 00000000..d673be65 --- /dev/null +++ b/tests/test_sphinx_builds/test_complex_outputs_run.xml @@ -0,0 +1,272 @@ +<document source="complex_outputs"> + <container cell_index="0" cell_metadata="{'init_cell': True, 'slideshow': {'slide_type': 'skip'}}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" xml:space="preserve"> + import matplotlib.pyplot as plt + import pandas as pd + pd.set_option('display.latex.repr', True) + import sympy as sym + sym.init_printing(use_latex=True) + import numpy as np + from IPython.display import Image, Latex + <section classes="tex2jax_ignore mathjax_ignore" ids="markdown" names="markdown"> + <title> + Markdown + <section ids="general" names="general"> + <title> + General + <paragraph> + Some markdown text. + <paragraph> + A list: + <bullet_list bullet="-"> + <list_item> + <paragraph> + something + <list_item> + <paragraph> + something else + <paragraph> + A numbered list + <enumerated_list enumtype="arabic" prefix="" suffix="."> + <list_item> + <paragraph> + something + <list_item> + <paragraph> + something else + <paragraph> + non-ascii characters TODO + <paragraph> + This is a long section of text, which we only want in a document (not a presentation) + + some text + + some more text + + some more text + + some more text + + some more text + + some more text + + some more text + + some more text + + some more text + <paragraph> + This is an abbreviated section of the document text, which we only want in a presentation + <bullet_list bullet="-"> + <list_item> + <paragraph> + summary of document text + <section ids="references-and-citations" names="references\ and\ citations"> + <title> + References and Citations + <paragraph> + References to \cref{fig:example}, \cref{tbl:example}, =@eqn:example_sympy and \cref{code:example_mpl}. + <paragraph> + A latex citation.\cite{zelenyak_molecular_2016} + <paragraph> + A html citation. 
+ <raw format="html" xml:space="preserve"> + <cite data-cite="kirkeminde_thermodynamic_2012"> + (Kirkeminde, 2012) + <raw format="html" xml:space="preserve"> + </cite> + <section ids="todo-notes" names="todo\ notes"> + <title> + Todo notes + <paragraph> + \todo[inline]{an inline todo} + <paragraph> + Some text.\todo{a todo in the margins} + <section classes="tex2jax_ignore mathjax_ignore" ids="text-output" names="text\ output"> + <title> + Text Output + <container cell_index="11" cell_metadata="{'ipub': {'text': {'format': {'backgroundcolor': '\\color{blue!10}'}}}}" classes="cell" exec_count="2" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" xml:space="preserve"> + print(""" + This is some printed text, + with a nicely formatted output. + """) + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output stream" language="myst-ansi" xml:space="preserve"> + + This is some printed text, + with a nicely formatted output. + + <section classes="tex2jax_ignore mathjax_ignore" ids="images-and-figures" names="images\ and\ figures"> + <title> + Images and Figures + <container cell_index="13" cell_metadata="{'ipub': {'figure': {'caption': 'A nice picture.', 'label': 'fig:example', 'placement': '!bh'}}}" classes="cell" exec_count="3" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" xml:space="preserve"> + Image('example.jpg',height=400) + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="image/jpeg"> + <image candidates="{'*': '_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg'}" uri="_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg"> + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + <IPython.core.display.Image object> + <section ids="displaying-a-plot-with-its-code" names="displaying\ a\ plot\ with\ its\ code"> + <title> + Displaying a plot with its code + <paragraph> + A matplotlib figure, with the caption set in the markdowncell above the figure. + <paragraph> + The plotting code for a matplotlib figure (\cref{fig:example_mpl}). 
+ <container cell_index="17" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': 'a', 'label': 'code:example_mpl', 'widefigure': False}, 'figure': {'caption': '', 'label': 'fig:example_mpl', 'widefigure': False}}}" classes="cell" exec_count="4" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" xml:space="preserve"> + plt.scatter(np.random.rand(10), np.random.rand(10), + label='data label') + plt.ylabel(r'a y label with latex $\alpha$') + plt.legend(); + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="image/png"> + <image candidates="{'*': '_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png'}" uri="_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png"> + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + <Figure size 432x288 with 1 Axes> + <section classes="tex2jax_ignore mathjax_ignore" ids="tables-with-pandas" names="tables\ (with\ pandas)"> + <title> + Tables (with pandas) + <paragraph> + The plotting code for a pandas Dataframe table (\cref{tbl:example}). + <container cell_index="20" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_pd', 'placement': 'H', 'widefigure': False}, 'table': {'alternate': 'gray!20', 'caption': 'An example of a table created with pandas dataframe.', 'label': 'tbl:example', 'placement': 'H'}}}" classes="cell" exec_count="5" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" xml:space="preserve"> + df = pd.DataFrame(np.random.rand(3,4),columns=['a','b','c','d']) + df.a = ['$\delta$','x','y'] + df.b = ['l','m','n'] + df.set_index(['a','b']) + df.round(3) + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="text/html"> + <raw classes="output text_html" format="html" xml:space="preserve"> + <div> + <style scoped> + .dataframe tbody tr th:only-of-type { + vertical-align: middle; + } + + .dataframe tbody tr th { + vertical-align: top; + } + + .dataframe thead th { + text-align: right; + } + </style> + <table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>a</th> + <th>b</th> + <th>c</th> + <th>d</th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>$\delta$</td> + <td>l</td> + <td>0.391</td> + <td>0.607</td> + </tr> + <tr> + <th>1</th> + <td>x</td> + <td>m</td> + <td>0.132</td> + <td>0.205</td> + </tr> + <tr> + <th>2</th> + <td>y</td> + <td>n</td> + <td>0.969</td> + <td>0.726</td> + </tr> + </tbody> + </table> + </div> + <container mime_type="text/latex"> + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + \begin{tabular}{lllrr} + \toprule + {} & a & b & c & d \\ + \midrule + 0 & \$\textbackslash delta\$ & l & 0.391 & 0.607 \\ + 1 & x & m & 0.132 & 0.205 \\ + 2 & y & n & 0.969 & 0.726 \\ + \bottomrule + \end{tabular} + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + a b c d + 0 $\delta$ l 0.391 0.607 + 1 x m 0.132 0.205 + 2 y n 0.969 0.726 + <section classes="tex2jax_ignore mathjax_ignore" ids="equations-with-ipython-or-sympy" names="equations\ (with\ ipython\ or\ sympy)"> + <title> + Equations (with 
ipython or sympy) + <container cell_index="22" cell_metadata="{'ipub': {'equation': {'label': 'eqn:example_ipy'}}}" classes="cell" exec_count="6" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" xml:space="preserve"> + Latex('$$ a = b+c $$') + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="text/latex"> + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + a = b+c + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + <IPython.core.display.Latex object> + <paragraph> + The plotting code for a sympy equation (=@eqn:example_sympy). + <container cell_index="24" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_sym', 'placement': 'H', 'widefigure': False}, 'equation': {'environment': 'equation', 'label': 'eqn:example_sympy'}}}" classes="cell" exec_count="7" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" xml:space="preserve"> + y = sym.Function('y') + n = sym.symbols(r'\alpha') + f = y(n)-2*y(n-1/sym.pi)-5*y(n-2) + sym.rsolve(f,y(n),[1,4]) + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="image/png"> + <image candidates="{'*': '_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png'}" uri="_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png"> + <container mime_type="text/latex"> + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + \displaystyle \left(\sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} - \frac{2 \sqrt{5} i}{5}\right) + \left(- \sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} + \frac{2 \sqrt{5} i}{5}\right) + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + \alpha ⎛1 2⋅√5⋅ⅈ⎞ \alpha ⎛1 2⋅√5⋅ⅈ⎞ + (√5⋅ⅈ) ⋅⎜─ - ──────⎟ + (-√5⋅ⅈ) ⋅⎜─ + ──────⎟ + ⎝2 5 ⎠ ⎝2 5 ⎠ + <container cell_index="25" cell_metadata="{}" classes="cell" exec_count="7" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" xml:space="preserve"> + from IPython.display import display, Markdown + display(Markdown('**_some_ markdown**')) + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="text/markdown"> + <paragraph> + <strong> + <emphasis> + some + markdown + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + <IPython.core.display.Markdown object> From fa395a6f6d49e52e6b015707ae59c1551e626481 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Mon, 3 Jan 2022 22:31:55 +0100 Subject: [PATCH 15/75] rename logger classes --- myst_nb/docutils_.py | 4 ++-- myst_nb/new/loggers.py | 4 ++-- myst_nb/new/sphinx_.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 1ec8576b..18d43267 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -14,7 +14,7 @@ from myst_nb.configuration import NbParserConfig from myst_nb.new.execute import update_notebook -from 
myst_nb.new.loggers import DEFAULT_LOG_TYPE, DocutilsLogger +from myst_nb.new.loggers import DEFAULT_LOG_TYPE, DocutilsDocLogger from myst_nb.new.parse import notebook_to_tokens from myst_nb.new.read import create_nb_reader from myst_nb.new.render import NbElementRenderer, load_renderer @@ -50,7 +50,7 @@ def parse(self, inputstring: str, document: nodes.document) -> None: document_source = document["source"] # get a logger for this document - logger = DocutilsLogger(document) + logger = DocutilsDocLogger(document) # get markdown parsing configuration try: diff --git a/myst_nb/new/loggers.py b/myst_nb/new/loggers.py index fc805aab..b34fe57c 100644 --- a/myst_nb/new/loggers.py +++ b/myst_nb/new/loggers.py @@ -16,7 +16,7 @@ DEFAULT_LOG_TYPE = "mystnb" -class SphinxLogger(logging.LoggerAdapter): +class SphinxDocLogger(logging.LoggerAdapter): """Wraps a Sphinx logger, which routes messages to the docutils document reporter. The document path and message type are automatically included in the message, @@ -53,7 +53,7 @@ def process(self, msg, kwargs): return f"{msg} [{self.extra['type']}{subtype}]", kwargs -class DocutilsLogger(logging.LoggerAdapter): +class DocutilsDocLogger(logging.LoggerAdapter): """A logger which routes messages to the docutils document reporter. The document path and message type are automatically included in the message, diff --git a/myst_nb/new/sphinx_.py b/myst_nb/new/sphinx_.py index eae7c474..f36d9114 100644 --- a/myst_nb/new/sphinx_.py +++ b/myst_nb/new/sphinx_.py @@ -18,7 +18,7 @@ from myst_nb import __version__ from myst_nb.configuration import NbParserConfig from myst_nb.new.execute import update_notebook -from myst_nb.new.loggers import DEFAULT_LOG_TYPE, SphinxLogger +from myst_nb.new.loggers import DEFAULT_LOG_TYPE, SphinxDocLogger from myst_nb.new.parse import notebook_to_tokens from myst_nb.new.read import create_nb_reader from myst_nb.new.render import NbElementRenderer, load_renderer @@ -121,7 +121,7 @@ def parse(self, inputstring: str, document: nodes.document) -> None: document_source = self.env.doc2path(self.env.docname) # get a logger for this document - logger = SphinxLogger(document) + logger = SphinxDocLogger(document) # get markdown parsing configuration md_config: MdParserConfig = self.env.myst_config From 88f2db6cf5145cd9df4fa941e0b8a83ad3dafc86 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Mon, 3 Jan 2022 23:43:04 +0100 Subject: [PATCH 16/75] Add deprecation pathway for legacy config names --- myst_nb/configuration.py | 45 ++++++++++++++++++++++++++++++---------- myst_nb/new/sphinx_.py | 42 ++++++++++++++++++++++++++----------- tests/conftest.py | 9 +++----- 3 files changed, 67 insertions(+), 29 deletions(-) diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index 77ad6665..8395d458 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -137,15 +137,13 @@ def render_priority_factory() -> Dict[str, Sequence[str]]: class NbParserConfig: """Global configuration options for the MyST-NB parser. 
- Note: in the sphinx configuration these option names are prepended with ``nb_`` + Note: in the docutils/sphinx configuration, + these option names are prepended with ``nb_`` """ - # TODO: nb_render_key, execution_show_tb, - # execution_excludepatterns, jupyter_cache + # TODO: nb_render_key, execution_show_tb, execution_excludepatterns # jupyter_sphinx_require_url, jupyter_sphinx_embed_url - # TODO handle old names; put in metadata, then auto generate warnings - # TODO mark which config are allowed per notebook/cell # file read options @@ -164,7 +162,9 @@ class NbParserConfig: # notebook execution options execution_mode: Literal["off", "force", "cache"] = attr.ib( - default="off", # TODO different default for docutils (off) and sphinx (cache)? + # TODO different default for docutils (off) and sphinx (cache)? + # TODO deprecate auto + default="off", validator=in_( [ "off", @@ -172,29 +172,50 @@ class NbParserConfig: "cache", ] ), - metadata={"help": "Execution mode for notebooks"}, + metadata={ + "help": "Execution mode for notebooks", + "legacy_name": "jupyter_execute_notebooks", + }, ) execution_cache_path: str = attr.ib( default="", validator=instance_of(str), - metadata={"help": "Path to folder for caching notebooks"}, + metadata={ + "help": "Path to folder for caching notebooks", + "legacy_name": "jupyter_cache", + }, ) execution_timeout: int = attr.ib( default=30, validator=instance_of(int), - metadata={"help": "Execution timeout (seconds)"}, + metadata={ + "help": "Execution timeout (seconds)", + "legacy_name": "execution_timeout", + }, ) execution_in_temp: bool = attr.ib( default=False, validator=instance_of(bool), metadata={ - "help": "Use a temporary folder for the execution current working directory" + "help": "Use temporary folder for the execution current working directory", + "legacy_name": "execution_in_temp", }, ) execution_allow_errors: bool = attr.ib( default=False, validator=instance_of(bool), - metadata={"help": "Allow errors during execution"}, + metadata={ + "help": "Allow errors during execution", + "legacy_name": "execution_allow_errors", + }, + ) + execution_show_tb: bool = attr.ib( # TODO implement + default=False, + validator=instance_of(bool), + metadata={ + "help": "Print traceback to stderr on execution error", + "legacy_name": "execution_show_tb", + }, ) # render options @@ -265,6 +286,7 @@ class NbParserConfig: ), validator=deep_iterable(instance_of(str)), metadata={"help": "Render priority for mime types", "sphinx_exclude": True}, + repr=False, ) render_priority: Dict[str, Sequence[str]] = attr.ib( factory=render_priority_factory, @@ -273,6 +295,7 @@ class NbParserConfig: "help": "Render priority for mime types, by builder name", "docutils_exclude": True, }, + repr=False, ) render_text_lexer: str = attr.ib( default="myst-ansi", diff --git a/myst_nb/new/sphinx_.py b/myst_nb/new/sphinx_.py index f36d9114..e2a485b7 100644 --- a/myst_nb/new/sphinx_.py +++ b/myst_nb/new/sphinx_.py @@ -25,6 +25,7 @@ from myst_nb.render_outputs import coalesce_streams SPHINX_LOGGER = sphinx_logging.getLogger(__name__) +UNSET = "--unset--" def setup(app): @@ -44,8 +45,11 @@ def sphinx_setup(app: Sphinx): for name, default, field in NbParserConfig().as_triple(): if not field.metadata.get("sphinx_exclude"): # TODO add types? 
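            # Illustrative example (not part of the original diff): for
            # execution_mode, whose legacy_name is "jupyter_execute_notebooks",
            # both "nb_execution_mode" and "jupyter_execute_notebooks" are
            # registered with Sphinx, so an existing conf.py setting such as
            #
            #     jupyter_execute_notebooks = "cache"
            #
            # keeps working, but is mapped to nb_execution_mode = "cache" (with a
            # deprecation warning) in create_mystnb_config below.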
- app.add_config_value(f"nb_{name}", default, "env") - # TODO add deprecated names + app.add_config_value(f"nb_{name}", default, "env", Any) + if "legacy_name" in field.metadata: + app.add_config_value( + f"{field.metadata['legacy_name']}", UNSET, "env", Any + ) # generate notebook configuration from Sphinx configuration app.connect("builder-inited", create_mystnb_config) @@ -60,7 +64,11 @@ def sphinx_setup(app: Sphinx): # TODO do we need to add lexers, if they are anyhow added via entry-points? - return {"version": __version__, "parallel_read_safe": True} + return { + "version": __version__, + "parallel_read_safe": True, + "parallel_write_safe": True, + } def create_mystnb_config(app): @@ -69,12 +77,21 @@ def create_mystnb_config(app): # Ignore type checkers because the attribute is dynamically assigned from sphinx.util.console import bold # type: ignore[attr-defined] - # TODO deal with deprecated names - values = { - name: app.config[f"nb_{name}"] - for name, _, field in NbParserConfig().as_triple() - if not field.metadata.get("sphinx_exclude") - } + values = {} + for name, _, field in NbParserConfig().as_triple(): + if not field.metadata.get("sphinx_exclude"): + values[name] = app.config[f"nb_{name}"] + if "legacy_name" in field.metadata: + legacy_value = app.config[field.metadata["legacy_name"]] + if legacy_value != UNSET: + legacy_name = field.metadata["legacy_name"] + SPHINX_LOGGER.warning( + f"{legacy_name!r} is deprecated for 'nb_{name}' " + f"[{DEFAULT_LOG_TYPE}.config]", + type=DEFAULT_LOG_TYPE, + subtype="config", + ) + values[name] = legacy_value try: app.env.mystnb_config = NbParserConfig(**values) @@ -375,8 +392,8 @@ def run(self, **kwargs: Any) -> None: name = self.app.builder.name if name not in priority_lookup: SPHINX_LOGGER.warning( - f"Builder name {name!r} " - "not available in 'nb_render_priority', defaulting to 'html'", + f"Builder name {name!r} not available in 'nb_render_priority', " + f"defaulting to 'html' [{DEFAULT_LOG_TYPE}.mime_priority]", type=DEFAULT_LOG_TYPE, subtype="mime_priority", ) @@ -405,7 +422,8 @@ def run(self, **kwargs: Any) -> None: break if index is None: SPHINX_LOGGER.warning( - f"No mime type available in priority list builder {name!r}", + f"No mime type available in priority list builder {name!r} " + f"[{DEFAULT_LOG_TYPE}.mime_priority]", type=DEFAULT_LOG_TYPE, subtype="mime_priority", location=node, diff --git a/tests/conftest.py b/tests/conftest.py index c191501e..338ab68c 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -70,11 +70,8 @@ def __init__(self, app, filenames): def build(self): """Run the sphinx build.""" - # reset streams before each build - self.app._status.truncate(0) - self.app._status.seek(0) - self.app._warning.truncate(0) - self.app._warning.seek(0) + # TODO reset streams before each build, + # but this was wiping the warnings of a build self.app.build() def status(self): @@ -165,7 +162,7 @@ def sphinx_run(sphinx_params, make_app, tempdir): "extensions": ["myst_nb"], "master_doc": os.path.splitext(sphinx_params["files"][0])[0], "exclude_patterns": ["_build"], - "execution_show_tb": True, + "nb_execution_show_tb": True, } confoverrides.update(conf) From 30e29b626251f7003fa26fbe39b18ec81ef57a85 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Tue, 4 Jan 2022 07:09:46 +0100 Subject: [PATCH 17/75] Add notebook reading of custom formats --- myst_nb/configuration.py | 13 +- myst_nb/docutils_.py | 42 ++- myst_nb/new/execute.py | 2 +- myst_nb/new/parse.py | 8 +- myst_nb/new/read.py | 333 
+++++++++++++++++- myst_nb/new/sphinx_.py | 51 +-- tests/test_sphinx_builds.py | 13 + .../test_sphinx_builds/test_basic_run_md.xml | 13 + 8 files changed, 415 insertions(+), 60 deletions(-) create mode 100644 tests/test_sphinx_builds/test_basic_run_md.xml diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index 8395d458..3ccf7f68 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -33,6 +33,7 @@ def custom_formats_converter(value: dict) -> dict: raise TypeError( f"`nb_custom_formats` values[0] must be a string: {output[suffix][0]}" ) + # TODO check can be loaded as a python object? if not isinstance(output[suffix][1], dict): raise TypeError( f"`nb_custom_formats` values[1] must be a dict: {output[suffix][1]}" @@ -151,13 +152,21 @@ class NbParserConfig: custom_formats: Dict[str, Tuple[str, dict, bool]] = attr.ib( factory=dict, converter=custom_formats_converter, - # TODO check can be loaded from string? metadata={ "help": "Custom formats for reading notebook; suffix -> reader", - # TODO can we make this work for docutils? "docutils_exclude": True, }, ) + # docutils does not support directly the custom format mechanism + read_as_md: bool = attr.ib( + default=False, + validator=instance_of(bool), + metadata={ + "help": "Read as the MyST Markdown format", + "sphinx_exclude": True, + }, + repr=False, + ) # notebook execution options diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 18d43267..2045ee43 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -1,4 +1,5 @@ """A parser for docutils.""" +from functools import partial from typing import Any, Dict, List, Optional, Tuple import nbformat @@ -16,7 +17,7 @@ from myst_nb.new.execute import update_notebook from myst_nb.new.loggers import DEFAULT_LOG_TYPE, DocutilsDocLogger from myst_nb.new.parse import notebook_to_tokens -from myst_nb.new.read import create_nb_reader +from myst_nb.new.read import NbReader, read_myst_markdown_notebook, standard_nb_read from myst_nb.new.render import NbElementRenderer, load_renderer from myst_nb.render_outputs import coalesce_streams @@ -71,15 +72,34 @@ def parse(self, inputstring: str, document: nodes.document) -> None: nb_config = NbParserConfig() # convert inputstring to notebook - nb_reader, md_config = create_nb_reader( - inputstring, document_source, md_config, nb_config - ) - notebook = nb_reader(inputstring) + # note docutils does not support the full custom format mechanism + if nb_config.read_as_md: + nb_reader = NbReader( + partial( + read_myst_markdown_notebook, + config=md_config, + add_source_map=True, + ), + md_config, + ) + else: + nb_reader = NbReader(standard_nb_read, md_config) + notebook = nb_reader.read(inputstring) # TODO update nb_config from notebook metadata + # potentially execute notebook and/or populate outputs from cache + # TODO parse notebook reader? + notebook, exec_data = update_notebook( + notebook, document_source, nb_config, logger + ) + if exec_data: + document["nb_exec_data"] = exec_data + + # TODO store/print error traceback? 
+ # Setup the markdown parser - mdit_parser = create_md_parser(md_config, DocutilsNbRenderer) + mdit_parser = create_md_parser(nb_reader.md_config, DocutilsNbRenderer) mdit_parser.options["document"] = document mdit_parser.options["notebook"] = notebook mdit_parser.options["nb_config"] = nb_config.as_dict() @@ -93,15 +113,6 @@ def parse(self, inputstring: str, document: nodes.document) -> None: ) mdit_parser.options["nb_renderer"] = nb_renderer - # potentially execute notebook and/or populate outputs from cache - notebook, exec_data = update_notebook( - notebook, document_source, nb_config, logger - ) - if exec_data: - document["nb_exec_data"] = exec_data - - # TODO store/print error traceback? - # parse to tokens mdit_tokens = notebook_to_tokens(notebook, mdit_parser, mdit_env, logger) # convert to docutils AST, which is added to the document @@ -297,6 +308,7 @@ def render_nb_widget_state(self, token: SyntaxTreeNode) -> None: def _run_cli(writer_name: str, writer_description: str, argv: Optional[List[str]]): """Run the command line interface for a particular writer.""" + # TODO note to run this with --report="info", to see notebook execution publish_cmdline( parser=Parser(), writer_name=writer_name, diff --git a/myst_nb/new/execute.py b/myst_nb/new/execute.py index 75801200..71b759c3 100644 --- a/myst_nb/new/execute.py +++ b/myst_nb/new/execute.py @@ -85,7 +85,7 @@ def update_notebook( if cache.get_cache_record_of_staged(stage_record.pk) is None: executor = load_executor("basic", cache, logger=logger) executor.run_and_cache( - filter_pks=[stage_record.pk], # TODO specitfy, rather than filter + filter_pks=[stage_record.pk], # TODO specify, rather than filter allow_errors=nb_config.execution_allow_errors, timeout=nb_config.execution_timeout, run_in_temp=nb_config.execution_in_temp, diff --git a/myst_nb/new/parse.py b/myst_nb/new/parse.py index e100179c..9fdc10b1 100644 --- a/myst_nb/new/parse.py +++ b/myst_nb/new/parse.py @@ -35,16 +35,16 @@ def notebook_to_tokens( # Parse block tokens only first, leaving inline parsing to a second phase # (required to collect all reference definitions, before assessing references). 
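    # Illustrative example (not part of the original diff): with two markdown cells
    # such as
    #
    #     cell 1:  See [the docs][ref]
    #     cell 2:  [ref]: https://example.com
    #
    # the link reference definition lives in a different cell to the link that uses
    # it, so every cell must pass through block-level parsing (collecting all
    # definitions) before any inline parsing attempts to resolve "[ref]".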
metadata = nb_node_to_dict(notebook.metadata) - # save these keys on the document, rather than as docinfo + # save these special keys on the document, rather than as docinfo spec_data = { key: metadata.pop(key, None) for key in ("kernelspec", "language_info") } # attempt to get language lexer name - langinfo = spec_data.get("language_info", {}) - lexer = langinfo.get("pygments_lexer", langinfo.get("name", None)) + langinfo = spec_data.get("language_info") or {} + lexer = langinfo.get("pygments_lexer") or langinfo.get("name", None) if lexer is None: - lexer = spec_data.get("kernelspec", {}).get("language", None) + lexer = (spec_data.get("kernelspec") or {}).get("language", None) if lexer is None: logger.warning( "No source code lexer found in notebook metadata", subtype="lexer" diff --git a/myst_nb/new/read.py b/myst_nb/new/read.py index 9d9547ef..10c19b0f 100644 --- a/myst_nb/new/read.py +++ b/myst_nb/new/read.py @@ -1,27 +1,334 @@ -"""Module for reading notebooks from a string input.""" -from typing import Callable, Tuple +"""Module for reading notebook formats from a string input.""" +import json +from functools import partial +from pathlib import Path +from typing import Callable, Iterator, Optional, Union -from myst_parser.main import MdParserConfig -from nbformat import NotebookNode -from nbformat import reads as read_ipynb +import attr +import nbformat as nbf +import yaml +from markdown_it.renderer import RendererHTML +from myst_parser.main import MdParserConfig, create_md_parser from myst_nb.configuration import NbParserConfig NOTEBOOK_VERSION = 4 +"""The notebook version that readers should return.""" + + +@attr.s +class NbReader: + """A data class for reading a notebook format.""" + + read: Callable[[str], nbf.NotebookNode] = attr.ib() + """The function to read a notebook from a string.""" + md_config: MdParserConfig = attr.ib() + """The configuration for parsing markdown cells.""" + + +def standard_nb_read(text: str) -> nbf.NotebookNode: + """Read a standard .ipynb notebook from a string.""" + return nbf.reads(text, as_version=NOTEBOOK_VERSION) def create_nb_reader( - string: str, source: str, md_config: MdParserConfig, nb_config: NbParserConfig -) -> Tuple[Callable[[str], NotebookNode], MdParserConfig]: - """Create a notebook reader, given a string, source and configuration. + path: str, + md_config: MdParserConfig, + nb_config: NbParserConfig, + content: Union[None, str, Iterator[str]], +) -> Optional[NbReader]: + """Create a notebook reader, given a string, source path and configuration. Note, we do not directly parse to a notebook, since jupyter-cache functionality requires the reader. - :param string: The input string. - :param source: Path to or description of the input source being processed. + :param path: Path to the input source being processed. + :param nb_config: The configuration for parsing Notebooks. + :param md_config: The default configuration for parsing Markown. + :param content: The input string (optionally used to check for text-based notebooks) + + :returns: the notebook reader, and the (potentially modified) MdParserConfig, + or None if the input cannot be read as a notebook. 
+ """ + # the import is here so this module can be loaded without sphinx + from sphinx.util import import_object + + # get all possible readers + readers = nb_config.custom_formats.copy() + # add the default reader + readers.setdefault(".ipynb", (standard_nb_read, {}, False)) + + # we check suffixes ordered by longest first, to ensure we get the "closest" match + iterator = sorted(readers.items(), key=lambda x: len(x[0]), reverse=True) + for suffix, (reader, reader_kwargs, commonmark_only) in iterator: + if path.endswith(suffix): + if isinstance(reader, str): + # attempt to load the reader as an object path + reader = import_object(reader) + if commonmark_only: + # Markdown cells should be read as Markdown only + md_config = attr.evolve(md_config, commonmark_only=True) + return NbReader(partial(reader, **(reader_kwargs or {})), md_config) + + # a Markdown file is a special case, since we only treat it as a notebook, + # if it starts with certain "top-matter" + if content is not None and is_myst_markdown_notebook(content): + return NbReader( + partial( + read_myst_markdown_notebook, + config=md_config, + add_source_map=True, + path=path, + ), + md_config, + ) + + # if we get here, we did not find a reader + return None + + +def is_myst_markdown_notebook(text: Union[str, Iterator[str]]) -> bool: + """Check if the input is a MyST Markdown notebook. - :returns: the notebook reader, and the (potentially modified) MdParserConfig. + This is identified by the presence of a top-matter section, containing:: + + --- + jupytext: + text_representation: + format_name: myst + --- + + :param text: The input text. + :returns: True if the input is a markdown notebook. """ - # TODO handle converters - return lambda text: read_ipynb(text, as_version=NOTEBOOK_VERSION), md_config + if isinstance(text, str): + if not text.startswith("---"): # skip creating the line list in memory + return False + text = (line for line in text.splitlines()) + try: + if not next(text).startswith("---"): + return False + except StopIteration: + return False + top_matter = [] + for line in text: + if line.startswith("---") or line.startswith("..."): + break + top_matter.append(line.rstrip() + "\n") + try: + metadata = yaml.safe_load("".join(top_matter)) + assert isinstance(metadata, dict) + except Exception: + return False + if ( + metadata.get("jupytext", {}) + .get("text_representation", {}) + .get("format_name", None) + != "myst" + ): + return False + + return True + + # TODO move this to reader, since not strictly part of function objective + # or just allow nbformat/nbclient to handle the failure + # if "name" not in metadata.get("kernelspec", {}): + # raise IOError( + # "A myst notebook text-representation requires " "kernelspec/name metadata" + # ) + # if "display_name" not in metadata.get("kernelspec", {}): + # raise IOError( + # "A myst notebook text-representation requires " + # "kernelspec/display_name metadata" + # ) + + +def read_myst_markdown_notebook( + text, + config: MdParserConfig, + code_directive="{code-cell}", + raw_directive="{raw-cell}", + add_source_map=False, + path: Optional[str] = None, +) -> nbf.NotebookNode: + """Convert text written in the myst format to a notebook. 
+ + :param text: the file text + :param code_directive: the name of the directive to search for containing code cells + :param raw_directive: the name of the directive to search for containing raw cells + :param add_source_map: add a `source_map` key to the notebook metadata, + which is a list of the starting source line number for each cell. + :param path: path to notebook (required for :load:) + + :raises _MystMetadataParsingError if the metadata block is not valid JSON/YAML + + NOTE: we assume here that all of these directives are at the top-level, + i.e. not nested in other directives. + """ + # parse markdown file up to the block level (i.e. don't worry about inline text) + inline_config = attr.evolve( + config, disable_syntax=(config.disable_syntax + ["inline"]) + ) + parser = create_md_parser(inline_config, RendererHTML) + tokens = parser.parse(text + "\n") + lines = text.splitlines() + md_start_line = 0 + + # get the document metadata + metadata_nb = {} + if tokens[0].type == "front_matter": + metadata = tokens.pop(0) + md_start_line = metadata.map[1] + try: + metadata_nb = yaml.safe_load(metadata.content) + except (yaml.parser.ParserError, yaml.scanner.ScannerError) as error: + raise _MystMetadataParsingError("Notebook metadata: {}".format(error)) + + # create an empty notebook + nbf_version = nbf.v4 + kwargs = {"metadata": nbf.from_dict(metadata_nb)} + notebook = nbf_version.new_notebook(**kwargs) + source_map = [] # this is a list of the starting line number for each cell + + def _flush_markdown(start_line, token, md_metadata): + """When we find a cell we check if there is preceding text.o""" + endline = token.map[0] if token else len(lines) + md_source = _strip_blank_lines("\n".join(lines[start_line:endline])) + meta = nbf.from_dict(md_metadata) + if md_source: + source_map.append(start_line) + notebook.cells.append( + nbf_version.new_markdown_cell(source=md_source, metadata=meta) + ) + + # iterate through the tokens to identify notebook cells + nesting_level = 0 + md_metadata = {} + + for token in tokens: + + nesting_level += token.nesting + + if nesting_level != 0: + # we ignore fenced block that are nested, e.g. 
as part of lists, etc + continue + + if token.type == "fence" and token.info.startswith(code_directive): + _flush_markdown(md_start_line, token, md_metadata) + options, body_lines = _read_fenced_cell(token, len(notebook.cells), "Code") + # Parse :load: or load: tags and populate body with contents of file + if "load" in options: + body_lines = _load_code_from_file( + path, options["load"], token, body_lines + ) + meta = nbf.from_dict(options) + source_map.append(token.map[0] + 1) + notebook.cells.append( + nbf_version.new_code_cell(source="\n".join(body_lines), metadata=meta) + ) + md_metadata = {} + md_start_line = token.map[1] + + elif token.type == "fence" and token.info.startswith(raw_directive): + _flush_markdown(md_start_line, token, md_metadata) + options, body_lines = _read_fenced_cell(token, len(notebook.cells), "Raw") + meta = nbf.from_dict(options) + source_map.append(token.map[0] + 1) + notebook.cells.append( + nbf_version.new_raw_cell(source="\n".join(body_lines), metadata=meta) + ) + md_metadata = {} + md_start_line = token.map[1] + + elif token.type == "myst_block_break": + _flush_markdown(md_start_line, token, md_metadata) + md_metadata = _read_cell_metadata(token, len(notebook.cells)) + md_start_line = token.map[1] + + _flush_markdown(md_start_line, None, md_metadata) + + if add_source_map: + notebook.metadata["source_map"] = source_map + return notebook + + +class _MystMetadataParsingError(Exception): + """Error when parsing metadata from myst formatted text""" + + +class _LoadFileParsingError(Exception): + """Error when parsing files for code-blocks/code-cells""" + + +def _strip_blank_lines(text): + text = text.rstrip() + while text and text.startswith("\n"): + text = text[1:] + return text + + +class _MockDirective: + option_spec = {"options": True} + required_arguments = 0 + optional_arguments = 1 + has_content = True + + +def _read_fenced_cell(token, cell_index, cell_type): + from myst_parser.parse_directives import DirectiveParsingError, parse_directive_text + + try: + _, options, body_lines = parse_directive_text( + directive_class=_MockDirective, + first_line="", + content=token.content, + validate_options=False, + ) + except DirectiveParsingError as err: + raise _MystMetadataParsingError( + "{0} cell {1} at line {2} could not be read: {3}".format( + cell_type, cell_index, token.map[0] + 1, err + ) + ) + return options, body_lines + + +def _read_cell_metadata(token, cell_index): + metadata = {} + if token.content: + try: + metadata = json.loads(token.content.strip()) + except Exception as err: + raise _MystMetadataParsingError( + "Markdown cell {0} at line {1} could not be read: {2}".format( + cell_index, token.map[0] + 1, err + ) + ) + if not isinstance(metadata, dict): + raise _MystMetadataParsingError( + "Markdown cell {0} at line {1} is not a dict".format( + cell_index, token.map[0] + 1 + ) + ) + + return metadata + + +def _load_code_from_file(nb_path, file_name, token, body_lines): + """load source code from a file.""" + if nb_path is None: + raise _LoadFileParsingError("path to notebook not supplied for :load:") + file_path = Path(nb_path).parent.joinpath(file_name).resolve() + if len(body_lines): + pass # TODO this would make the reader dependent on sphinx + # line = token.map[0] if token.map else 0 + # msg = ( + # f"{nb_path}:{line} content of code-cell is being overwritten by " + # f":load: {file_name}" + # ) + # LOGGER.warning(msg) + try: + body_lines = file_path.read_text().split("\n") + except Exception: + raise _LoadFileParsingError("Can't read file 
from :load: {}".format(file_path)) + return body_lines diff --git a/myst_nb/new/sphinx_.py b/myst_nb/new/sphinx_.py index e2a485b7..7bcfe909 100644 --- a/myst_nb/new/sphinx_.py +++ b/myst_nb/new/sphinx_.py @@ -34,8 +34,7 @@ def setup(app): def sphinx_setup(app: Sphinx): """Initialize Sphinx extension.""" - # TODO perhaps there should be a way to turn this off, - # app.add_source_suffix(".md", "myst-nb") + app.add_source_suffix(".md", "myst-nb") app.add_source_suffix(".ipynb", "myst-nb") app.add_source_parser(MystNbParser) @@ -135,7 +134,7 @@ def parse(self, inputstring: str, document: nodes.document) -> None: :param inputstring: The source string to parse :param document: The root docutils node to add AST elements to """ - document_source = self.env.doc2path(self.env.docname) + document_path = self.env.doc2path(self.env.docname) # get a logger for this document logger = SphinxDocLogger(document) @@ -145,34 +144,21 @@ def parse(self, inputstring: str, document: nodes.document) -> None: # get notebook rendering configuration nb_config: NbParserConfig = self.env.mystnb_config - # convert inputstring to notebook - # TODO in sphinx, we also need to allow for the fact - # that the input could be a standard markdown file - nb_reader, md_config = create_nb_reader( - inputstring, document_source, md_config, nb_config - ) - notebook = nb_reader(inputstring) + # create a reader for the notebook + nb_reader = create_nb_reader(document_path, md_config, nb_config, inputstring) - # TODO update nb_config from notebook metadata + # If the nb_reader is None, then we default to a standard Markdown parser + if nb_reader is None: + return super().parse(inputstring, document) - # Setup the markdown parser - mdit_parser = create_md_parser(md_config, SphinxNbRenderer) - mdit_parser.options["document"] = document - mdit_parser.options["notebook"] = notebook - mdit_parser.options["nb_config"] = nb_config.as_dict() - mdit_env: Dict[str, Any] = {} + notebook = nb_reader.read(inputstring) - # load notebook element renderer class from entry-point name - # this is separate from DocutilsNbRenderer, so that users can override it - renderer_name = nb_config.render_plugin - nb_renderer: NbElementRenderer = load_renderer(renderer_name)( - mdit_parser.renderer, logger - ) - mdit_parser.options["nb_renderer"] = nb_renderer + # TODO update nb_config from notebook metadata # potentially execute notebook and/or populate outputs from cache + # TODO parse notebook reader? notebook, exec_data = update_notebook( - notebook, document_source, nb_config, logger + notebook, document_path, nb_config, logger ) if exec_data: # TODO note this is a different location to previous env.nb_execution_data @@ -188,6 +174,21 @@ def parse(self, inputstring: str, document: nodes.document) -> None: # TODO store/print error traceback? 
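        # Illustrative sketch (not part of the original diff): the custom format
        # mechanism used by create_nb_reader above is driven from conf.py, e.g.
        # (assuming jupytext is installed)
        #
        #     nb_custom_formats = {
        #         ".Rmd": ("jupytext.reads", {"fmt": "Rmd"}, False),
        #     }
        #
        # i.e. suffix -> (reader object path, reader kwargs, commonmark-only flag).
        # The reader is imported from its string path and called as
        # jupytext.reads(text, fmt="Rmd") to return an nbformat NotebookNode.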
+ # Setup the parser + mdit_parser = create_md_parser(nb_reader.md_config, SphinxNbRenderer) + mdit_parser.options["document"] = document + mdit_parser.options["notebook"] = notebook + mdit_parser.options["nb_config"] = nb_config.as_dict() + mdit_env: Dict[str, Any] = {} + + # load notebook element renderer class from entry-point name + # this is separate from SphinxNbRenderer, so that users can override it + renderer_name = nb_config.render_plugin + nb_renderer: NbElementRenderer = load_renderer(renderer_name)( + mdit_parser.renderer, logger + ) + mdit_parser.options["nb_renderer"] = nb_renderer + # parse to tokens mdit_tokens = notebook_to_tokens(notebook, mdit_parser, mdit_env, logger) # convert to docutils AST, which is added to the document diff --git a/tests/test_sphinx_builds.py b/tests/test_sphinx_builds.py index 61e77604..d2c76206 100644 --- a/tests/test_sphinx_builds.py +++ b/tests/test_sphinx_builds.py @@ -21,6 +21,19 @@ def test_basic_run(sphinx_run, file_regression): ) +@pytest.mark.sphinx_params( + "basic_unrun.md", + conf={"extensions": ["myst_nb.new.sphinx_"], "nb_execution_mode": "off"}, +) +def test_basic_run_md(sphinx_run, file_regression): + sphinx_run.build() + # print(sphinx_run.status()) + assert sphinx_run.warnings() == "" + file_regression.check( + sphinx_run.get_doctree().pformat(), extension=".xml", encoding="utf8" + ) + + @pytest.mark.sphinx_params( "complex_outputs.ipynb", conf={"extensions": ["myst_nb.new.sphinx_"], "nb_execution_mode": "off"}, diff --git a/tests/test_sphinx_builds/test_basic_run_md.xml b/tests/test_sphinx_builds/test_basic_run_md.xml new file mode 100644 index 00000000..a24bae2e --- /dev/null +++ b/tests/test_sphinx_builds/test_basic_run_md.xml @@ -0,0 +1,13 @@ +<document source="basic_unrun"> + <section classes="tex2jax_ignore mathjax_ignore" ids="a-title" names="a\ title"> + <title> + a title + <paragraph> + this was created using + <literal> + jupytext --to myst tests/notebooks/basic_unrun.ipynb + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="True" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="python" xml:space="preserve"> + a=1 + print(a) From 7c9358126e355f78df4bad5f77e70b77f7d489c9 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Tue, 4 Jan 2022 14:21:52 +0100 Subject: [PATCH 18/75] Improve execute code --- .gitignore | 4 +- myst_nb/configuration.py | 2 +- myst_nb/docutils_.py | 3 -- myst_nb/new/execute.py | 113 ++++++++++++++++++++++++++++++--------- myst_nb/new/read.py | 2 +- myst_nb/new/sphinx_.py | 13 +++-- 6 files changed, 101 insertions(+), 36 deletions(-) diff --git a/.gitignore b/.gitignore index 389567f5..dfdb11a6 100644 --- a/.gitignore +++ b/.gitignore @@ -130,9 +130,11 @@ dmypy.json .pyre/ # Jupyter Cache -docs/.jupyter_cache +.jupyter_cache # OSX .DS_Store .vscode/ + +todos.md diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index 3ccf7f68..520d2858 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -187,7 +187,7 @@ class NbParserConfig: }, ) execution_cache_path: str = attr.ib( - default="", + default="", # No default, so that sphinx can set it inside outdir, if empty validator=instance_of(str), metadata={ "help": "Path to folder for caching notebooks", diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 2045ee43..f7852ea5 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -89,15 +89,12 @@ def parse(self, inputstring: str, document: 
nodes.document) -> None: # TODO update nb_config from notebook metadata # potentially execute notebook and/or populate outputs from cache - # TODO parse notebook reader? notebook, exec_data = update_notebook( notebook, document_source, nb_config, logger ) if exec_data: document["nb_exec_data"] = exec_data - # TODO store/print error traceback? - # Setup the markdown parser mdit_parser = create_md_parser(nb_reader.md_config, DocutilsNbRenderer) mdit_parser.options["document"] = document diff --git a/myst_nb/new/execute.py b/myst_nb/new/execute.py index 71b759c3..772126de 100644 --- a/myst_nb/new/execute.py +++ b/myst_nb/new/execute.py @@ -1,6 +1,6 @@ """Module for executing notebooks.""" import os -from contextlib import nullcontext +from contextlib import nullcontext, suppress from datetime import datetime from logging import Logger from pathlib import Path @@ -8,7 +8,8 @@ from typing import Optional, Tuple from jupyter_cache import get_cache -from jupyter_cache.executors import load_executor +from jupyter_cache.base import NbBundleIn +from jupyter_cache.cache.db import NbStageRecord from jupyter_cache.executors.utils import single_nb_execution from nbformat import NotebookNode from typing_extensions import TypedDict @@ -27,7 +28,7 @@ class ExecutionResult(TypedDict): """method used to execute the notebook""" succeeded: bool """True if the notebook executed successfully""" - # TODO error_log: str + # TODO error def update_notebook( @@ -47,17 +48,27 @@ def update_notebook( :returns: The updated notebook, and the (optional) execution metadata. """ + # path should only be None when using docutils programmatically + path = Path(source) if Path(source).is_file() else None + exec_metadata: Optional[ExecutionResult] = None if nb_config.execution_mode == "force": - # TODO what if source is a descriptor? 
- path = str(Path(source).parent) - cwd_context = ( - TemporaryDirectory() if nb_config.execution_in_temp else nullcontext(path) - ) + + # setup the execution current working directory + if nb_config.execution_in_temp: + cwd_context = TemporaryDirectory() + else: + if path is None: + raise ValueError( + f"source must exist as file, if execution_in_temp=False: {source}" + ) + cwd_context = nullcontext(str(path.parent)) + + # execute in the context of the current working directory with cwd_context as cwd: cwd = os.path.abspath(cwd) - logger.info(f"Executing notebook in {cwd}") + logger.info(f"Executing notebook: CWD={cwd!r}") result = single_nb_execution( notebook, cwd=cwd, @@ -66,6 +77,12 @@ def update_notebook( ) logger.info(f"Executed notebook in {result.time:.2f} seconds") + if result.err: + msg = f"Executing notebook failed: {result.err.__class__.__name__}" + if nb_config.execution_show_tb: + msg += f"\n{result.exc_string}" + logger.warning(msg, subtype="exec") + exec_metadata = { "mtime": datetime.now().timestamp(), "runtime": result.time, @@ -73,33 +90,77 @@ def update_notebook( "succeeded": False if result.err else True, } - # TODO handle errors - elif nb_config.execution_mode == "cache": - # TODO for sphinx, the default would be in the output directory - # also in sphinx we run and cache up front - cache = get_cache(nb_config.execution_cache_path or ".cache") - stage_record = cache.stage_notebook_file(source) - # TODO handle converters - if cache.get_cache_record_of_staged(stage_record.pk) is None: - executor = load_executor("basic", cache, logger=logger) - executor.run_and_cache( - filter_pks=[stage_record.pk], # TODO specify, rather than filter + # setup the cache + cache = get_cache(nb_config.execution_cache_path or ".jupyter_cache") + # TODO config on what notebook/cell metadata to merge + + # attempt to match the notebook to one in the cache + cache_record = None + with suppress(KeyError): + cache_record = cache.match_cache_notebook(notebook) + + # use the cached notebook if it exists + if cache_record is not None: + logger.info(f"Using cached notebook: PK={cache_record.pk}") + _, notebook = cache.merge_match_into_notebook(notebook) + exec_metadata = { + "mtime": cache_record.created.timestamp(), + "runtime": cache_record.data.get("execution_seconds", None), + "method": nb_config.execution_mode, + "succeeded": True, + } + return notebook, exec_metadata + + if path is None: + raise ValueError( + f"source must exist as file, if execution_mode is 'cache': {source}" + ) + + # attempt to execute the notebook + stage_record = cache.stage_notebook_file(str(path)) + # TODO do in try/except, in case of db write errors + NbStageRecord.remove_tracebacks([stage_record.pk], cache.db) + cwd_context = ( + TemporaryDirectory() + if nb_config.execution_in_temp + else nullcontext(str(path.parent)) + ) + with cwd_context as cwd: + cwd = os.path.abspath(cwd) + logger.info(f"Executing notebook: CWD={cwd!r}") + result = single_nb_execution( + notebook, + cwd=cwd, allow_errors=nb_config.execution_allow_errors, timeout=nb_config.execution_timeout, - run_in_temp=nb_config.execution_in_temp, ) - else: - logger.info("Using cached notebook outputs") + logger.info(f"Executed notebook in {result.time:.2f} seconds") - _, notebook = cache.merge_match_into_notebook(notebook) + # handle success / failure cases + # TODO do in try/except to be careful (in case of database write errors? 
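+ # a failed run stores its traceback on the staged record, while a successful
+ # run is cached as a notebook bundle (with its execution time) so that later
+ # builds can reuse the outputs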
+ if result.err: + msg = f"Executing notebook failed: {result.err.__class__.__name__}" + if nb_config.execution_show_tb: + msg += f"\n{result.exc_string}" + logger.warning(msg, subtype="exec") + NbStageRecord.set_traceback(stage_record.uri, result.exc_string, cache.db) + else: + cache_record = cache.cache_notebook_bundle( + NbBundleIn( + notebook, stage_record.uri, data={"execution_seconds": result.time} + ), + check_validity=False, + overwrite=True, + ) + logger.info(f"Cached executed notebook: PK={cache_record.pk}") exec_metadata = { "mtime": datetime.now().timestamp(), - "runtime": None, # TODO get runtime from cache + "runtime": result.time, "method": nb_config.execution_mode, - "succeeded": True, # TODO handle errors + "succeeded": False if result.err else True, } return notebook, exec_metadata diff --git a/myst_nb/new/read.py b/myst_nb/new/read.py index 10c19b0f..e1352e45 100644 --- a/myst_nb/new/read.py +++ b/myst_nb/new/read.py @@ -21,7 +21,7 @@ class NbReader: """A data class for reading a notebook format.""" read: Callable[[str], nbf.NotebookNode] = attr.ib() - """The function to read a notebook from a string.""" + """The function to read a notebook from a (utf8) string.""" md_config: MdParserConfig = attr.ib() """The configuration for parsing markdown cells.""" diff --git a/myst_nb/new/sphinx_.py b/myst_nb/new/sphinx_.py index 7bcfe909..e235fb2d 100644 --- a/myst_nb/new/sphinx_.py +++ b/myst_nb/new/sphinx_.py @@ -38,7 +38,7 @@ def sphinx_setup(app: Sphinx): app.add_source_suffix(".ipynb", "myst-nb") app.add_source_parser(MystNbParser) - # Add myst-parser transforms and configuration + # Add myst-parser configuration and transforms setup_myst_parser(app) for name, default, field in NbParserConfig().as_triple(): @@ -102,9 +102,15 @@ def create_mystnb_config(app): app.env.mystnb_config = NbParserConfig() # update the output_folder (for writing external files like images), + # and the execution_cache_path (for caching notebook outputs) # to a set path within the sphinx build folder output_folder = Path(app.outdir).parent.joinpath("jupyter_execute").resolve() - app.env.mystnb_config = app.env.mystnb_config.copy(output_folder=str(output_folder)) + exec_cache_path = app.env.mystnb_config.execution_cache_path + if not exec_cache_path: + exec_cache_path = Path(app.outdir).parent.joinpath(".jupyter_cache").resolve() + app.env.mystnb_config = app.env.mystnb_config.copy( + output_folder=str(output_folder), execution_cache_path=str(exec_cache_path) + ) def add_exclude_patterns(app: Sphinx, config): @@ -156,7 +162,6 @@ def parse(self, inputstring: str, document: nodes.document) -> None: # TODO update nb_config from notebook metadata # potentially execute notebook and/or populate outputs from cache - # TODO parse notebook reader? notebook, exec_data = update_notebook( notebook, document_path, nb_config, logger ) @@ -172,7 +177,7 @@ def parse(self, inputstring: str, document: nodes.document) -> None: # this and just check the mtime of the exec_data instead, # using that for the the exec_table extension - # TODO store/print error traceback? 
+ # TODO store error traceback in outdir and log its path # Setup the parser mdit_parser = create_md_parser(nb_reader.md_config, SphinxNbRenderer) From 7148c5ffcaafe68b251b6a97745fb33b0c910e1d Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Tue, 4 Jan 2022 18:48:49 +0100 Subject: [PATCH 19/75] Add UnexpectedCellDirective --- myst_nb/docutils_.py | 12 +++++++++++- myst_nb/new/execute.py | 2 +- myst_nb/new/read.py | 38 +++++++++++++++++++++++++++++++++++++- myst_nb/new/sphinx_.py | 14 ++++++++++---- 4 files changed, 59 insertions(+), 7 deletions(-) diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index f7852ea5..b50a8eec 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -5,6 +5,7 @@ import nbformat from docutils import nodes from docutils.core import default_description, publish_cmdline +from docutils.parsers.rst.directives import register_directive from markdown_it.tree import SyntaxTreeNode from myst_parser.docutils_ import DOCUTILS_EXCLUDED_ARGS as DOCUTILS_EXCLUDED_ARGS_MYST from myst_parser.docutils_ import Parser as MystParser @@ -17,7 +18,12 @@ from myst_nb.new.execute import update_notebook from myst_nb.new.loggers import DEFAULT_LOG_TYPE, DocutilsDocLogger from myst_nb.new.parse import notebook_to_tokens -from myst_nb.new.read import NbReader, read_myst_markdown_notebook, standard_nb_read +from myst_nb.new.read import ( + NbReader, + UnexpectedCellDirective, + read_myst_markdown_notebook, + standard_nb_read, +) from myst_nb.new.render import NbElementRenderer, load_renderer from myst_nb.render_outputs import coalesce_streams @@ -50,6 +56,10 @@ def parse(self, inputstring: str, document: nodes.document) -> None: """ document_source = document["source"] + # register special directives + register_directive("code-cell", UnexpectedCellDirective) + register_directive("raw-cell", UnexpectedCellDirective) + # get a logger for this document logger = DocutilsDocLogger(document) diff --git a/myst_nb/new/execute.py b/myst_nb/new/execute.py index 772126de..2c11fb5b 100644 --- a/myst_nb/new/execute.py +++ b/myst_nb/new/execute.py @@ -119,7 +119,7 @@ def update_notebook( ) # attempt to execute the notebook - stage_record = cache.stage_notebook_file(str(path)) + stage_record = cache.stage_notebook_file(str(path)) # TODO record nb reader # TODO do in try/except, in case of db write errors NbStageRecord.remove_tracebacks([stage_record.pk], cache.db) cwd_context = ( diff --git a/myst_nb/new/read.py b/myst_nb/new/read.py index e1352e45..5a3fd926 100644 --- a/myst_nb/new/read.py +++ b/myst_nb/new/read.py @@ -7,10 +7,12 @@ import attr import nbformat as nbf import yaml +from docutils.parsers.rst import Directive from markdown_it.renderer import RendererHTML from myst_parser.main import MdParserConfig, create_md_parser from myst_nb.configuration import NbParserConfig +from myst_nb.new.loggers import DocutilsDocLogger, SphinxDocLogger NOTEBOOK_VERSION = 4 """The notebook version that readers should return.""" @@ -278,7 +280,7 @@ def _read_fenced_cell(token, cell_index, cell_type): from myst_parser.parse_directives import DirectiveParsingError, parse_directive_text try: - _, options, body_lines = parse_directive_text( + _, options, body_lines, _ = parse_directive_text( directive_class=_MockDirective, first_line="", content=token.content, @@ -332,3 +334,37 @@ def _load_code_from_file(nb_path, file_name, token, body_lines): except Exception: raise _LoadFileParsingError("Can't read file from :load: {}".format(file_path)) return body_lines + + +class 
UnexpectedCellDirective(Directive): + """The `{code-cell}`` and ``{raw-cell}`` directives, are special cases, + which are picked up by the MyST Markdown reader to convert them into notebooks. + + If any are left in the parsed Markdown, it probably means that they were nested + inside another directive, which is not allowed. + + Therefore, we log a warning if it is triggered, and discard it. + + """ + + optional_arguments = 1 + final_argument_whitespace = True + has_content = True + + def run(self): + """Run the directive.""" + message = ( + "Found an unexpected `code-cell` or `raw-cell` directive. " + "Either this file was not converted to a notebook, " + "because Jupytext header content was missing, " + "or the `code-cell` was not converted, because it is nested. " + "See https://myst-nb.readthedocs.io/en/latest/use/markdown.html " + "for more information." + ) + document = self.state.document + if hasattr(document.settings, "env"): + logger = SphinxDocLogger(document) + else: + logger = DocutilsDocLogger(document) + logger.warning(message, line=self.lineno, subtype="nbcell") + return [] diff --git a/myst_nb/new/sphinx_.py b/myst_nb/new/sphinx_.py index e235fb2d..62f8f24c 100644 --- a/myst_nb/new/sphinx_.py +++ b/myst_nb/new/sphinx_.py @@ -20,7 +20,7 @@ from myst_nb.new.execute import update_notebook from myst_nb.new.loggers import DEFAULT_LOG_TYPE, SphinxDocLogger from myst_nb.new.parse import notebook_to_tokens -from myst_nb.new.read import create_nb_reader +from myst_nb.new.read import UnexpectedCellDirective, create_nb_reader from myst_nb.new.render import NbElementRenderer, load_renderer from myst_nb.render_outputs import coalesce_streams @@ -53,13 +53,19 @@ def sphinx_setup(app: Sphinx): # generate notebook configuration from Sphinx configuration app.connect("builder-inited", create_mystnb_config) - # ensure notebook checkpoints are excluded + # ensure notebook checkpoints are excluded from parsing app.connect("config-inited", add_exclude_patterns) + + # add directive to ensure all notebook cells are converted + app.add_directive("code-cell", UnexpectedCellDirective) + app.add_directive("raw-cell", UnexpectedCellDirective) + + # add post-transform for selecting mime type from a bundle + app.add_post_transform(SelectMimeType) + # add HTML resources app.connect("builder-inited", add_static_path) app.add_css_file("mystnb.css") - # add post-transform for selecting mime type from a bundle - app.add_post_transform(SelectMimeType) # TODO do we need to add lexers, if they are anyhow added via entry-points? 
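A minimal sketch of the case the new ``UnexpectedCellDirective`` guards against (the file name and MyST source below are made up for illustration, not taken from the patches): a ``{code-cell}`` nested inside another directive is never lifted out into a notebook cell by the MyST Markdown reader, so it survives to docutils/Sphinx parsing, where the registered fallback directive logs a warning and discards the content rather than surfacing as an unknown-directive error.

# hypothetical contents of a file such as nested.md
NESTED_SOURCE = '''\
````{note}
```{code-cell} python
print("nested fences are not converted to notebook cells")
```
````
'''
# parsing a file like this with the MyST-NB parsers invokes
# UnexpectedCellDirective for the inner {code-cell}, emitting a warning with
# subtype "nbcell" and dropping the directive body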
From 50230345ccb249a326e6c296c0f757892a6d8fad Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Tue, 4 Jan 2022 19:24:49 +0100 Subject: [PATCH 20/75] Move coalesce_streams to new module --- myst_nb/docutils_.py | 3 +-- myst_nb/new/render.py | 52 ++++++++++++++++++++++++++++++++++++++++++ myst_nb/new/sphinx_.py | 3 +-- 3 files changed, 54 insertions(+), 4 deletions(-) diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index b50a8eec..3160e4cf 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -24,8 +24,7 @@ read_myst_markdown_notebook, standard_nb_read, ) -from myst_nb.new.render import NbElementRenderer, load_renderer -from myst_nb.render_outputs import coalesce_streams +from myst_nb.new.render import NbElementRenderer, coalesce_streams, load_renderer DOCUTILS_EXCLUDED_ARGS = { f.name for f in NbParserConfig.get_fields() if f.metadata.get("docutils_exclude") diff --git a/myst_nb/new/render.py b/myst_nb/new/render.py index 5172fead..db84dd43 100644 --- a/myst_nb/new/render.py +++ b/myst_nb/new/render.py @@ -53,6 +53,58 @@ def strip_latex_delimiters(source): return source +_RGX_CARRIAGERETURN = re.compile(r".*\r(?=[^\n])") +_RGX_BACKSPACE = re.compile(r"[^\n]\b") + + +def coalesce_streams(outputs: List[NotebookNode]) -> List[NotebookNode]: + """Merge all stream outputs with shared names into single streams. + + This ensure deterministic outputs. + + Adapted from: + https://github.com/computationalmodelling/nbval/blob/master/nbval/plugin.py. + """ + if not outputs: + return [] + + new_outputs = [] + streams = {} + for output in outputs: + if output["output_type"] == "stream": + if output["name"] in streams: + streams[output["name"]]["text"] += output["text"] + else: + new_outputs.append(output) + streams[output["name"]] = output + else: + new_outputs.append(output) + + # process \r and \b characters + for output in streams.values(): + old = output["text"] + while len(output["text"]) < len(old): + old = output["text"] + # Cancel out anything-but-newline followed by backspace + output["text"] = _RGX_BACKSPACE.sub("", output["text"]) + # Replace all carriage returns not followed by newline + output["text"] = _RGX_CARRIAGERETURN.sub("", output["text"]) + + # We also want to ensure stdout and stderr are always in the same consecutive order, + # because they are asynchronous, so order isn't guaranteed. 
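+ # e.g. a (stderr, stdout) pair emitted by the same cell is swapped to
+ # (stdout, stderr)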
+ for i, output in enumerate(new_outputs): + if output["output_type"] == "stream" and output["name"] == "stderr": + if ( + len(new_outputs) >= i + 2 + and new_outputs[i + 1]["output_type"] == "stream" + and new_outputs[i + 1]["name"] == "stdout" + ): + stdout = new_outputs.pop(i + 1) + new_outputs.insert(i, stdout) + + return new_outputs + + class NbElementRenderer: """A class for rendering notebook elements.""" diff --git a/myst_nb/new/sphinx_.py b/myst_nb/new/sphinx_.py index 62f8f24c..5e8b82d3 100644 --- a/myst_nb/new/sphinx_.py +++ b/myst_nb/new/sphinx_.py @@ -21,8 +21,7 @@ from myst_nb.new.loggers import DEFAULT_LOG_TYPE, SphinxDocLogger from myst_nb.new.parse import notebook_to_tokens from myst_nb.new.read import UnexpectedCellDirective, create_nb_reader -from myst_nb.new.render import NbElementRenderer, load_renderer -from myst_nb.render_outputs import coalesce_streams +from myst_nb.new.render import NbElementRenderer, coalesce_streams, load_renderer SPHINX_LOGGER = sphinx_logging.getLogger(__name__) UNSET = "--unset--" From b0c2183b3b2fb835cf0960880e1f2e4f41efba88 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Tue, 4 Jan 2022 23:33:42 +0100 Subject: [PATCH 21/75] Handle ipywidgets --- myst_nb/configuration.py | 27 ++ myst_nb/docutils_.py | 57 ++- myst_nb/new/parse.py | 35 +- myst_nb/new/render.py | 16 +- myst_nb/new/sphinx_.py | 116 +++-- setup.cfg | 4 +- tests/notebooks/ipywidgets.ipynb | 699 +++++++++++++++++++++++++++++++ tests/test_sphinx_builds.py | 17 + 8 files changed, 875 insertions(+), 96 deletions(-) create mode 100644 tests/notebooks/ipywidgets.ipynb diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index 520d2858..b51ffb01 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -134,6 +134,22 @@ def render_priority_factory() -> Dict[str, Sequence[str]]: return output +def ipywidgets_js_factory() -> Dict[str, Dict[str, str]]: + """Create a default ipywidgets js dict.""" + # see: https://ipywidgets.readthedocs.io/en/7.6.5/embedding.html + return { + # Load RequireJS, used by the IPywidgets for dependency management + "https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js": { + "integrity": "sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=", + "crossorigin": "anonymous", + }, + # Load IPywidgets bundle for embedding. + "https://unpkg.com/@jupyter-widgets/html-manager@^0.20.0/dist/embed-amd.js": { + "crossorigin": "anonymous", + }, + } + + @attr.s() class NbParserConfig: """Global configuration options for the MyST-NB parser. 
@@ -328,6 +344,17 @@ class NbParserConfig: "(in group `myst_nb.output_renderer`)" }, ) + ipywidgets_js: Dict[str, Dict[str, str]] = attr.ib( + factory=ipywidgets_js_factory, + validator=deep_mapping( + instance_of(str), deep_mapping(instance_of(str), instance_of(str)) + ), + metadata={ + "help": "Javascript to be loaded on pages containing ipywidgets", + "docutils_exclude": True, + }, + repr=False, + ) @classmethod def get_fields(cls) -> Tuple[attr.Attribute, ...]: diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 3160e4cf..bb1a79cb 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -6,6 +6,7 @@ from docutils import nodes from docutils.core import default_description, publish_cmdline from docutils.parsers.rst.directives import register_directive +from markdown_it.token import Token from markdown_it.tree import SyntaxTreeNode from myst_parser.docutils_ import DOCUTILS_EXCLUDED_ARGS as DOCUTILS_EXCLUDED_ARGS_MYST from myst_parser.docutils_ import Parser as MystParser @@ -132,6 +133,7 @@ def parse(self, inputstring: str, document: nodes.document) -> None: path = ["rendered.ipynb"] nb_renderer.write_file(path, content, overwrite=True) # TODO also write CSS to output folder if necessary or always? + # TODO we also need to load JS URLs if ipywidgets are present and HTML class DocutilsNbRenderer(DocutilsRenderer): @@ -151,11 +153,43 @@ def get_nb_config(self, key: str, cell_index: Optional[int]) -> Any: # TODO handle KeyError better return self.config["nb_config"][key] - def render_nb_spec_data(self, token: SyntaxTreeNode) -> None: - """Add a notebook spec data to the document attributes.""" - # TODO in sphinx moves these to env metadata? - self.document["nb_kernelspec"] = token.meta["kernelspec"] - self.document["nb_language_info"] = token.meta["language_info"] + def render_nb_metadata(self, token: SyntaxTreeNode) -> None: + """Render the notebook metadata.""" + metadata = dict(token.meta) + + # save these special keys on the document, rather than as docinfo + self.document["nb_kernelspec"] = metadata.pop("kernelspec", None) + self.document["nb_language_info"] = metadata.pop("language_info", None) + + # TODO should we provide hook for NbElementRenderer? + + # TODO how to handle ipywidgets in docutils? 
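+ # the widget state is popped here so that the raw JSON is not forwarded to
+ # the front-matter/docinfo renderer below; it is currently unused in docutils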
+ ipywidgets = metadata.pop("widgets", None) # noqa: F841 + # ipywidgets_mime = (ipywidgets or {}).get(WIDGET_STATE_MIMETYPE, {}) + # ipywidgets_state = ipywidgets_mime.get("state", None) + + # forward the rest to the front_matter renderer + self.render_front_matter( + Token( + "front_matter", + "", + 0, + map=[0, 0], + content=metadata, # type: ignore[arg-type] + ), + ) + + def render_nb_widget_state(self, token: SyntaxTreeNode) -> None: + """Render the HTML defining the ipywidget state.""" + # TODO handle this more generally, + # by just passing all notebook metadata to the nb_renderer + node = self.nb_renderer.render_widget_state( + mime_type=token.attrGet("type"), data=token.meta + ) + node["nb_element"] = "widget_state" + self.add_line_and_source_path(node, token) + # always append to bottom of the document + self.document.append(node) def render_nb_cell_markdown(self, token: SyntaxTreeNode) -> None: """Render a notebook markdown cell.""" @@ -298,19 +332,6 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: subtype="output_type", ) - def render_nb_widget_state(self, token: SyntaxTreeNode) -> None: - """Render the HTML defining the ipywidget state.""" - # TODO handle this more generally, - # by just passing all notebook metadata to the nb_renderer - # TODO in docutils we also need to load JS URLs if widgets are present and HTML - node = self.nb_renderer.render_widget_state( - mime_type=token.attrGet("type"), data=token.meta - ) - node["nb_element"] = "widget_state" - self.add_line_and_source_path(node, token) - # always append to bottom of the document - self.document.append(node) - def _run_cli(writer_name: str, writer_description: str, argv: Optional[List[str]]): """Run the command line interface for a particular writer.""" diff --git a/myst_nb/new/parse.py b/myst_nb/new/parse.py index 9fdc10b1..7f2d8bdd 100644 --- a/myst_nb/new/parse.py +++ b/myst_nb/new/parse.py @@ -7,8 +7,6 @@ from markdown_it.token import Token from nbformat import NotebookNode -from myst_nb.new.render import WIDGET_STATE_MIMETYPE - def nb_node_to_dict(node: NotebookNode) -> Dict[str, Any]: """Recursively convert a notebook node to a dict.""" @@ -35,32 +33,19 @@ def notebook_to_tokens( # Parse block tokens only first, leaving inline parsing to a second phase # (required to collect all reference definitions, before assessing references). 
metadata = nb_node_to_dict(notebook.metadata) - # save these special keys on the document, rather than as docinfo - spec_data = { - key: metadata.pop(key, None) for key in ("kernelspec", "language_info") - } # attempt to get language lexer name - langinfo = spec_data.get("language_info") or {} + langinfo = metadata.get("language_info") or {} lexer = langinfo.get("pygments_lexer") or langinfo.get("name", None) if lexer is None: - lexer = (spec_data.get("kernelspec") or {}).get("language", None) + lexer = (metadata.get("kernelspec") or {}).get("language", None) if lexer is None: logger.warning( "No source code lexer found in notebook metadata", subtype="lexer" ) - # extract widgets - widgets = metadata.pop("widgets", None) block_tokens = [ - Token( - "front_matter", - "", - 0, - map=[0, 0], - content=metadata, # type: ignore[arg-type] - ), - Token("nb_spec_data", "", 0, meta=spec_data), + Token("nb_metadata", "", 0, meta=metadata, map=[0, 0]), ] for cell_index, nb_cell in enumerate(notebook.cells): @@ -163,20 +148,6 @@ def notebook_to_tokens( # add tokens to list block_tokens.extend(tokens) - # The widget state will be embedded as a script, at the end of HTML output - widget_state = (widgets or {}).get(WIDGET_STATE_MIMETYPE, None) - if widget_state and widget_state.get("state", None): - block_tokens.append( - Token( - "nb_widget_state", - "script", - 0, - attrs={"type": WIDGET_STATE_MIMETYPE}, - meta={"state": widget_state}, - map=[0, 0], - ) - ) - # Now all definitions have been gathered, run the inline parsing phase state = StateCore("", mdit_parser, mdit_env, block_tokens) with mdit_parser.reset_rules(): diff --git a/myst_nb/new/render.py b/myst_nb/new/render.py index db84dd43..ea949e8f 100644 --- a/myst_nb/new/render.py +++ b/myst_nb/new/render.py @@ -453,7 +453,7 @@ def render_widget_view( :param cell_index: the index of the cell containing the output :param source_line: the line number of the cell in the source document """ - content = json.dumps(sanitize_script_content(data)) + content = sanitize_script_content(json.dumps(data)) return [ nodes.raw( text=f'<script type="{WIDGET_VIEW_MIMETYPE}">{content}</script>', @@ -461,20 +461,6 @@ def render_widget_view( ) ] - def render_widget_state(self, mime_type: str, data: dict) -> nodes.Element: - """Render a notebook application/vnd.jupyter.widget-state+json mime output. 
- - :param mime_type: the key from the "notebook.metdata.widgets" dict - :param data: the value from the "notebook.metdata.widgets" dict - """ - # The JSON inside the script tag is identified and parsed by: - # https://github.com/jupyter-widgets/ipywidgets/blob/32f59acbc63c3ff0acf6afa86399cb563d3a9a86/packages/html-manager/src/libembed.ts#L36 - content = json.dumps(sanitize_script_content(data["state"])) - return nodes.raw( - text=f'<script type="{mime_type}">\n{content}\n</script>', - format="html", - ) - @lru_cache(maxsize=10) def load_renderer(name: str) -> NbElementRenderer: diff --git a/myst_nb/new/sphinx_.py b/myst_nb/new/sphinx_.py index 5e8b82d3..13ad63d4 100644 --- a/myst_nb/new/sphinx_.py +++ b/myst_nb/new/sphinx_.py @@ -1,9 +1,11 @@ """An extension for sphinx""" +import json from pathlib import Path from typing import Any, Dict, List, Optional, Sequence import nbformat from docutils import nodes +from markdown_it.token import Token from markdown_it.tree import SyntaxTreeNode from myst_parser import setup_sphinx as setup_myst_parser from myst_parser.docutils_renderer import token_line @@ -12,6 +14,7 @@ from myst_parser.sphinx_renderer import SphinxRenderer from nbformat import NotebookNode from sphinx.application import Sphinx +from sphinx.environment import BuildEnvironment from sphinx.transforms.post_transforms import SphinxPostTransform from sphinx.util import logging as sphinx_logging @@ -21,7 +24,13 @@ from myst_nb.new.loggers import DEFAULT_LOG_TYPE, SphinxDocLogger from myst_nb.new.parse import notebook_to_tokens from myst_nb.new.read import UnexpectedCellDirective, create_nb_reader -from myst_nb.new.render import NbElementRenderer, coalesce_streams, load_renderer +from myst_nb.new.render import ( + WIDGET_STATE_MIMETYPE, + NbElementRenderer, + coalesce_streams, + load_renderer, + sanitize_script_content, +) SPHINX_LOGGER = sphinx_logging.getLogger(__name__) UNSET = "--unset--" @@ -65,6 +74,8 @@ def sphinx_setup(app: Sphinx): # add HTML resources app.connect("builder-inited", add_static_path) app.add_css_file("mystnb.css") + # note, this event is only available in Sphinx >= 3.5 + app.connect("html-page-context", install_ipywidgets) # TODO do we need to add lexers, if they are anyhow added via entry-points? @@ -130,6 +141,47 @@ def add_static_path(app: Sphinx): app.config.html_static_path.append(str(static_path)) +def install_ipywidgets(app: Sphinx, pagename: str, *args: Any, **kwargs: Any) -> None: + """Install ipywidgets Javascript, if required on the page.""" + if app.builder.format != "html": + return + ipywidgets_state = get_doc_metadata(app.env, pagename, "ipywidgets_state") + if ipywidgets_state is not None: + # see: https://ipywidgets.readthedocs.io/en/7.6.5/embedding.html + + for path, kwargs in app.env.config["nb_ipywidgets_js"].items(): + app.add_js_file(path, **kwargs) + + # The state of all the widget models on the page + # TODO how to add data-jupyter-widgets-cdn="https://cdn.jsdelivr.net/npm/"? 
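+ # add_js_file with a None path emits an inline <script> tag whose body is the
+ # widget-state JSON; the html-manager bundle loaded above finds it by this
+ # mime-type and rebuilds the widget views from it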
+ app.add_js_file( + None, + type="application/vnd.jupyter.widget-state+json", + body=ipywidgets_state, + ) + + +def store_doc_metadata(env: BuildEnvironment, docname: str, key: str, value: Any): + """Store myst-nb metadata for a document.""" + # Data in env.metadata is correctly handled, by sphinx.MetadataCollector, + # for clearing removed documents and for merging on parallel builds + + # however, one drawback is that it also extracts all docinfo to here, + # so we prepend the key name to hopefully avoid it being overwritten + + # TODO is it worth implementing a custom MetadataCollector? + if docname not in env.metadata: + env.metadata[docname] = {} + env.metadata[docname][f"__mystnb__{key}"] = value + + +def get_doc_metadata( + env: BuildEnvironment, docname: str, key: str, default=None +) -> Any: + """Get myst-nb metadata for a document.""" + return env.metadata.get(docname, {}).get(f"__mystnb__{key}", default) + + class MystNbParser(MystParser): """Sphinx parser for Jupyter Notebook formats, containing MyST Markdown.""" @@ -171,16 +223,7 @@ def parse(self, inputstring: str, document: nodes.document) -> None: notebook, document_path, nb_config, logger ) if exec_data: - # TODO note this is a different location to previous env.nb_execution_data - # but it is a more standard place, which will be merged on parallel builds - # (via MetadataCollector) - # Also to note, in docutils we store it on the document - # TODO should we deal with this getting overwritten by docinfo? - self.env.metadata[self.env.docname]["nb_exec_data"] = exec_data - # self.env.nb_exec_data_changed = True - # TODO how to do this in a "parallel friendly" way? perhaps we don't store - # this and just check the mtime of the exec_data instead, - # using that for the the exec_table extension + store_doc_metadata(self.env, self.env.docname, "exec_data", exec_data) # TODO store error traceback in outdir and log its path @@ -232,12 +275,40 @@ def get_nb_config(self, key: str, cell_index: Optional[int]) -> Any: # TODO handle KeyError better return self.config["nb_config"][key] - def render_nb_spec_data(self, token: SyntaxTreeNode) -> None: - """Add a notebook spec data to the document attributes.""" - # This is different to docutils-only, where we store it on the document + def render_nb_metadata(self, token: SyntaxTreeNode) -> None: + """Render the notebook metadata.""" + metadata = dict(token.meta) + + # save these special keys on the metadata, rather than as docinfo env = self.sphinx_env - env.metadata[env.docname]["kernelspec"] = token.meta["kernelspec"] - env.metadata[env.docname]["language_info"] = token.meta["language_info"] + env.metadata[env.docname]["kernelspec"] = metadata.pop("kernelspec", None) + env.metadata[env.docname]["language_info"] = metadata.pop("language_info", None) + + # TODO should we provide hook for NbElementRenderer? 
+ + # store ipywidgets state in metadata, + # which will be later added to HTML page context + # The JSON inside the script tag is identified and parsed by: + # https://github.com/jupyter-widgets/ipywidgets/blob/32f59acbc63c3ff0acf6afa86399cb563d3a9a86/packages/html-manager/src/libembed.ts#L36 + ipywidgets = metadata.pop("widgets", None) + ipywidgets_mime = (ipywidgets or {}).get(WIDGET_STATE_MIMETYPE, {}) + ipywidgets_state = ipywidgets_mime.get("state", None) + if ipywidgets_state: + string = sanitize_script_content(json.dumps(ipywidgets_state)) + store_doc_metadata( + self.sphinx_env, self.sphinx_env.docname, "ipywidgets_state", string + ) + + # forward the rest to the front_matter renderer + self.render_front_matter( + Token( + "front_matter", + "", + 0, + map=[0, 0], + content=metadata, # type: ignore[arg-type] + ), + ) def render_nb_cell_markdown(self, token: SyntaxTreeNode) -> None: """Render a notebook markdown cell.""" @@ -374,19 +445,6 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: subtype="output_type", ) - def render_nb_widget_state(self, token: SyntaxTreeNode) -> None: - """Render the HTML defining the ipywidget state.""" - # TODO handle this more generally, - # by just passing all notebook metadata to the nb_renderer - # TODO in docutils we also need to load JS URLs if widgets are present and HTML - node = self.nb_renderer.render_widget_state( - mime_type=token.attrGet("type"), data=token.meta - ) - node["nb_element"] = "widget_state" - self.add_line_and_source_path(node, token) - # always append to bottom of the document - self.document.append(node) - class SelectMimeType(SphinxPostTransform): """Select the mime type to render from mime bundles, diff --git a/setup.cfg b/setup.cfg index 3c2ef799..5cd605b0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -43,10 +43,9 @@ install_requires = ipython jupyter-cache~=0.4.1 myst-parser @ git+git://github.com/executablebooks/MyST-Parser.git@master - nbconvert>=5.6,<7 nbformat~=5.0 pyyaml - sphinx>=3.1,<5 + sphinx>=3.5,<5 sphinx-togglebutton~=0.2.2 typing-extensions python_requires = >=3.6 @@ -95,6 +94,7 @@ rtd = sympy testing = coverage<5.0 + beautifulsoup4 ipykernel~=5.5 ipywidgets jupytext~=1.11.2 diff --git a/tests/notebooks/ipywidgets.ipynb b/tests/notebooks/ipywidgets.ipynb new file mode 100644 index 00000000..441446a3 --- /dev/null +++ b/tests/notebooks/ipywidgets.ipynb @@ -0,0 +1,699 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "init_cell": true, + "slideshow": { + "slide_type": "skip" + } + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "import pandas as pd\n", + "pd.set_option('display.latex.repr', True)\n", + "import sympy as sym\n", + "sym.init_printing(use_latex=True)\n", + "import numpy as np\n", + "from IPython.display import Image, Latex" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "ipub": { + "slide": true + } + }, + "source": [ + "# Markdown" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "ipub": { + "slide": true + } + }, + "source": [ + "## General" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "ipub": { + "slide": true + } + }, + "source": [ + "Some markdown text.\n", + "\n", + "A list:\n", + "\n", + "- something\n", + "- something else\n", + "\n", + "A numbered list\n", + "\n", + "1. something\n", + "2. 
something else\n", + "\n", + "non-ascii characters TODO" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "ipub": {} + }, + "source": [ + "This is a long section of text, which we only want in a document (not a presentation)\n", + "some text\n", + "some more text\n", + "some more text\n", + "some more text\n", + "some more text\n", + "some more text\n", + "some more text\n", + "some more text\n", + "some more text\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "ipub": { + "slide": true, + "slideonly": true + } + }, + "source": [ + "This is an abbreviated section of the document text, which we only want in a presentation\n", + "\n", + "- summary of document text" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "ipub": { + "slide": true + } + }, + "source": [ + "## References and Citations" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "ipub": { + "slide": true + } + }, + "source": [ + "References to \\cref{fig:example}, \\cref{tbl:example}, =@eqn:example_sympy and \\cref{code:example_mpl}.\n", + "\n", + "A latex citation.\\cite{zelenyak_molecular_2016}\n", + "\n", + "A html citation.<cite data-cite=\"kirkeminde_thermodynamic_2012\">(Kirkeminde, 2012)</cite> " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "ipub": { + "slide": true + } + }, + "source": [ + "## Todo notes" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "ipub": { + "slide": true + } + }, + "source": [ + "\\todo[inline]{an inline todo}\n", + "\n", + "Some text.\\todo{a todo in the margins}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Text Output" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "ipub": { + "text": { + "format": { + "backgroundcolor": "\\color{blue!10}" + } + } + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "This is some printed text,\n", + "with a nicely formatted output.\n", + "\n" + ] + } + ], + "source": [ + "print(\"\"\"\n", + "This is some printed text,\n", + "with a nicely formatted output.\n", + "\"\"\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Images and Figures" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Displaying a plot with its code" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "ipub": { + "caption": "fig:example_mpl" + } + }, + "source": [ + "A matplotlib figure, with the caption set in the markdowncell above the figure." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "ipub": { + "caption": "code:example_mpl" + } + }, + "source": [ + "The plotting code for a matplotlib figure (\\cref{fig:example_mpl})." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "# Tables (with pandas)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "ipub": { + "caption": "code:example_pd" + } + }, + "source": [ + "The plotting code for a pandas Dataframe table (\\cref{tbl:example})." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "ipub": { + "code": { + "asfloat": true, + "caption": "", + "label": "code:example_pd", + "placement": "H", + "widefigure": false + }, + "table": { + "alternate": "gray!20", + "caption": "An example of a table created with pandas dataframe.", + "label": "tbl:example", + "placement": "H" + } + } + }, + "outputs": [ + { + "data": { + "text/html": [ + "<div>\n", + "<style scoped>\n", + " .dataframe tbody tr th:only-of-type {\n", + " vertical-align: middle;\n", + " }\n", + "\n", + " .dataframe tbody tr th {\n", + " vertical-align: top;\n", + " }\n", + "\n", + " .dataframe thead th {\n", + " text-align: right;\n", + " }\n", + "</style>\n", + "<table border=\"1\" class=\"dataframe\">\n", + " <thead>\n", + " <tr style=\"text-align: right;\">\n", + " <th></th>\n", + " <th>a</th>\n", + " <th>b</th>\n", + " <th>c</th>\n", + " <th>d</th>\n", + " </tr>\n", + " </thead>\n", + " <tbody>\n", + " <tr>\n", + " <th>0</th>\n", + " <td>$\\delta$</td>\n", + " <td>l</td>\n", + " <td>0.603</td>\n", + " <td>0.545</td>\n", + " </tr>\n", + " <tr>\n", + " <th>1</th>\n", + " <td>x</td>\n", + " <td>m</td>\n", + " <td>0.438</td>\n", + " <td>0.892</td>\n", + " </tr>\n", + " <tr>\n", + " <th>2</th>\n", + " <td>y</td>\n", + " <td>n</td>\n", + " <td>0.792</td>\n", + " <td>0.529</td>\n", + " </tr>\n", + " </tbody>\n", + "</table>\n", + "</div>" + ], + "text/latex": [ + "\\begin{tabular}{lllrr}\n", + "\\toprule\n", + "{} & a & b & c & d \\\\\n", + "\\midrule\n", + "0 & \\$\\textbackslash delta\\$ & l & 0.603 & 0.545 \\\\\n", + "1 & x & m & 0.438 & 0.892 \\\\\n", + "2 & y & n & 0.792 & 0.529 \\\\\n", + "\\bottomrule\n", + "\\end{tabular}\n" + ], + "text/plain": [ + " a b c d\n", + "0 $\\delta$ l 0.603 0.545\n", + "1 x m 0.438 0.892\n", + "2 y n 0.792 0.529" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.random.seed(0) \n", + "df = pd.DataFrame(np.random.rand(3,4),columns=['a','b','c','d'])\n", + "df.a = ['$\\delta$','x','y']\n", + "df.b = ['l','m','n']\n", + "df.set_index(['a','b'])\n", + "df.round(3)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Equations (with ipython or sympy)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "ipub": { + "equation": { + "label": "eqn:example_ipy" + } + } + }, + "outputs": [ + { + "data": { + "text/latex": [ + "$$ a = b+c $$" + ], + "text/plain": [ + "<IPython.core.display.Latex object>" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "Latex('$$ a = b+c $$')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "ipub": { + "caption": "code:example_sym" + } + }, + "source": [ + "The plotting code for a sympy equation (=@eqn:example_sympy)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "ipub": { + "code": { + "asfloat": true, + "caption": "", + "label": "code:example_sym", + "placement": "H", + "widefigure": false + }, + "equation": { + "environment": "equation", + "label": "eqn:example_sympy" + } + } + }, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAa8AAAA/CAYAAABXekf2AAAACXBIWXMAAA7EAAAOxAGVKw4bAAAT2ElEQVR4Ae2d7ZEdtRKG11sEYEwEFzLAJgL7ZgDcCMAZQPkf/yjIwBCBgQyACDBkACG4NoO976OdPsyZnQ9ppNFozraq5sxXq9V6W63W1+g8ur29vfJQHoFvvvnmtbj+rfP35bk7xzEEhPVjPf9dx3Nd34zR+DNHIAYBt98YlMrSpNrvddnknRsISAlf6fRMZ3dcFYuE8MZhvdHxc8VkPakLQ8Dtdx+FptqvO6/CepICXojldzo+K8za2UUgIPxDg0FndODBEUhCwO03Ca7ixCn2686rIPwCnmErWv0vdf1PQdbOKg0BGg5fSgefpkVz6oeMgNtvM9qPst9HPudVTmEq/MxzMVz4tBzXy+QkjD5Wzv6MzZ3oH8XSQid6HNePOv6ja5//AhQPswionLj9ziL070thtbv9vvevOH6Vg0CnzC/Fwx1XHJAM6z0Vbn/FkadRie8vOkiD42VabKd+aAi4/SZrfHf79WHDZJ1NRqCVT4W5SWU8meoBX3QVxVUFrL4WPAwffnhAmFzkugi4/Ubi3Yr9uvOKVNgcmZTJEBXdaCpLD8sIvBIJLbcQzBjsnrOePdaR5XQU/xexojHBcJAHR2AUAZUTt99RZCYfNmG/Pmw4qZ+kF1TEv8kIkhZpiB6HxwIPhs+ampeRPDgOc8bPdP2Oez3P6ll2fD/U+Tfxs/C77lnsYry5JkwOwYqezxE+0nlpSPBb0f0suo91GH94e3AEDAG3X0Ni4Swbol5own7deS0oa+k1lSLK1GEV/WwU0VMxM0SBM8ApZPUuFL946Aroa53/a8x1jYH/yTMdfcdjJLFncBpiBRYEsKQBQI/pW6Uz59BxWtDNBvFgKBc+0C85ulle/vLyEFDZcPtNU2sz9uvOK01xY9R0oa+oJMdeDp91FWn4BkzX9B4wntYCjuqsopes9LpYkEJP8f01Ais+jpvVmGe89ewvPUv6Lk70HyXI8JNokX2YbgILJ71QBNx+IxXbmv1eR8p9MWRSwHc6mMT/imvLmK6pWJNCF4fx8h+SIrZP/EIisrXVEBN6XDlzUeD97Q7ZDztuKD84MA8HRkA6dPtd1t+DsN9Z56WCwtARlXMzQfJQeTJ8NaxYF2VUnL9F9EbnH3SwEwNjt/R+CKEFdncZ/ft5R/k6OsYxCHFS/wibmwlx12BPnBfiGdVDnUh31WOlSX7Iy6F6XpK7OftbpYBeJOXJ7beHx0aXD8J+J4cNMRwB+0Tn6pXNnEIlz42OL0SDA4te6CBa8sPwVH/S/lc9YzcMnnGdGmz4r88zlUdz9MJjaggvDHEOMIyVn8bBqac7jCSe9IpwcB/oYB6QOa8zXHXPe+gYMkT/KT3eMHSoODRYkhbWKK3qQTI2aX+5QChfbr+5IC7EF8YPwn5He17KPBUEcxNTICzAt+1ryUWlRkXIwofYQC+JCqEfqMSoKNcuQqB7Tivn4oMwn5zY1rtPdYDFXIBmytnglH7S++912IQwzmnI8xU0omUYcNIRTghhjZMhzwny/R4rj03bXy4yyp/bby6IifGF+cXZ7z3n1WWSiuF5Ij5VySUnFSFDELHDmlSQwxb3O3joSJ6HUbpWCZ71DsTrUgMOg5V7OI9T0D3DrjQiJp1JRzNsOPR50Hi4sQe6Rk80Ck5x9Azj+6OjoVE11GX3avJkejqtoJyk3PFFl8/m7S8XIuXT7TcXxLT4F2e/95yX8KAiYk7oVJmkYVSVmkrsR8mKA1oKVF70svrhf9yQ10ge/bjWK7UWff/dRV0LG5wIc2CW55A/3eNQqIQ4+I6K+7HA0OyZ0xsjGjwLvWLFM52Rvg1h04t+M6CfvVVc+FGmrdExS7/jyyPZXy5Mbr+5CEbEV9m/SPt9r593ZZJeDBVQ0V6X+MITR0ErPSoozuJGrKLB6TCXgcGfVawjifCelUrIQkXGfB7Lv+m9IRfPrHLU5WJ41lG8XaQ8MIGwYQgLrO71WPQs9GZ0pucKhq90nOmhi3/qQen9WdB7nD/8pz5IDg0TvQ+NKZ0pozzDYaYGdMWiEXQe+KUy2JK+y1tx+8uVWXK5/eaCuFN86e5i7ffMeQlfhiuYdyht2PDFUTCfUTrAm2XdsxPxeo9zOqtYEUTP165AC05Q8UtjhVhNBOUNR8EuFifcdB16QjqDZwi6phGBM/mS9/13eobOpxwT8WkEvONiEJ5wL1423GevcZDhw2O9S3VCOEp6XhwpDRWRVwlb2V+u8G6/uQjuEF/2cdH2e22YKqNUxlRMyfM/xmPs3PG90nlYCY2RJz8TXypR5kfWOqE1aYYKXBFPFXgyk8YjdHr7ROdhgwODGHM2VHCEE73iQms7XISXIz8MUY99cIyDOVsMIzp6XJRT68nhyFKC6eteLzKFyRa0ytsm9pcrayfXlc5uv7lgVozf6e2i7ffkvIQrFQH785XuScDXKrat1MdkJN3jWoGKhmCV4d1d+i/LwglP7k5t/KoM4JzBlJ4N3xqdDj1j/upeGdEza0TQ+8LJEND9UmMo8A7U3Y/i2/DyqcfXvQqNBr2nnHLNd3spwfQV+KRErEC7lf3liu72O42g268akrLF132Iatlvf9iQVnLR3ktXwTCMNGxBM7l/1pLTPRUecx9WwfTxWLqGPxUty7FrDAdZ5ZdaeYZ8SEYcA4HeBYGNY8n3rzqvmcsJTAr+MLxGHscaBGd6G6RJI4U84cDIz1ud7zm6fhzodDAXaQaAI6dnd+9PJEXDd3r05IJcOqdiZWXL9NcXZe/r4vaXmyHhC05uvwMghYvbb4eJsNjNfoPzkgDWkzhzMgOdrbllCOk0jNRjkLSLuOSjJT65g3gHIBUTw0E1nJcNc1ll2Mva8qXkHfYoliNVpJB8lr+kVBWPHhHOjdY62ETlU3GgjWo4iTaKp/jdC4rL3BzPm3Jekmkr+7uHQeIDt98RwHLK4Ai74o8k34Ow3+sOuWeclelVlfEY+uJFT4oPncccIi1rWuQYLXQ4nLndMqjYZlvwek86IR86bx2s8iuG19YCV+TPMCE6pUXWIj5Bpq58VoRlNqni9jebWsRLt98IkC6T5DD2G3pe0gE9lrnhoDVqYggJIMZC0i7iMqSYlgRDeGPDXGPp5z5jaIuw5FDvqB7Qr3TFs
B5laazH3QISpjN0aNer5FI+GSJlWC11+HKY3hb2N0wj9d7tNxWxTPqC5Wm1JJLhMPZrPS+MsFgrWQDQ8uZ7mhpDeKYoa1HbEIw93+JsPS96kB4GCEjv9KKLlacB+9xb05npMIcf5ZwjNxS1v1xh3H5zEVwdv1R5Wi0AEY9iv+a8AK1kZVNjhdJQQSZ/iUppyHt4HyosKflm+MLvm0fAdBZ02Ii0yGLltwWR3H5b0ILLMIbAyX5t2BCiqJVzqrBZFcXE99hcliXGqr/ZoT69Z4gPo2W5KQ7nbBdxvecdNPCJ2UHcjN+G9BTNgyNwGASi7C83N7Irt99cED1+EwhcqzBbT8WGUyYFEy2r/tiKafK7rY7Glj1P8cIxLe0i/kq8oncQF+3JI08l6s8dASFg5byJRo7KbbT95Wqvs02331wgPf6eCJzs91pS4EgWgwo+c0lMTHPwndbU3NLiJqyKO7uLeMf7j04olkZbr6p7NHuyDwdnida+lGxReK3l7/EeHAJVylNnU26/br8XY2AMG1oL1Houo5lT4f+LFzqzgnDVJqyjjP99iINikQctUZZZh/R0/bmOqVWLenUvVKkMlOosXvekKvhA2JDH33Wk5PWzHqYFpTkcK9NbNHbCjZEEFlUMQ7AdvX85fKH72BW1sfaXpXPTvc5uv3fKsnIworptH21cnrYVfn/uprfHOC/rhkUZs4BnvosWHLsosEy43ytiefTTufyJnt0bZncRN546Mz6PXKR3+KD83MZmQrSTu+rrHQqcxTk2nSFdiozDuHvez+GVK5d4jzmnKz2nfGIDqX/30hcpyv6URhGdw0eH229fA5HXwq2U/W5WnlJkjMx2FTLJPVnfTQnQX7AxRTP2nDkvFlPgrIIilDiGzDcCGNlceKaXZrB9OmvFWo+Ld6x6CjzF93EEb+JUmfgmodQg+ZMVlJpGLv0RZMzNo8cPc9Zuv4kF4Qi2cQQZE2GfJGfOy5yNDV9MEtsLAURvi9WG9L4ed89xNAxJLIWoXcQ7vh+LmS3+gH9MsPzE0CbTSC7jb/lO5uERdkXA9GZ63FUYJW5yRNtfrsBuvwFBKwe5cHr8ugiY3m5wXtYLsoexotiKQxwYva63vYp9jkfsLsTMfV2JZ9QO4qIL9Ipi+ZmTwd85Aq0gYOU11f5y5Xf7zUXQ4++KAEvlbyQBx+x3WUMpcSp6xhAfPSIOMwZdTgfFo9cWdhHXNY6MHZpJ+2wXcT2HN0OGDG+wkGNp3suMn7x4cASmELAejjmNKboqz1WuV9lfrnBK1+03F0SPvwcCJ/u1OS8civVcUgRimBDnk7QJqwyH9EYnLfuJiy5lB3GT/22fx0bXVDjmLDdK4vhspb/Sf31zfFDGc7DW/sa5xT91+43H6sFRtm6/5ryo8FmSnhSUOXpG9JBa2IT1E8mBE63R86LVzgKS2EUkZ7gqHo7WMLMFLF93WJ7RHvwm6a9vKuXVGh04jNxAWStR3lbZX67wKm9uvyvqiw3tt1R5yi0aFr9p+zXn9aekDYsvpJgkYxT9Jku2Db2EM4s7cKQ1grWUcUJJaXYFn+HS01/R65ohV7bA4uPtuW23auStZBo4eQK6ATM2amYbsKQypjglg/XQs2UoqKvV9pcLjPLg9psAovCi/GxivwXLU0KOZkmbtl9zXj+hEB0vdFDBHDEg++JQZKGMWavdxl9T2OKozuRUoaXXxdweQ7DvpzBrnDb2Q92a2Qg6E96mw5ppT6V1CfY3lbfY526/sUjVo2vafq/BQYZMK5QW/6k3wPOjBMlPwSfU6rXYt2TWir9LPe4XWf+WzI8H5MjOMOQangNWfjuDALhn97pm+Ce/Orr9JWd4EMHtdwCI384hcLLf4Lw6Slr9yfNec6lUfBf+zE9GUKs1bZVf0grNDg+c1NzcHMrxsAECvYaBDYdskMpqlke2v9WZ7iK6/eYi+ADiD+33vV6ew9AFrSAdtXowveSzLhly+yKLQ1pkc5LJvSRhO7WCknmhK71PmkNLE7s+tfKDbnDIH+gAr7O/vtF9zWD6ahHjI9tfrg7dfnMR3Ch+y/Z7bXmWkPQmvtcR9b2Wxdv7LLnZJPhK55pzdawOI1hleHe38ley47jgZSsQV3JqLhpOa+mvb2oKbfr6o2aiMWmpDBzS/mLyNkejfLv9zgG077um7ffkvMBIBYnKk41GQy9gX9yiU+cD6Zq9LnCioqH3VQonhoxYtkzj4WKC8jP71zc7ZNRW1jU5siC8jmh/uWp0+81FcKP4rdvvmfPqMMCA+MO65oPApdXG/FHNXpfhEipApW2teXuedFZ8VnmSh6nhxCR+ByDG6dNAysJtZT75pu5Kabc4bGhZOoz9mcBrz9KD2+9a8PaL14z93nNeKlBhGyadGYduNkg+Kj9abXtV+nybQ7CVjnd3Cb8dxvw9DBPWFxWUp191GEZjeWNIonagp2zzlbXTjkpPmB3C/qIyM0OkfLr9zuCz96sj2O8959WB9lxnvj2igDUXJBcVH/8L9oWu96qMbOjJhqKScJLcbGb8kc4n56vrvXokSbJHEtPLGXNQT4ivvFbt/Sg9G+Ldo5ceCdmJrGn7O0m58kK6cPtdiV3FaM3b76jzUuG6EUj0Bn7uClpFzKKSYo6IzX13q4iUNk4TnJJ7XopLRfqJzgwR9QMO7V3/wYGvo/76pmL+TE9vKqa5KimVi9btb1W+epHcfntgNHrZvP2OOi/AlAFROdMraKr3JbloteG4lnaZF9nmARnoLSFTVBAteGK8fJAc/h7Gznr2UtdUXJcQQt76GVHewsoyPTv1NvvvN76mMXYjGar2+NbmSXI2aX9r82PxlC+3XwOj7XPz9tv/zuselJ0B3Xu+5wPJROVuQ3Z7ikLaLLagQmZ+MHalIMOdOLCxOcVDVKySfTFQdnSEv77piBkupFd59tc3i4wKEEgOKkx6Xrv11NdkAwzXxGs5jvLk9tuygjrZKHs6mrbfR7e3tweAsl0RpeCwKEHnVXNf7ebsciSTbmgo0NB4quuLaSBcjob2y4nb737Yx6Y8Zb+Tw4axjJ0uVIr8b1VTw6uulzME2AiZlqQ7rjNY/EYI0Khx+227KIzarzuvfKWxrQ/hbKf4u0f+uzcCXaOCBTKH2jlmb9weUPpuvw0re85+3XllKk7gMobPwo2xOaxM7h69AAKhUSE9tbDAp0B2nEVJBNx+S6K5Ca9J+3XnVQZvlryzetBW05Xh6lyyEJA+WKiBToafJGTx9cgXh4Dbb4MqXbJfd14FlNa13jAAdvzw0A4CDBUy1xW7ErQdyV2Sagi4/VaDOjWhWft155UK5wR9V0G+09l7XxMY1XwsPXyo9BjK9bnImsAfNC2337YUF2O/7rzK6oyKkm8jqDg97IsArTZ26m/lm8B90fDUYxBw+41BqQ7Nov268yqoiK6iZIiKHTQ87ISA9ECPi4+Sq/5Vzk7Z9WQLIeD2WwjITDax9usfKWcCPRZd4LOLBruq+1zLGEAbPhPm9Hr5cPy5rv27rg2xvlTWbr/7aTbFfr3ntY2e2LuPfQptJ/Nt
UnGuYwjQ6+UfEdxxjaHjz2IQcPuNQWkbmmj7dee1gQJUcfLt18X9R9cGUBVlKdxZGo/j8m+6iiL7sJi5/e6j71T7/T9XOwttR1rR7gAAAABJRU5ErkJggg==\n", + "text/latex": [ + "$\\displaystyle \\left(\\sqrt{5} i\\right)^{\\alpha} \\left(\\frac{1}{2} - \\frac{2 \\sqrt{5} i}{5}\\right) + \\left(- \\sqrt{5} i\\right)^{\\alpha} \\left(\\frac{1}{2} + \\frac{2 \\sqrt{5} i}{5}\\right)$" + ], + "text/plain": [ + " \\alpha ⎛1 2⋅√5⋅ⅈ⎞ \\alpha ⎛1 2⋅√5⋅ⅈ⎞\n", + "(√5⋅ⅈ) ⋅⎜─ - ──────⎟ + (-√5⋅ⅈ) ⋅⎜─ + ──────⎟\n", + " ⎝2 5 ⎠ ⎝2 5 ⎠" + ] + }, + "execution_count": 5, + "metadata": { + "filenames": { + "image/png": "/private/var/folders/_w/bsp9j6414gs4gdlnhhcnqm9c0000gn/T/pytest-of-matthewmckay/pytest-37/test_complex_outputs_unrun_cac0/source/_build/jupyter_execute/complex_outputs_unrun_22_0.png" + } + }, + "output_type": "execute_result" + } + ], + "source": [ + "y = sym.Function('y')\n", + "n = sym.symbols(r'\\alpha')\n", + "f = y(n)-2*y(n-1/sym.pi)-5*y(n-2)\n", + "sym.rsolve(f,y(n),[1,4])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Interactive outputs\n", + "\n", + "## ipywidgets" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "1337h4x0R", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Layout()" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import ipywidgets as widgets\n", + "widgets.Layout(model_id=\"1337h4x0R\")" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "**_some_ markdown**" + ], + "text/plain": [ + "<IPython.core.display.Markdown object>" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from IPython.display import display, Markdown\n", + "display(Markdown('**_some_ markdown**'))" + ] + } + ], + "metadata": { + "celltoolbar": "Edit Metadata", + "hide_input": false, + "ipub": { + "bibliography": "example.bib", + "biboptions": [ + "super", + "sort" + ], + "bibstyle": "unsrtnat", + "language": "portuges", + "listcode": true, + "listfigures": true, + "listtables": true, + "pandoc": { + "at_notation": true, + "use_numref": true + }, + "sphinx": { + "bib_title": "My Bibliography" + }, + "titlepage": { + "author": "Authors Name", + "email": "authors@email.com", + "institution": [ + "Institution1", + "Institution2" + ], + "logo": "logo_example.png", + "subtitle": "Sub-Title", + "supervisors": [ + "First Supervisor", + "Second Supervisor" + ], + "tagline": "A tagline for the report.", + "title": "Main-Title" + }, + "toc": { + "depth": 2 + } + }, + "jupytext": { + "notebook_metadata_filter": "ipub" + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + }, + "latex_envs": { + "LaTeX_envs_menu_present": true, + "autocomplete": true, + "bibliofile": "example.bib", + "cite_by": "apalike", + "current_citInitial": 1, + "eqLabelWithNumbers": true, + "eqNumInitial": 1, + "hotkeys": { + "equation": "Ctrl-E", + "itemize": "Ctrl-I" + }, + "labels_anchors": false, + "latex_user_defs": false, + "report_style_numbering": false, + "user_envs_cfg": true + }, + "nav_menu": {}, + "toc": { + "colors": { + "hover_highlight": 
"#DAA520", + "navigate_num": "#000000", + "navigate_text": "#333333", + "running_highlight": "#FF0000", + "selected_highlight": "#FFD700", + "sidebar_border": "#EEEEEE", + "wrapper_background": "#FFFFFF" + }, + "moveMenuLeft": true, + "nav_menu": { + "height": "161px", + "width": "252px" + }, + "navigate_menu": true, + "number_sections": true, + "sideBar": true, + "threshold": 4, + "toc_cell": false, + "toc_section_display": "block", + "toc_window_display": true, + "widenNotebook": false + }, + "varInspector": { + "cols": { + "lenName": 16, + "lenType": 16, + "lenVar": 40 + }, + "kernels_config": { + "python": { + "delete_cmd_postfix": "", + "delete_cmd_prefix": "del ", + "library": "var_list.py", + "varRefreshCmd": "print(var_dic_list())" + }, + "r": { + "delete_cmd_postfix": ") ", + "delete_cmd_prefix": "rm(", + "library": "var_list.r", + "varRefreshCmd": "cat(var_dic_list()) " + } + }, + "types_to_exclude": [ + "module", + "function", + "builtin_function_or_method", + "instance", + "_Feature" + ], + "window_display": false + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "state": { + "1337h4x0R": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + } + }, + "version_major": 2, + "version_minor": 0 + } + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/tests/test_sphinx_builds.py b/tests/test_sphinx_builds.py index d2c76206..1cbef807 100644 --- a/tests/test_sphinx_builds.py +++ b/tests/test_sphinx_builds.py @@ -1,4 +1,5 @@ """Test full sphinx builds.""" +import bs4 import pytest @@ -52,3 +53,19 @@ def test_complex_outputs_run(sphinx_run, file_regression): extension=".resolved.xml", encoding="utf8", ) + + +@pytest.mark.sphinx_params( + "ipywidgets.ipynb", + conf={"extensions": ["myst_nb.new.sphinx_"], "nb_execution_mode": "off"}, +) +def test_ipywidgets(sphinx_run): + """Test that ipywidget state is extracted and JS is included in the HTML head.""" + sphinx_run.build() + # print(sphinx_run.status()) + assert sphinx_run.warnings() == "" + assert "__mystnb__ipywidgets_state" in sphinx_run.env.metadata["ipywidgets"] + html = bs4.BeautifulSoup(sphinx_run.get_html(), "html.parser") + head_scripts = html.select("head > script") + assert any("require.js" in script.get("src", "") for script in head_scripts) + assert any("embed-amd.js" in script.get("src", "") for script in head_scripts) From 478b5500ebc9712f44010edface29a51380f5a44 
Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Wed, 5 Jan 2022 04:49:13 +0100 Subject: [PATCH 22/75] Successfully build the docs (with warnings) --- docs/api/index.rst | 2 +- docs/conf.py | 79 ++++++--------- docs/use/config-reference.md | 2 +- docs/use/execute.md | 24 ++--- docs/use/formatting_outputs.md | 2 +- docs/use/markdown.md | 2 +- myst_nb/__init__.py | 26 ++++- myst_nb/configuration.py | 16 ++- myst_nb/docutils_.py | 1 - myst_nb/nb_glue/__init__.py | 2 +- myst_nb/new/execute.py | 16 ++- myst_nb/new/execution_tables.py | 168 ++++++++++++++++++++++++++++++++ myst_nb/new/loggers.py | 6 +- myst_nb/new/sphinx_.py | 75 +++++++++++--- tests/test_sphinx_builds.py | 20 +--- 15 files changed, 337 insertions(+), 104 deletions(-) create mode 100644 myst_nb/new/execution_tables.py diff --git a/docs/api/index.rst b/docs/api/index.rst index a4a1fcd8..fd4e3ef8 100644 --- a/docs/api/index.rst +++ b/docs/api/index.rst @@ -12,7 +12,7 @@ Python API Miscellaneous ------------- -.. autoclass:: myst_nb.ansi_lexer.AnsiColorLexer +.. autoclass:: myst_nb.lexers.AnsiColorLexer :members: :undoc-members: :show-inheritance: diff --git a/docs/conf.py b/docs/conf.py index 6ddc3d55..ad1d1901 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,25 +1,12 @@ # Configuration file for the Sphinx documentation builder. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) - - # -- Project information ----------------------------------------------------- project = "MyST-NB" -copyright = "2020, Executable Book Project" +copyright = "2022, Executable Book Project" author = "Executable Book Project" master_doc = "index" @@ -38,32 +25,23 @@ "sphinx.ext.viewcode", ] -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"] +myst_enable_extensions = [ + "amsmath", + "colon_fence", + "deflist", + "dollarmath", + "html_image", +] -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. 
-# -html_title = "" -html_theme = "sphinx_book_theme" -html_logo = "_static/logo-wide.svg" -html_favicon = "_static/logo-square.svg" -html_theme_options = { - "github_url": "https://github.com/executablebooks/myst-nb", - "repository_url": "https://github.com/executablebooks/myst-nb", - "repository_branch": "master", - "use_edit_page_button": True, - "path_to_docs": "docs/", - "show_navbar_depth": 2, -} +nb_custom_formats = {".Rmd": ["jupytext.reads", {"fmt": "Rmd"}]} +nb_execution_mode = "cache" +nb_execution_show_tb = "READTHEDOCS" in os.environ +nb_execution_timeout = 60 # Note: 30 was timing out on RTD intersphinx_mapping = { "python": ("https://docs.python.org/3.8", None), @@ -74,7 +52,6 @@ "nbformat": ("https://nbformat.readthedocs.io/en/latest", None), "sphinx": ("https://www.sphinx-doc.org/en/master", None), } - intersphinx_cache_limit = 5 nitpick_ignore = [ @@ -86,6 +63,24 @@ ("py:class", "pygments.lexer.RegexLexer"), ] +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_title = "" +html_theme = "sphinx_book_theme" +html_logo = "_static/logo-wide.svg" +html_favicon = "_static/logo-square.svg" +html_theme_options = { + "github_url": "https://github.com/executablebooks/myst-nb", + "repository_url": "https://github.com/executablebooks/myst-nb", + "repository_branch": "master", + "use_edit_page_button": True, + "path_to_docs": "docs/", + "show_navbar_depth": 2, +} + # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". @@ -93,22 +88,10 @@ copybutton_selector = "div:not(.output) > div.highlight pre" -nb_custom_formats = {".Rmd": ["jupytext.reads", {"fmt": "Rmd"}]} -jupyter_execute_notebooks = "cache" -execution_show_tb = "READTHEDOCS" in os.environ -execution_timeout = 60 # Note: 30 was timing out on RTD - -myst_enable_extensions = [ - "amsmath", - "colon_fence", - "deflist", - "dollarmath", - "html_image", -] panels_add_bootstrap_css = False -def setup(app): +def setup(_): import subprocess # this is required to register the coconut kernel with Jupyter, diff --git a/docs/use/config-reference.md b/docs/use/config-reference.md index aa0b7361..197d7b41 100644 --- a/docs/use/config-reference.md +++ b/docs/use/config-reference.md @@ -26,7 +26,7 @@ This configuration is used to control how Jupyter Notebooks are executed at buil * - `execution_excludepatterns` - () - Exclude certain file patterns from execution, [see here](execute/config) for details. -* - `jupyter_execute_notebooks` +* - `nb_execution_mode` - "auto" - The logic for executing notebooks, [see here](execute/config) for details. * - `execution_in_temp` diff --git a/docs/use/execute.md b/docs/use/execute.md index 276a2101..86120347 100644 --- a/docs/use/execute.md +++ b/docs/use/execute.md @@ -28,7 +28,7 @@ See the sections below for each configuration option and its effect. To trigger the execution of notebook pages, use the following configuration in `conf.py`: ```python -jupyter_execute_notebooks = "auto" +nb_execution_mode = "auto" ``` By default, this will only execute notebooks that are missing at least one output. @@ -37,13 +37,13 @@ If a notebook has *all* of its outputs populated, then it will not be executed. 
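As a concrete illustration of the `auto` behaviour described above, the decision reduces to a check along these lines (a minimal sketch mirroring the logic added to `myst_nb/execute.py` later in this patch series; `notebook` is assumed to be an already-read `nbformat.NotebookNode`):

```python
import nbformat


def should_execute(notebook: nbformat.NotebookNode, execution_mode: str) -> bool:
    """Sketch: execute under "force" always, under "auto" only when outputs are missing."""
    missing_outputs = (
        len(cell.outputs) == 0
        for cell in notebook.cells
        if cell["cell_type"] == "code"
    )
    return execution_mode == "force" or (
        execution_mode == "auto" and any(missing_outputs)
    )
```

So under `auto`, a notebook whose code cells all carry stored outputs is left untouched, while a single empty `outputs` list triggers re-execution.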
**To force the execution of all notebooks, regardless of their outputs**, change the above configuration value to: ```python -jupyter_execute_notebooks = "force" +nb_execution_mode = "force" ``` **To cache execution outputs with [jupyter-cache]**, change the above configuration value to: ```python -jupyter_execute_notebooks = "cache" +nb_execution_mode = "cache" ``` See {ref}`execute/cache` for more information. @@ -51,16 +51,16 @@ See {ref}`execute/cache` for more information. **To turn off notebook execution**, change the above configuration value to: ```python -jupyter_execute_notebooks = "off" +nb_execution_mode = "off" ``` **To exclude certain file patterns from execution**, use the following configuration: ```python -execution_excludepatterns = ['list', 'of', '*patterns'] +nb_execution_excludepatterns = ['list', 'of', '*patterns'] ``` -Any file that matches one of the items in `execution_excludepatterns` will not be executed. +Any file that matches one of the items in `nb_execution_excludepatterns` will not be executed. (execute/cache)= ## Cache execution outputs @@ -68,7 +68,7 @@ Any file that matches one of the items in `execution_excludepatterns` will not b As mentioned above, you can **cache the results of executing a notebook page** by setting: ```python -jupyter_execute_notebooks = "cache" +nb_execution_mode = "cache" ``` in your conf.py file. @@ -89,7 +89,7 @@ Generally, this is in `_build/.jupyter_cache`. You may also specify a path to the location of a jupyter cache you'd like to use: ```python -jupyter_cache = "path/to/mycache" +nb_execution_cache_path = "path/to/mycache" ``` The path should point to an **empty folder**, or a folder where a **jupyter cache already exists**. @@ -99,14 +99,14 @@ The path should point to an **empty folder**, or a folder where a **jupyter cach ## Executing in temporary folders By default, the command working directory (cwd) that a notebook runs in will be the directory it is located in. -However, you can set `execution_in_temp=True` in your `conf.py`, to change this behaviour such that, for each execution, a temporary directory will be created and used as the cwd. +However, you can set `nb_execution_in_temp=True` in your `conf.py`, to change this behaviour such that, for each execution, a temporary directory will be created and used as the cwd. (execute/timeout)= ## Execution Timeout The execution of notebooks is managed by {doc}`nbclient <nbclient:client>`. -The `execution_timeout` sphinx option defines the maximum time (in seconds) each notebook cell is allowed to run. +The `nb_execution_timeout` sphinx option defines the maximum time (in seconds) each notebook cell is allowed to run. If the execution takes longer an exception will be raised. The default is 30 s, so in cases of long-running cells you may want to specify an higher value. The timeout option can also be set to `None` or -1 to remove any restriction on execution time. @@ -128,7 +128,7 @@ This global value can also be overridden per notebook by adding this to you note In some cases, you may want to intentionally show code that doesn't work (e.g., to show the error message). You can achieve this at "three levels": -Globally, by setting `execution_allow_errors=True` in your `conf.py`. +Globally, by setting `nb_execution_allow_errors=True` in your `conf.py`. 
Per notebook (overrides global), by adding this to you notebooks metadata: @@ -164,7 +164,7 @@ print(thisvariabledoesntexist) (execute/statistics)= ## Execution statistics -As notebooks are executed, certain statistics are stored in a dictionary (`{docname:data}`), and saved on the [sphinx environment object](https://www.sphinx-doc.org/en/master/extdev/envapi.html#sphinx.environment.BuildEnvironment) as `env.nb_execution_data`. +As notebooks are executed, certain statistics are stored in a dictionary, and saved on the [sphinx environment object](https://www.sphinx-doc.org/en/master/extdev/envapi.html#sphinx.environment.BuildEnvironment) in `env.metadata[docname]`. You can access this in a post-transform in your own sphinx extensions, or use the built-in `nb-exec-table` directive: diff --git a/docs/use/formatting_outputs.md b/docs/use/formatting_outputs.md index f882967b..8ee938a6 100644 --- a/docs/use/formatting_outputs.md +++ b/docs/use/formatting_outputs.md @@ -184,7 +184,7 @@ print("AB\x1b[43mCD\x1b[35mEF\x1b[1mGH\x1b[4mIJ\x1b[7m" "KL\x1b[49mMN\x1b[39mOP\x1b[22mQR\x1b[24mST\x1b[27mUV") ``` -This uses the built-in {py:class}`~myst_nb.ansi_lexer.AnsiColorLexer` [pygments lexer](https://pygments.org/). +This uses the built-in {py:class}`~myst_nb.lexers.AnsiColorLexer` [pygments lexer](https://pygments.org/). You can change the lexer used in the `conf.py`, for example to turn off lexing: ```python diff --git a/docs/use/markdown.md b/docs/use/markdown.md index c4442abf..0e904d56 100644 --- a/docs/use/markdown.md +++ b/docs/use/markdown.md @@ -30,7 +30,7 @@ When used with Sphinx, MyST Notebooks are also integrated directly into the {ref}`Execution and Caching <execute/cache>` machinery! [^download]: This notebook can be downloaded as - **{nb-download}`markdown.py`** and {download}`markdown.md` + **{nb-download}`markdown.ipynb`** and {download}`markdown.md` ## The MyST Notebook Structure diff --git a/myst_nb/__init__.py b/myst_nb/__init__.py index 1ac7dd1c..5d44742f 100644 --- a/myst_nb/__init__.py +++ b/myst_nb/__init__.py @@ -1,10 +1,32 @@ """A docutils/sphinx parser for Jupyter Notebooks.""" -__version__ = "0.13.1" +__version__ = "0.14.0" def setup(app): """Sphinx extension setup.""" # we import this locally, so sphinx is not automatically imported - from .extension import sphinx_setup + from .new.sphinx_ import sphinx_setup return sphinx_setup(app) + + +def glue(name: str, variable, display: bool = True) -> None: + """Glue a variable into the notebook's cell metadata. + + Parameters + ---------- + name: string + A unique name for the variable. You can use this name to refer to the variable + later on. + variable: Python object + A variable in Python for which you'd like to store its display value. This is + not quite the same as storing the object itself - the stored information is + what is *displayed* when you print or show the object in a Jupyter Notebook. + display: bool + Display the object you are gluing. This is helpful in sanity-checking the + state of the object at glue-time. 
+ """ + # we import this locally, so IPython is not automatically imported + from myst_nb.nb_glue import glue + + return glue(name, variable, display) diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index b51ffb01..14c726d0 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -158,8 +158,11 @@ class NbParserConfig: these option names are prepended with ``nb_`` """ - # TODO: nb_render_key, execution_show_tb, execution_excludepatterns - # jupyter_sphinx_require_url, jupyter_sphinx_embed_url + # TODO: nb_render_key + + # TODO jupyter_sphinx_require_url, jupyter_sphinx_embed_url, + # are no longer used by this package, replaced by ipywidgets_js + # do we add any deprecation warnings? # TODO mark which config are allowed per notebook/cell @@ -210,6 +213,15 @@ class NbParserConfig: "legacy_name": "jupyter_cache", }, ) + execution_excludepatterns: Sequence[str] = attr.ib( + default=(), + validator=deep_iterable(instance_of(str)), + metadata={ + "help": "Exclude patterns for notebooks", + "legacy_name": "execution_excludepatterns", + "docutils_exclude": True, + }, + ) execution_timeout: int = attr.ib( default=30, validator=instance_of(int), diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index bb1a79cb..4fa7fa3b 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -166,7 +166,6 @@ def render_nb_metadata(self, token: SyntaxTreeNode) -> None: # TODO how to handle ipywidgets in docutils? ipywidgets = metadata.pop("widgets", None) # noqa: F841 # ipywidgets_mime = (ipywidgets or {}).get(WIDGET_STATE_MIMETYPE, {}) - # ipywidgets_state = ipywidgets_mime.get("state", None) # forward the rest to the front_matter renderer self.render_front_matter( diff --git a/myst_nb/nb_glue/__init__.py b/myst_nb/nb_glue/__init__.py index 2f361f65..f6ed5efd 100644 --- a/myst_nb/nb_glue/__init__.py +++ b/myst_nb/nb_glue/__init__.py @@ -4,7 +4,7 @@ GLUE_PREFIX = "application/papermill.record/" -def glue(name, variable, display=True): +def glue(name: str, variable, display: bool = True) -> None: """Glue a variable into the notebook's cell metadata. Parameters diff --git a/myst_nb/new/execute.py b/myst_nb/new/execute.py index 2c11fb5b..79669e2a 100644 --- a/myst_nb/new/execute.py +++ b/myst_nb/new/execute.py @@ -28,7 +28,10 @@ class ExecutionResult(TypedDict): """method used to execute the notebook""" succeeded: bool """True if the notebook executed successfully""" - # TODO error + error: Optional[str] + """error type if the notebook failed to execute""" + traceback: Optional[str] + """traceback if the notebook failed""" def update_notebook( @@ -48,11 +51,14 @@ def update_notebook( :returns: The updated notebook, and the (optional) execution metadata. """ - # path should only be None when using docutils programmatically + # path should only be None when using docutils programmatically, + # e.g. 
source="<string>" path = Path(source) if Path(source).is_file() else None exec_metadata: Optional[ExecutionResult] = None + # TODO deal with nb_config.execution_excludepatterns + if nb_config.execution_mode == "force": # setup the execution current working directory @@ -88,6 +94,8 @@ def update_notebook( "runtime": result.time, "method": nb_config.execution_mode, "succeeded": False if result.err else True, + "error": f"{result.err.__class__.__name__}" if result.err else None, + "traceback": result.exc_string if result.err else None, } elif nb_config.execution_mode == "cache": @@ -110,6 +118,8 @@ def update_notebook( "runtime": cache_record.data.get("execution_seconds", None), "method": nb_config.execution_mode, "succeeded": True, + "error": None, + "traceback": None, } return notebook, exec_metadata @@ -161,6 +171,8 @@ def update_notebook( "runtime": result.time, "method": nb_config.execution_mode, "succeeded": False if result.err else True, + "error": f"{result.err.__class__.__name__}" if result.err else None, + "traceback": result.exc_string if result.err else None, } return notebook, exec_metadata diff --git a/myst_nb/new/execution_tables.py b/myst_nb/new/execution_tables.py new file mode 100644 index 00000000..1c33d408 --- /dev/null +++ b/myst_nb/new/execution_tables.py @@ -0,0 +1,168 @@ +"""Sphinx elements to create tables of statistics on executed notebooks. + +The `nb-exec-table` directive adds a placeholder node to the document, +which is then replaced by a table of statistics in a post-transformation +(once all the documents have been executed and these statistics are available). +""" +import posixpath +from datetime import datetime +from typing import Any, Dict + +from docutils import nodes +from sphinx.addnodes import pending_xref +from sphinx.application import Sphinx +from sphinx.transforms.post_transforms import SphinxPostTransform +from sphinx.util import logging +from sphinx.util.docutils import SphinxDirective + +SPHINX_LOGGER = logging.getLogger(__name__) + + +def setup_exec_table_extension(app: Sphinx) -> None: + """Add the Sphinx extension to the Sphinx application.""" + app.add_node(ExecutionStatsNode) + app.add_directive("nb-exec-table", ExecutionStatsTable) + app.connect("env-before-read-docs", check_if_executing) + app.connect("env-updated", update_exec_tables) + app.add_post_transform(ExecutionStatsPostTransform) + + +class ExecutionStatsNode(nodes.General, nodes.Element): + """A placeholder node, for adding a notebook execution statistics table.""" + + +class ExecutionStatsTable(SphinxDirective): + """Add a notebook execution statistics table.""" + + has_content = True + final_argument_whitespace = True + + def run(self): + """Add a placeholder node to the document, and mark it as having a table.""" + self.env.metadata[self.env.docname]["__mystnb__has_exec_table"] = True + return [ExecutionStatsNode()] + + +def check_if_executing(app: Sphinx, env, docnames) -> None: + """Check if a document might be executed.""" + # TODO this is a sub-optimal solution, since it only stops exec tables from being + # updated if any document is reparsed. + # Ideally we would only update the tables if a document is re-executed, but + # but we need to store this on the env, whilst accounting for parallel env merges. + env.mystnb_update_exec_tables = True if docnames else False + + +def update_exec_tables(app: Sphinx, env): + """If a document has been re-executed, return all documents containing tables. + + These documents will be updated with the new statistics. 
+ """ + if not env.mystnb_update_exec_tables: + return None + to_update = [ + docname + for docname in env.metadata + if "__mystnb__has_exec_table" in env.metadata[docname] + ] + if to_update: + SPHINX_LOGGER.info( + f"Updating {len(to_update)} file(s) with execution table [mystnb]" + ) + return to_update + + +class ExecutionStatsPostTransform(SphinxPostTransform): + """Replace the placeholder node with the final table nodes.""" + + default_priority = 8 # before ReferencesResolver (10) and MystReferenceResolver(9) + + def run(self, **kwargs) -> None: + """Replace the placeholder node with the final table nodes.""" + for node in self.document.traverse(ExecutionStatsNode): + node.replace_self(make_stat_table(self.env.docname, self.env.metadata)) + + +_key2header = { + "mtime": "Modified", + "method": "Method", + "runtime": "Run Time (s)", + "succeeded": "Status", +} + +_key2transform = { + "mtime": lambda x: datetime.fromtimestamp(x).strftime("%Y-%m-%d %H:%M") + if x + else "", + "method": str, + "runtime": lambda x: "-" if x is None else str(round(x, 2)), + "succeeded": lambda x: "✅" if x is True else "❌", +} + + +def make_stat_table( + parent_docname: str, metadata: Dict[str, Dict[str, Any]] +) -> nodes.table: + """Create a table of statistics on executed notebooks.""" + + # top-level element + table = nodes.table() + table["classes"] += ["colwidths-auto"] + # self.set_source_info(table) + + # column settings element + ncols = len(_key2header) + 1 + tgroup = nodes.tgroup(cols=ncols) + table += tgroup + colwidths = [round(100 / ncols, 2)] * ncols + for colwidth in colwidths: + colspec = nodes.colspec(colwidth=colwidth) + tgroup += colspec + + # header + thead = nodes.thead() + tgroup += thead + row = nodes.row() + thead += row + + for name in ["Document"] + list(_key2header.values()): + row.append(nodes.entry("", nodes.paragraph(text=name))) + + # body + tbody = nodes.tbody() + tgroup += tbody + + for docname in sorted(metadata): + if "__mystnb__exec_data" not in metadata[docname]: + continue + data = metadata[docname]["__mystnb__exec_data"] + row = nodes.row() + tbody += row + + # document name + doclink = pending_xref( + refdoc=parent_docname, + reftarget=posixpath.relpath(docname, posixpath.dirname(parent_docname)), + reftype="doc", + refdomain="std", + refexplicit=True, + refwarn=True, + classes=["xref", "doc"], + ) + doclink += nodes.inline(text=docname) + paragraph = nodes.paragraph() + paragraph += doclink + row.append(nodes.entry("", paragraph)) + + # other rows + for name in _key2header.keys(): + paragraph = nodes.paragraph() + if name == "succeeded" and data[name] is False: + paragraph += nodes.abbreviation( + text=_key2transform[name](data[name]), + explanation=(data["error"] or ""), + ) + else: + paragraph += nodes.Text(_key2transform[name](data[name])) + row.append(nodes.entry("", paragraph)) + + return table diff --git a/myst_nb/new/loggers.py b/myst_nb/new/loggers.py index b34fe57c..0204987a 100644 --- a/myst_nb/new/loggers.py +++ b/myst_nb/new/loggers.py @@ -36,7 +36,7 @@ def __init__(self, document: nodes.document, type_name: str = DEFAULT_LOG_TYPE): self.logger = sphinx_logging.getLogger(f"{type_name}-{docname}") # default extras to parse to sphinx logger # location can be: docname, (docname, lineno), or a node - self.extra = {"location": docname, "type": type_name} + self.extra = {"docname": docname, "type": type_name} def process(self, msg, kwargs): kwargs["extra"] = self.extra @@ -45,7 +45,9 @@ def process(self, msg, kwargs): subtype = ("." 
+ kwargs["subtype"]) if "subtype" in kwargs else "" if "line" in kwargs: # add line to location # note this will be overridden by the location keyword - self.extra["location"] = (self.extra["location"], kwargs.pop("line")) + self.extra["location"] = (self.extra["docname"], kwargs.pop("line")) + else: + self.extra["location"] = self.extra["docname"] if "parent" in kwargs: # TODO ideally here we would append a system_message to this node, # then it could replace myst_parser.SphinxRenderer.create_warning diff --git a/myst_nb/new/sphinx_.py b/myst_nb/new/sphinx_.py index 13ad63d4..a6089a1e 100644 --- a/myst_nb/new/sphinx_.py +++ b/myst_nb/new/sphinx_.py @@ -1,5 +1,6 @@ """An extension for sphinx""" import json +import os from pathlib import Path from typing import Any, Dict, List, Optional, Sequence @@ -13,14 +14,17 @@ from myst_parser.sphinx_parser import MystParser from myst_parser.sphinx_renderer import SphinxRenderer from nbformat import NotebookNode +from sphinx.addnodes import download_reference from sphinx.application import Sphinx from sphinx.environment import BuildEnvironment from sphinx.transforms.post_transforms import SphinxPostTransform from sphinx.util import logging as sphinx_logging +from sphinx.util.docutils import ReferenceRole from myst_nb import __version__ from myst_nb.configuration import NbParserConfig from myst_nb.new.execute import update_notebook +from myst_nb.new.execution_tables import setup_exec_table_extension from myst_nb.new.loggers import DEFAULT_LOG_TYPE, SphinxDocLogger from myst_nb.new.parse import notebook_to_tokens from myst_nb.new.read import UnexpectedCellDirective, create_nb_reader @@ -34,21 +38,18 @@ SPHINX_LOGGER = sphinx_logging.getLogger(__name__) UNSET = "--unset--" - - -def setup(app): - return sphinx_setup(app) +OUTPUT_FOLDER = "jupyter_execute" def sphinx_setup(app: Sphinx): """Initialize Sphinx extension.""" - app.add_source_suffix(".md", "myst-nb") - app.add_source_suffix(".ipynb", "myst-nb") - app.add_source_parser(MystNbParser) + # note, for core events overview, see: + # https://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx-core-events - # Add myst-parser configuration and transforms + # Add myst-parser configuration and transforms (but does not add the parser) setup_myst_parser(app) + # add myst-nb configuration variables for name, default, field in NbParserConfig().as_triple(): if not field.metadata.get("sphinx_exclude"): # TODO add types? 
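The loop above registers each `NbParserConfig` field as a Sphinx config value under the `nb_` prefix, which is what surfaces the renamed options used in the documentation changes of this commit. In a project's `conf.py` this looks roughly as follows (a sketch; the values shown are illustrative, not the package defaults):

```python
# conf.py -- illustrative values only; option names follow the nb_ renames in this series
extensions = ["myst_nb"]

nb_execution_mode = "cache"                # previously: jupyter_execute_notebooks
nb_execution_cache_path = ""               # previously: jupyter_cache ("" -> _build/.jupyter_cache)
nb_execution_excludepatterns = ["*slow*"]  # previously: execution_excludepatterns
nb_execution_timeout = 60                  # previously: execution_timeout
nb_execution_in_temp = False               # previously: execution_in_temp
nb_execution_allow_errors = False          # previously: execution_allow_errors
```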
@@ -59,15 +60,28 @@ def sphinx_setup(app: Sphinx): ) # generate notebook configuration from Sphinx configuration + # this also validates the configuration values app.connect("builder-inited", create_mystnb_config) + # add parser and default associated file suffixes + app.add_source_parser(MystNbParser) + app.add_source_suffix(".md", "myst-nb", override=True) + app.add_source_suffix(".ipynb", "myst-nb") + # add additional file suffixes for parsing + app.connect("config-inited", add_nb_custom_formats) # ensure notebook checkpoints are excluded from parsing app.connect("config-inited", add_exclude_patterns) + # TODO add an event which, if any files have been removed, + # all stage records with a non-existent path are removed + # add directive to ensure all notebook cells are converted app.add_directive("code-cell", UnexpectedCellDirective) app.add_directive("raw-cell", UnexpectedCellDirective) + # add directive for downloading an executed notebook + app.add_role("nb-download", NbDownloadRole()) + # add post-transform for selecting mime type from a bundle app.add_post_transform(SelectMimeType) @@ -77,7 +91,10 @@ def sphinx_setup(app: Sphinx): # note, this event is only available in Sphinx >= 3.5 app.connect("html-page-context", install_ipywidgets) - # TODO do we need to add lexers, if they are anyhow added via entry-points? + # Note lexers are registered as `pygments.lexers` entry-points + # and so do not need to be added here. + + setup_exec_table_extension(app) return { "version": __version__, @@ -86,6 +103,12 @@ def sphinx_setup(app: Sphinx): } +def add_nb_custom_formats(app: Sphinx, config): + """Add custom conversion formats.""" + for suffix in config.nb_custom_formats: + app.add_source_suffix(suffix, "myst-nb", override=True) + + def create_mystnb_config(app): """Generate notebook configuration from Sphinx configuration""" @@ -120,7 +143,7 @@ def create_mystnb_config(app): # update the output_folder (for writing external files like images), # and the execution_cache_path (for caching notebook outputs) # to a set path within the sphinx build folder - output_folder = Path(app.outdir).parent.joinpath("jupyter_execute").resolve() + output_folder = Path(app.outdir).parent.joinpath(OUTPUT_FOLDER).resolve() exec_cache_path = app.env.mystnb_config.execution_cache_path if not exec_cache_path: exec_cache_path = Path(app.outdir).parent.joinpath(".jupyter_cache").resolve() @@ -137,7 +160,8 @@ def add_exclude_patterns(app: Sphinx, config): def add_static_path(app: Sphinx): """Add static path for myst-nb.""" - static_path = Path(__file__).absolute().with_name("_static") + # TODO better to use importlib_resources here, or perhaps now there is another way? 
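    # Illustrative sketch only (not part of this patch): the importlib-resources route
    # mentioned in the TODO above could look roughly like
    #     from importlib.resources import files  # or the importlib_resources backport
    #     static_path = files("myst_nb") / "_static"
    # though html_static_path still needs a real on-disk path, hence the
    # Path(__file__)-based approach kept below.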
+ static_path = Path(__file__).parent.absolute().with_name("_static") app.config.html_static_path.append(str(static_path)) @@ -292,9 +316,8 @@ def render_nb_metadata(self, token: SyntaxTreeNode) -> None: # https://github.com/jupyter-widgets/ipywidgets/blob/32f59acbc63c3ff0acf6afa86399cb563d3a9a86/packages/html-manager/src/libembed.ts#L36 ipywidgets = metadata.pop("widgets", None) ipywidgets_mime = (ipywidgets or {}).get(WIDGET_STATE_MIMETYPE, {}) - ipywidgets_state = ipywidgets_mime.get("state", None) - if ipywidgets_state: - string = sanitize_script_content(json.dumps(ipywidgets_state)) + if ipywidgets_mime.get("state", None): + string = sanitize_script_content(json.dumps(ipywidgets_mime)) store_doc_metadata( self.sphinx_env, self.sphinx_env.docname, "ipywidgets_state", string ) @@ -490,6 +513,7 @@ def run(self, **kwargs: Any) -> None: else: break if index is None: + # TODO ignore if glue mime types present? SPHINX_LOGGER.warning( f"No mime type available in priority list builder {name!r} " f"[{DEFAULT_LOG_TYPE}.mime_priority]", @@ -500,3 +524,26 @@ def run(self, **kwargs: Any) -> None: node.parent.remove(node) else: node.replace_self(node.children[index]) + + +class NbDownloadRole(ReferenceRole): + """Role to download an executed notebook.""" + + def run(self): + """Run the role.""" + # get a path relative to the current document + path = Path(self.env.mystnb_config.output_folder).joinpath( + *(self.env.docname.split("/")[:-1] + self.target.split("/")) + ) + reftarget = ( + path.as_posix() + if os.name == "nt" + else ("/" + os.path.relpath(path, self.env.app.srcdir)) + ) + node = download_reference(self.rawtext, reftarget=reftarget) + self.set_source_info(node) + title = self.title if self.has_explicit_title else self.target + node += nodes.literal( + self.rawtext, title, classes=["xref", "download", "myst-nb"] + ) + return [node], [] diff --git a/tests/test_sphinx_builds.py b/tests/test_sphinx_builds.py index 1cbef807..da3e63da 100644 --- a/tests/test_sphinx_builds.py +++ b/tests/test_sphinx_builds.py @@ -3,10 +3,7 @@ import pytest -@pytest.mark.sphinx_params( - "basic_run.ipynb", - conf={"extensions": ["myst_nb.new.sphinx_"], "nb_execution_mode": "off"}, -) +@pytest.mark.sphinx_params("basic_run.ipynb", conf={"nb_execution_mode": "off"}) def test_basic_run(sphinx_run, file_regression): sphinx_run.build() # print(sphinx_run.status()) @@ -22,10 +19,7 @@ def test_basic_run(sphinx_run, file_regression): ) -@pytest.mark.sphinx_params( - "basic_unrun.md", - conf={"extensions": ["myst_nb.new.sphinx_"], "nb_execution_mode": "off"}, -) +@pytest.mark.sphinx_params("basic_unrun.md", conf={"nb_execution_mode": "off"}) def test_basic_run_md(sphinx_run, file_regression): sphinx_run.build() # print(sphinx_run.status()) @@ -35,10 +29,7 @@ def test_basic_run_md(sphinx_run, file_regression): ) -@pytest.mark.sphinx_params( - "complex_outputs.ipynb", - conf={"extensions": ["myst_nb.new.sphinx_"], "nb_execution_mode": "off"}, -) +@pytest.mark.sphinx_params("complex_outputs.ipynb", conf={"nb_execution_mode": "off"}) def test_complex_outputs_run(sphinx_run, file_regression): sphinx_run.build() # print(sphinx_run.status()) @@ -55,10 +46,7 @@ def test_complex_outputs_run(sphinx_run, file_regression): ) -@pytest.mark.sphinx_params( - "ipywidgets.ipynb", - conf={"extensions": ["myst_nb.new.sphinx_"], "nb_execution_mode": "off"}, -) +@pytest.mark.sphinx_params("ipywidgets.ipynb", conf={"nb_execution_mode": "off"}) def test_ipywidgets(sphinx_run): """Test that ipywidget state is extracted and JS is included in the 
HTML head.""" sphinx_run.build() From 8ce2d8cb0988e3a9930438c5665d53662caba72a Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Wed, 5 Jan 2022 05:09:16 +0100 Subject: [PATCH 23/75] re-add toggle-button integration --- docs/conf.py | 1 - myst_nb/new/sphinx_.py | 26 +++++++++++++++++++++++--- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index ad1d1901..63d43e43 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -18,7 +18,6 @@ # ones. extensions = [ "myst_nb", - "sphinx_togglebutton", "sphinx_copybutton", "sphinx.ext.intersphinx", "sphinx.ext.autodoc", diff --git a/myst_nb/new/sphinx_.py b/myst_nb/new/sphinx_.py index a6089a1e..3fdde3c6 100644 --- a/myst_nb/new/sphinx_.py +++ b/myst_nb/new/sphinx_.py @@ -86,14 +86,20 @@ def sphinx_setup(app: Sphinx): app.add_post_transform(SelectMimeType) # add HTML resources - app.connect("builder-inited", add_static_path) + app.connect("builder-inited", add_html_static_path) app.add_css_file("mystnb.css") # note, this event is only available in Sphinx >= 3.5 app.connect("html-page-context", install_ipywidgets) + # add configuration for hiding cell input/output + # TODO replace this, or make it optional + app.setup_extension("sphinx_togglebutton") + app.connect("config-inited", update_togglebutton_classes) + # Note lexers are registered as `pygments.lexers` entry-points # and so do not need to be added here. + # setup extension for execution statistics tables setup_exec_table_extension(app) return { @@ -158,8 +164,8 @@ def add_exclude_patterns(app: Sphinx, config): config.exclude_patterns.append("**.ipynb_checkpoints") -def add_static_path(app: Sphinx): - """Add static path for myst-nb.""" +def add_html_static_path(app: Sphinx): + """Add static path for HTML resources.""" # TODO better to use importlib_resources here, or perhaps now there is another way? 
static_path = Path(__file__).parent.absolute().with_name("_static") app.config.html_static_path.append(str(static_path)) @@ -185,6 +191,20 @@ def install_ipywidgets(app: Sphinx, pagename: str, *args: Any, **kwargs: Any) -> ) +def update_togglebutton_classes(app: Sphinx, config): + """Update togglebutton classes to recognise hidden cell inputs/outputs.""" + to_add = [ + ".tag_hide_input div.cell_input", + ".tag_hide-input div.cell_input", + ".tag_hide_output div.cell_output", + ".tag_hide-output div.cell_output", + ".tag_hide_cell.cell", + ".tag_hide-cell.cell", + ] + for selector in to_add: + config.togglebutton_selector += f", {selector}" + + def store_doc_metadata(env: BuildEnvironment, docname: str, key: str, value: Any): """Store myst-nb metadata for a document.""" # Data in env.metadata is correctly handled, by sphinx.MetadataCollector, From c21ab70adeca7e728f33663ff2238d6cbe2e6194 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Wed, 5 Jan 2022 07:01:43 +0100 Subject: [PATCH 24/75] Replace all old code --- .gitignore | 1 + .pre-commit-config.yaml | 25 +- docs/api/index.rst | 6 - docs/api/nodes.rst | 26 - docs/api/render_outputs.rst | 30 - myst_nb/__init__.py | 2 +- myst_nb/configuration.py | 7 +- myst_nb/converter.py | 318 --------- myst_nb/docutils_.py | 12 +- myst_nb/exec_table.py | 145 ----- myst_nb/{new => }/execute.py | 10 +- myst_nb/execution.py | 345 ---------- myst_nb/{new => }/execution_tables.py | 8 +- myst_nb/extension.py | 426 ------------- myst_nb/jsphinx.py | 125 ---- myst_nb/lexers.py | 5 +- myst_nb/{new => }/loggers.py | 0 myst_nb/nb_glue/domain.py | 46 +- myst_nb/new/__init__.py | 0 myst_nb/nodes.py | 106 --- myst_nb/{new => }/parse.py | 0 myst_nb/parser.py | 314 --------- myst_nb/{new => }/read.py | 8 +- myst_nb/{new => }/render.py | 8 +- myst_nb/render_outputs.py | 602 ------------------ myst_nb/{new => }/sphinx_.py | 22 +- pyproject.toml | 1 + setup.cfg | 15 +- tests/conftest.py | 8 +- tests/test_ansi_lexer.py | 2 +- tests/test_execute.py | 44 +- tests/test_execute/test_allow_errors_auto.xml | 14 +- .../test_execute/test_allow_errors_cache.xml | 14 +- tests/test_execute/test_basic_unrun_auto.xml | 9 +- tests/test_execute/test_basic_unrun_cache.xml | 9 +- .../test_complex_outputs_unrun_auto.ipynb | 10 +- .../test_complex_outputs_unrun_auto.xml | 156 ++++- .../test_complex_outputs_unrun_cache.ipynb | 10 +- .../test_complex_outputs_unrun_cache.xml | 156 ++++- .../test_custom_convert_auto.ipynb | 15 +- .../test_execute/test_custom_convert_auto.xml | 19 +- .../test_custom_convert_cache.ipynb | 15 +- .../test_custom_convert_cache.xml | 19 +- tests/test_execute/test_no_execute.xml | 4 +- tests/test_execute/test_outputs_present.xml | 9 +- tests/test_glue.py | 6 +- tests/test_mystnb_features.py | 4 +- tests/test_nb_render.py | 10 +- tests/test_parser.py | 6 +- tests/test_parser/test_toctree_in_ipynb.xml | 9 +- tests/test_render_outputs.py | 23 +- tests/test_render_outputs/test_basic_run.xml | 6 +- .../test_complex_outputs.xml | 172 ++--- .../test_complex_outputs_latex.xml | 92 +-- .../test_merge_streams.xml | 11 +- .../test_stderr_remove.xml | 12 +- tests/test_render_outputs/test_stderr_tag.xml | 14 +- tests/test_text_based.py | 6 +- 58 files changed, 670 insertions(+), 2827 deletions(-) delete mode 100644 docs/api/nodes.rst delete mode 100644 docs/api/render_outputs.rst delete mode 100644 myst_nb/converter.py delete mode 100644 myst_nb/exec_table.py rename myst_nb/{new => }/execute.py (96%) delete mode 100644 myst_nb/execution.py rename 
myst_nb/{new => }/execution_tables.py (97%) delete mode 100644 myst_nb/extension.py delete mode 100644 myst_nb/jsphinx.py rename myst_nb/{new => }/loggers.py (100%) delete mode 100644 myst_nb/new/__init__.py delete mode 100644 myst_nb/nodes.py rename myst_nb/{new => }/parse.py (100%) delete mode 100644 myst_nb/parser.py rename myst_nb/{new => }/read.py (99%) rename myst_nb/{new => }/render.py (100%) delete mode 100644 myst_nb/render_outputs.py rename myst_nb/{new => }/sphinx_.py (97%) diff --git a/.gitignore b/.gitignore index dfdb11a6..c5ac8dcf 100644 --- a/.gitignore +++ b/.gitignore @@ -138,3 +138,4 @@ dmypy.json .vscode/ todos.md +_archive/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 66a447e8..cd8f4de3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -42,17 +42,20 @@ repos: - id: flake8 additional_dependencies: [flake8-bugbear==21.3.1] - - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.910-1 - hooks: - - id: mypy - args: [--config-file=setup.cfg] - additional_dependencies: - - myst-parser~=0.16.1 - files: > - (?x)^( - myst_nb/parser.py| - )$ + # - repo: https://github.com/pre-commit/mirrors-mypy + # rev: v0.910-1 + # hooks: + # - id: mypy + # args: [--config-file=setup.cfg] + # additional_dependencies: + # - importlib_metadata + # - myst-parser~=0.16.1 + # - "sphinx~=4.3.2" + # - types-PyYAML + # files: > + # (?x)^( + # myst_nb/[^/]+.py| + # )$ # this is not used for now, # since it converts myst-nb to myst_nb and removes comments diff --git a/docs/api/index.rst b/docs/api/index.rst index fd4e3ef8..867b299a 100644 --- a/docs/api/index.rst +++ b/docs/api/index.rst @@ -3,12 +3,6 @@ Python API ========== -.. toctree:: - :maxdepth: 2 - - nodes - render_outputs - Miscellaneous ------------- diff --git a/docs/api/nodes.rst b/docs/api/nodes.rst deleted file mode 100644 index cedde74f..00000000 --- a/docs/api/nodes.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. _api/nodes: - -AST Nodes ---------- - -.. automodule:: myst_nb.nodes - -.. autoclass:: myst_nb.nodes.CellNode - :members: - :undoc-members: - :show-inheritance: - -.. autoclass:: myst_nb.nodes.CellInputNode - :members: - :undoc-members: - :show-inheritance: - -.. autoclass:: myst_nb.nodes.CellOutputNode - :members: - :undoc-members: - :show-inheritance: - -.. autoclass:: myst_nb.nodes.CellOutputBundleNode - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/api/render_outputs.rst b/docs/api/render_outputs.rst deleted file mode 100644 index f44a2f69..00000000 --- a/docs/api/render_outputs.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. _api/output_renderer: - -Output Renderer ---------------- - -.. automodule:: myst_nb.render_outputs - -.. autoclass:: myst_nb.render_outputs.CellOutputsToNodes - :members: - :undoc-members: - :show-inheritance: - -.. autoexception:: myst_nb.render_outputs.MystNbEntryPointError - :members: - :undoc-members: - :show-inheritance: - -.. autofunction:: myst_nb.render_outputs.load_renderer - - -.. autoclass:: myst_nb.render_outputs.CellOutputRendererBase - :members: - :undoc-members: - :show-inheritance: - :special-members: __init__ - -.. 
autoclass:: myst_nb.render_outputs.CellOutputRenderer - :members: - :undoc-members: - :show-inheritance: diff --git a/myst_nb/__init__.py b/myst_nb/__init__.py index 5d44742f..9955a868 100644 --- a/myst_nb/__init__.py +++ b/myst_nb/__init__.py @@ -5,7 +5,7 @@ def setup(app): """Sphinx extension setup.""" # we import this locally, so sphinx is not automatically imported - from .new.sphinx_ import sphinx_setup + from .sphinx_ import sphinx_setup return sphinx_setup(app) diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index 14c726d0..5dea66d6 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -189,14 +189,15 @@ class NbParserConfig: # notebook execution options - execution_mode: Literal["off", "force", "cache"] = attr.ib( + execution_mode: Literal["off", "force", "auto", "cache"] = attr.ib( # TODO different default for docutils (off) and sphinx (cache)? - # TODO deprecate auto - default="off", + # TODO deprecate auto and set cache as default instead + default="auto", validator=in_( [ "off", "force", + "auto", "cache", ] ), diff --git a/myst_nb/converter.py b/myst_nb/converter.py deleted file mode 100644 index 1ab88f7d..00000000 --- a/myst_nb/converter.py +++ /dev/null @@ -1,318 +0,0 @@ -import json -from pathlib import Path -from typing import Callable, Iterable, Optional - -import attr -import nbformat as nbf -import yaml -from markdown_it.renderer import RendererHTML -from myst_parser.main import MdParserConfig, create_md_parser -from sphinx.environment import BuildEnvironment -from sphinx.util import import_object, logging - -NOTEBOOK_VERSION = 4 -CODE_DIRECTIVE = "{code-cell}" -RAW_DIRECTIVE = "{raw-cell}" - -LOGGER = logging.getLogger(__name__) - - -@attr.s -class NbConverter: - func: Callable[[str], nbf.NotebookNode] = attr.ib() - config: MdParserConfig = attr.ib() - - -def get_nb_converter( - path: str, - env: BuildEnvironment, - source_iter: Optional[Iterable[str]] = None, -) -> Optional[NbConverter]: - """Get function, to convert a source string to a Notebook.""" - - # Standard notebooks take priority - if path.endswith(".ipynb"): - return NbConverter( - lambda text: nbf.reads(text, as_version=NOTEBOOK_VERSION), env.myst_config - ) - - # we check suffixes ordered by longest first, to ensure we get the "closest" match - for source_suffix in sorted( - env.config.nb_custom_formats.keys(), key=len, reverse=True - ): - if path.endswith(source_suffix): - ( - converter, - converter_kwargs, - commonmark_only, - ) = env.config.nb_custom_formats[source_suffix] - converter = import_object(converter) - a = NbConverter( - lambda text: converter(text, **(converter_kwargs or {})), - env.myst_config - if commonmark_only is None - else attr.evolve(env.myst_config, commonmark_only=commonmark_only), - ) - return a - - # If there is no source text then we assume a MyST Notebook - if source_iter is None: - # Check if docname exists - return NbConverter( - lambda text: myst_to_notebook( - text, - config=env.myst_config, - add_source_map=True, - path=path, - ), - env.myst_config, - ) - - # Given the source lines, we check it can be recognised as a MyST Notebook - if is_myst_notebook(source_iter): - # Check if docname exists - return NbConverter( - lambda text: myst_to_notebook( - text, - config=env.myst_config, - add_source_map=True, - path=path, - ), - env.myst_config, - ) - - # Otherwise, we return None, - # to imply that it should be parsed as as standard Markdown file - return None - - -def is_myst_notebook(line_iter: Iterable[str]) -> bool: - """Is the text file a 
MyST based notebook representation?""" - # we need to distinguish between markdown representing notebooks - # and standard notebooks. - # Therefore, for now we require that, at a mimimum we can find some top matter - # containing the jupytext format_name - yaml_lines = [] - for i, line in enumerate(line_iter): - if i == 0 and not line.startswith("---"): - return False - if i != 0 and (line.startswith("---") or line.startswith("...")): - break - yaml_lines.append(line.rstrip() + "\n") - - try: - front_matter = yaml.safe_load("".join(yaml_lines)) - except Exception: - return False - if front_matter is None: # this can occur for empty files - return False - if ( - front_matter.get("jupytext", {}) - .get("text_representation", {}) - .get("format_name", None) - != "myst" - ): - return False - - if "name" not in front_matter.get("kernelspec", {}): - raise IOError( - "A myst notebook text-representation requires " "kernelspec/name metadata" - ) - if "display_name" not in front_matter.get("kernelspec", {}): - raise IOError( - "A myst notebook text-representation requires " - "kernelspec/display_name metadata" - ) - return True - - -class MystMetadataParsingError(Exception): - """Error when parsing metadata from myst formatted text""" - - -class LoadFileParsingError(Exception): - """Error when parsing files for code-blocks/code-cells""" - - -def strip_blank_lines(text): - text = text.rstrip() - while text and text.startswith("\n"): - text = text[1:] - return text - - -class MockDirective: - option_spec = {"options": True} - required_arguments = 0 - optional_arguments = 1 - has_content = True - - -def read_fenced_cell(token, cell_index, cell_type): - from myst_parser.parse_directives import DirectiveParsingError, parse_directive_text - - try: - _, options, body_lines = parse_directive_text( - directive_class=MockDirective, - first_line="", - content=token.content, - validate_options=False, - ) - except DirectiveParsingError as err: - raise MystMetadataParsingError( - "{0} cell {1} at line {2} could not be read: {3}".format( - cell_type, cell_index, token.map[0] + 1, err - ) - ) - return options, body_lines - - -def read_cell_metadata(token, cell_index): - metadata = {} - if token.content: - try: - metadata = json.loads(token.content.strip()) - except Exception as err: - raise MystMetadataParsingError( - "Markdown cell {0} at line {1} could not be read: {2}".format( - cell_index, token.map[0] + 1, err - ) - ) - if not isinstance(metadata, dict): - raise MystMetadataParsingError( - "Markdown cell {0} at line {1} is not a dict".format( - cell_index, token.map[0] + 1 - ) - ) - - return metadata - - -def load_code_from_file(nb_path, file_name, token, body_lines): - """load source code from a file.""" - if nb_path is None: - raise LoadFileParsingError("path to notebook not supplied for :load:") - file_path = Path(nb_path).parent.joinpath(file_name).resolve() - if len(body_lines): - line = token.map[0] if token.map else 0 - msg = ( - f"{nb_path}:{line} content of code-cell is being overwritten by " - f":load: {file_name}" - ) - LOGGER.warning(msg) - try: - body_lines = file_path.read_text().split("\n") - except Exception: - raise LoadFileParsingError("Can't read file from :load: {}".format(file_path)) - return body_lines - - -def myst_to_notebook( - text, - config: MdParserConfig, - code_directive=CODE_DIRECTIVE, - raw_directive=RAW_DIRECTIVE, - add_source_map=False, - path: Optional[str] = None, -): - """Convert text written in the myst format to a notebook. 
- - :param text: the file text - :param code_directive: the name of the directive to search for containing code cells - :param raw_directive: the name of the directive to search for containing raw cells - :param add_source_map: add a `source_map` key to the notebook metadata, - which is a list of the starting source line number for each cell. - :param path: path to notebook (required for :load:) - - :raises MystMetadataParsingError if the metadata block is not valid JSON/YAML - - NOTE: we assume here that all of these directives are at the top-level, - i.e. not nested in other directives. - """ - # TODO warn about nested code-cells - - # parse markdown file up to the block level (i.e. don't worry about inline text) - inline_config = attr.evolve( - config, disable_syntax=(config.disable_syntax + ["inline"]) - ) - parser = create_md_parser(inline_config, RendererHTML) - tokens = parser.parse(text + "\n") - lines = text.splitlines() - md_start_line = 0 - - # get the document metadata - metadata_nb = {} - if tokens[0].type == "front_matter": - metadata = tokens.pop(0) - md_start_line = metadata.map[1] - try: - metadata_nb = yaml.safe_load(metadata.content) - except (yaml.parser.ParserError, yaml.scanner.ScannerError) as error: - raise MystMetadataParsingError("Notebook metadata: {}".format(error)) - - # create an empty notebook - nbf_version = nbf.v4 - kwargs = {"metadata": nbf.from_dict(metadata_nb)} - notebook = nbf_version.new_notebook(**kwargs) - source_map = [] # this is a list of the starting line number for each cell - - def _flush_markdown(start_line, token, md_metadata): - """When we find a cell we check if there is preceding text.o""" - endline = token.map[0] if token else len(lines) - md_source = strip_blank_lines("\n".join(lines[start_line:endline])) - meta = nbf.from_dict(md_metadata) - if md_source: - source_map.append(start_line) - notebook.cells.append( - nbf_version.new_markdown_cell(source=md_source, metadata=meta) - ) - - # iterate through the tokens to identify notebook cells - nesting_level = 0 - md_metadata = {} - - for token in tokens: - - nesting_level += token.nesting - - if nesting_level != 0: - # we ignore fenced block that are nested, e.g. 
as part of lists, etc - continue - - if token.type == "fence" and token.info.startswith(code_directive): - _flush_markdown(md_start_line, token, md_metadata) - options, body_lines = read_fenced_cell(token, len(notebook.cells), "Code") - # Parse :load: or load: tags and populate body with contents of file - if "load" in options: - body_lines = load_code_from_file( - path, options["load"], token, body_lines - ) - meta = nbf.from_dict(options) - source_map.append(token.map[0] + 1) - notebook.cells.append( - nbf_version.new_code_cell(source="\n".join(body_lines), metadata=meta) - ) - md_metadata = {} - md_start_line = token.map[1] - - elif token.type == "fence" and token.info.startswith(raw_directive): - _flush_markdown(md_start_line, token, md_metadata) - options, body_lines = read_fenced_cell(token, len(notebook.cells), "Raw") - meta = nbf.from_dict(options) - source_map.append(token.map[0] + 1) - notebook.cells.append( - nbf_version.new_raw_cell(source="\n".join(body_lines), metadata=meta) - ) - md_metadata = {} - md_start_line = token.map[1] - - elif token.type == "myst_block_break": - _flush_markdown(md_start_line, token, md_metadata) - md_metadata = read_cell_metadata(token, len(notebook.cells)) - md_start_line = token.map[1] - - _flush_markdown(md_start_line, None, md_metadata) - - if add_source_map: - notebook.metadata["source_map"] = source_map - return notebook diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 4fa7fa3b..0b5201b4 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -2,7 +2,6 @@ from functools import partial from typing import Any, Dict, List, Optional, Tuple -import nbformat from docutils import nodes from docutils.core import default_description, publish_cmdline from docutils.parsers.rst.directives import register_directive @@ -13,19 +12,20 @@ from myst_parser.docutils_ import create_myst_config, create_myst_settings_spec from myst_parser.docutils_renderer import DocutilsRenderer, token_line from myst_parser.main import MdParserConfig, create_md_parser +import nbformat from nbformat import NotebookNode from myst_nb.configuration import NbParserConfig -from myst_nb.new.execute import update_notebook -from myst_nb.new.loggers import DEFAULT_LOG_TYPE, DocutilsDocLogger -from myst_nb.new.parse import notebook_to_tokens -from myst_nb.new.read import ( +from myst_nb.execute import update_notebook +from myst_nb.loggers import DEFAULT_LOG_TYPE, DocutilsDocLogger +from myst_nb.parse import notebook_to_tokens +from myst_nb.read import ( NbReader, UnexpectedCellDirective, read_myst_markdown_notebook, standard_nb_read, ) -from myst_nb.new.render import NbElementRenderer, coalesce_streams, load_renderer +from myst_nb.render import NbElementRenderer, coalesce_streams, load_renderer DOCUTILS_EXCLUDED_ARGS = { f.name for f in NbParserConfig.get_fields() if f.metadata.get("docutils_exclude") diff --git a/myst_nb/exec_table.py b/myst_nb/exec_table.py deleted file mode 100644 index 2356945f..00000000 --- a/myst_nb/exec_table.py +++ /dev/null @@ -1,145 +0,0 @@ -"""A directive to create a table of executed notebooks, and related statistics. - -This directive utilises the -``env.nb_execution_data`` and ``env.nb_execution_data_changed`` variables, -set by myst-nb, to produce a table of statistics, -which will be updated when any notebooks are modified/removed. 
-""" -from datetime import datetime - -from docutils import nodes -from sphinx.transforms import SphinxTransform -from sphinx.transforms.post_transforms import SphinxPostTransform -from sphinx.util import logging -from sphinx.util.docutils import SphinxDirective - -LOGGER = logging.getLogger(__name__) - - -def setup_exec_table(app): - """execution statistics table.""" - app.add_node(ExecutionStatsNode) - app.add_directive("nb-exec-table", ExecutionStatsTable) - app.add_transform(ExecutionStatsTransform) - app.add_post_transform(ExecutionStatsPostTransform) - app.connect("builder-inited", add_doc_tracker) - app.connect("env-purge-doc", remove_doc) - app.connect("env-updated", update_exec_tables) - - -def add_doc_tracker(app): - """This variable keeps track of want documents contain - an `nb-exec-table` directive. - """ - if not hasattr(app.env, "docs_with_exec_table"): - app.env.docs_with_exec_table = set() - - -def remove_doc(app, env, docname): - env.docs_with_exec_table.discard(docname) - - -def update_exec_tables(app, env): - """If the execution data has changed, - this callback adds the list of documents containing an `nb-exec-table` directive - to the list of document that are outdated. - """ - if not (env.nb_execution_data_changed and env.docs_with_exec_table): - return None - if env.docs_with_exec_table: - LOGGER.info("Updating `nb-exec-table`s in: %s", env.docs_with_exec_table) - return list(env.docs_with_exec_table) - - -class ExecutionStatsNode(nodes.General, nodes.Element): - """A placeholder node, for adding a notebook execution statistics table.""" - - -class ExecutionStatsTable(SphinxDirective): - """Add a notebook execution statistics table.""" - - has_content = True - final_argument_whitespace = True - - def run(self): - - return [ExecutionStatsNode()] - - -class ExecutionStatsTransform(SphinxTransform): - """Updates the list of documents containing an `nb-exec-table` directive.""" - - default_priority = 400 - - def apply(self): - self.env.docs_with_exec_table.discard(self.env.docname) - for _ in self.document.traverse(ExecutionStatsNode): - self.env.docs_with_exec_table.add(self.env.docname) - break - - -class ExecutionStatsPostTransform(SphinxPostTransform): - """Replace the placeholder node with the final table nodes.""" - - default_priority = 400 - - def run(self, **kwargs) -> None: - for node in self.document.traverse(ExecutionStatsNode): - node.replace_self(make_stat_table(self.env.nb_execution_data)) - - -def make_stat_table(nb_execution_data): - - key2header = { - "mtime": "Modified", - "method": "Method", - "runtime": "Run Time (s)", - "succeeded": "Status", - } - - key2transform = { - "mtime": lambda x: datetime.fromtimestamp(x).strftime("%Y-%m-%d %H:%M") - if x - else "", - "method": str, - "runtime": lambda x: "-" if x is None else str(round(x, 2)), - "succeeded": lambda x: "✅" if x is True else "❌", - } - - # top-level element - table = nodes.table() - table["classes"] += ["colwidths-auto"] - # self.set_source_info(table) - - # column settings element - ncols = len(key2header) + 1 - tgroup = nodes.tgroup(cols=ncols) - table += tgroup - colwidths = [round(100 / ncols, 2)] * ncols - for colwidth in colwidths: - colspec = nodes.colspec(colwidth=colwidth) - tgroup += colspec - - # header - thead = nodes.thead() - tgroup += thead - row = nodes.row() - thead += row - - for name in ["Document"] + list(key2header.values()): - row.append(nodes.entry("", nodes.paragraph(text=name))) - - # body - tbody = nodes.tbody() - tgroup += tbody - - for docname in 
sorted(nb_execution_data.keys()): - data = nb_execution_data[docname] - row = nodes.row() - tbody += row - row.append(nodes.entry("", nodes.paragraph(text=docname))) - for name in key2header.keys(): - text = key2transform[name](data[name]) - row.append(nodes.entry("", nodes.paragraph(text=text))) - - return table diff --git a/myst_nb/new/execute.py b/myst_nb/execute.py similarity index 96% rename from myst_nb/new/execute.py rename to myst_nb/execute.py index 79669e2a..2d3f645d 100644 --- a/myst_nb/new/execute.py +++ b/myst_nb/execute.py @@ -1,8 +1,8 @@ """Module for executing notebooks.""" -import os from contextlib import nullcontext, suppress from datetime import datetime from logging import Logger +import os from pathlib import Path from tempfile import TemporaryDirectory from typing import Optional, Tuple @@ -59,7 +59,13 @@ def update_notebook( # TODO deal with nb_config.execution_excludepatterns - if nb_config.execution_mode == "force": + missing_outputs = ( + len(cell.outputs) == 0 for cell in notebook.cells if cell["cell_type"] == "code" + ) + + if nb_config.execution_mode == "force" or ( + nb_config.execution_mode == "auto" and any(missing_outputs) + ): # setup the execution current working directory if nb_config.execution_in_temp: diff --git a/myst_nb/execution.py b/myst_nb/execution.py deleted file mode 100644 index d47ef4e4..00000000 --- a/myst_nb/execution.py +++ /dev/null @@ -1,345 +0,0 @@ -"""Control notebook outputs generation, caching and retrieval - -The primary methods in this module are: - -- ``update_execution_cache``, which is called when sphinx detects outdated files. - When caching is enabled, this will execute the files if necessary and update the cache -- ``generate_notebook_outputs`` which is called during the parsing of each notebook. - If caching is enabled, this will attempt to pull the outputs from the cache, - or if 'auto' / 'force' is set, will execute the notebook. - -""" -import os -import re -import tempfile -from datetime import datetime -from pathlib import Path -from typing import Iterable, List, Optional, Set - -import nbformat as nbf -from jupyter_cache import get_cache -from jupyter_cache.executors import load_executor -from jupyter_cache.executors.utils import single_nb_execution -from sphinx.application import Sphinx -from sphinx.builders import Builder -from sphinx.environment import BuildEnvironment -from sphinx.util import logging, progress_message - -from myst_nb.converter import get_nb_converter - -LOGGER = logging.getLogger(__name__) - - -def update_execution_cache( - app: Sphinx, builder: Builder, added: Set[str], changed: Set[str], removed: Set[str] -): - """If caching is required, stage and execute the added or modified notebooks, - and cache them for later retrieval. - - This is called by sphinx as an `env-get-outdated` event, - which is emitted when the environment determines which source files have changed - and should be re-read. - - """ - # all the added and changed notebooks should be operated on. 
- # note docnames are paths relative to the sphinx root folder, with no extensions - altered_docnames = added.union(changed) - - exec_docnames = [ - docname for docname in altered_docnames if is_valid_exec_file(app.env, docname) - ] - LOGGER.verbose("MyST-NB: Potential docnames to execute: %s", exec_docnames) - - if app.config["jupyter_execute_notebooks"] == "cache": - - app.env.nb_path_to_cache = str( - app.config["jupyter_cache"] - or Path(app.outdir).parent.joinpath(".jupyter_cache") - ) - - cache_base = get_cache(app.env.nb_path_to_cache) - for path in removed: - - if path in app.env.nb_execution_data: - app.env.nb_execution_data_changed = True - app.env.nb_execution_data.pop(path, None) - - docpath = app.env.doc2path(path) - # there is an issue in sphinx doc2path, whereby if the path does not - # exist then it will be assigned the default source_suffix (usually .rst) - # therefore, to be safe here, we run through all possible suffixes - for suffix in app.env.nb_allowed_exec_suffixes: - docpath = os.path.splitext(docpath)[0] + suffix - if not os.path.exists(docpath): - cache_base.discard_staged_notebook(docpath) - - _stage_and_execute( - env=app.env, - exec_docnames=exec_docnames, - path_to_cache=app.env.nb_path_to_cache, - timeout=app.config["execution_timeout"], - allow_errors=app.config["execution_allow_errors"], - exec_in_temp=app.config["execution_in_temp"], - ) - - return [] - - -def generate_notebook_outputs( - env: BuildEnvironment, - ntbk: nbf.NotebookNode, - file_path: Optional[str] = None, - show_traceback: bool = False, -) -> nbf.NotebookNode: - """ - Add outputs to a NotebookNode by pulling from cache. - - Function to get the database instance. Get the cached output of the notebook - and merge it with the original notebook. If there is no cached output, - checks if there was error during execution, then saves the traceback to a log file. - """ - - # check if the file is of a format that may be associated with outputs - if not is_valid_exec_file(env, env.docname): - return ntbk - - # If we have a jupyter_cache, see if there's a cache for this notebook - file_path = file_path or env.doc2path(env.docname) - - execution_method = env.config["jupyter_execute_notebooks"] # type: str - - path_to_cache = env.nb_path_to_cache if "cache" in execution_method else None - - if not path_to_cache and "off" in execution_method: - return ntbk - - if not path_to_cache: - - if execution_method == "auto" and nb_has_all_output(file_path): - LOGGER.info( - "Did not execute %s. 
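The check added to execute.py in the rename hunk above makes "auto" mode execute a notebook only when at least one code cell has no outputs. A rough standalone illustration of that decision, using nbformat directly (the notebook path is hypothetical):

import nbformat

notebook = nbformat.read("example.ipynb", as_version=4)  # hypothetical notebook path

# generator of "does this code cell still lack outputs?" flags
missing_outputs = (
    len(cell.outputs) == 0
    for cell in notebook.cells
    if cell["cell_type"] == "code"
)

execution_mode = "auto"  # one of: "force", "auto", "cache", "off"
should_execute = execution_mode == "force" or (
    execution_mode == "auto" and any(missing_outputs)
)
print("execute notebook?", should_execute)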
" - "Set jupyter_execute_notebooks to `force` to execute", - env.docname, - ) - else: - if env.config["execution_in_temp"]: - with tempfile.TemporaryDirectory() as tmpdirname: - LOGGER.info("Executing: %s in temporary directory", env.docname) - result = single_nb_execution( - ntbk, - cwd=tmpdirname, - timeout=env.config["execution_timeout"], - allow_errors=env.config["execution_allow_errors"], - ) - else: - cwd = Path(file_path).parent - LOGGER.info("Executing: %s in: %s", env.docname, cwd) - result = single_nb_execution( - ntbk, - cwd=cwd, - timeout=env.config["execution_timeout"], - allow_errors=env.config["execution_allow_errors"], - ) - - report_path = None - if result.err: - report_path, message = _report_exec_fail( - env, - Path(file_path).name, - result.exc_string, - show_traceback, - "Execution Failed with traceback saved in {}", - ) - LOGGER.error(message) - - ntbk = result.nb - - env.nb_execution_data_changed = True - env.nb_execution_data[env.docname] = { - "mtime": datetime.now().timestamp(), - "runtime": result.time, - "method": execution_method, - "succeeded": False if result.err else True, - } - if report_path: - env.nb_execution_data[env.docname]["error_log"] = report_path - - return ntbk - - cache_base = get_cache(path_to_cache) - # Use relpath here in case Sphinx is building from a non-parent folder - r_file_path = Path(os.path.relpath(file_path, Path().resolve())) - - # default execution data - runtime = None - succeeded = False - report_path = None - - try: - pk, ntbk = cache_base.merge_match_into_notebook(ntbk) - except KeyError: - message = ( - f"Couldn't find cache key for notebook file {str(r_file_path)}. " - "Outputs will not be inserted." - ) - try: - stage_record = cache_base.get_staged_record(file_path) - except KeyError: - stage_record = None - if stage_record and stage_record.traceback: - report_path, suffix = _report_exec_fail( - env, - r_file_path.name, - stage_record.traceback, - show_traceback, - "\n Last execution failed with traceback saved in {}", - ) - message += suffix - - LOGGER.error(message) - - else: - LOGGER.verbose("Merged cached outputs into %s", str(r_file_path)) - succeeded = True - try: - runtime = cache_base.get_cache_record(pk).data.get( - "execution_seconds", None - ) - except Exception: - pass - - env.nb_execution_data_changed = True - env.nb_execution_data[env.docname] = { - "mtime": datetime.now().timestamp(), - "runtime": runtime, - "method": execution_method, - "succeeded": succeeded, - } - if report_path: - env.nb_execution_data[env.docname]["error_log"] = report_path - - return ntbk - - -def is_valid_exec_file(env: BuildEnvironment, docname: str) -> bool: - """Check if the docname refers to a file that should be executed.""" - doc_path = env.doc2path(docname) - if doc_path in env.nb_excluded_exec_paths: - return False - matches = tuple( - re.search(re.escape(suffix) + "$", doc_path) - for suffix in env.nb_allowed_exec_suffixes - ) - if not any(matches): - return False - return True - - -def _report_exec_fail( - env, - file_name: str, - traceback: str, - show_traceback: bool, - template: str, -): - """Save the traceback to a log file, and create log message.""" - reports_dir = Path(env.app.outdir).joinpath("reports") - reports_dir.mkdir(exist_ok=True) - full_path = reports_dir.joinpath(os.path.splitext(file_name)[0] + ".log") - full_path.write_text(traceback, encoding="utf8") - message = template.format(full_path) - if show_traceback: - message += "\n" + traceback - return str(full_path), message - - -def _stage_and_execute( - env: 
BuildEnvironment, - exec_docnames: List[str], - path_to_cache: str, - timeout: Optional[int], - allow_errors: bool, - exec_in_temp: bool, -): - pk_list = [] - cache_base = get_cache(path_to_cache) - - for nb in exec_docnames: - source_path = env.doc2path(nb) - with open(source_path, encoding="utf8") as handle: - # here we pass an iterator, so that only the required lines are read - converter = get_nb_converter(source_path, env, (line for line in handle)) - if converter is not None: - stage_record = cache_base.stage_notebook_file(source_path) - pk_list.append(stage_record.pk) - - # can leverage parallel execution implemented in jupyter-cache here - try: - with progress_message("executing outdated notebooks"): - execute_staged_nb( - cache_base, - pk_list or None, - timeout=timeout, - exec_in_temp=exec_in_temp, - allow_errors=allow_errors, - env=env, - ) - except OSError as err: - # This is a 'fix' for obscure cases, such as if you - # remove name.ipynb and add name.md (i.e. same name, different extension) - # and then name.ipynb isn't flagged for removal. - # Normally we want to keep the stage records available, so that we can retrieve - # execution tracebacks at the `generate_notebook_outputs` stage, - # but we need to flush if it becomes 'corrupted' - LOGGER.error( - "Execution failed in an unexpected way, clearing staged notebooks: %s", err - ) - for record in cache_base.list_staged_records(): - cache_base.discard_staged_notebook(record.pk) - - -def execute_staged_nb( - cache_base, - pk_list, - timeout: Optional[int], - exec_in_temp: bool, - allow_errors: bool, - env: BuildEnvironment, -): - """Executing the staged notebook.""" - try: - executor = load_executor("basic", cache_base, logger=LOGGER) - except ImportError as error: - LOGGER.error(str(error)) - return 1 - - def _converter(path): - text = Path(path).read_text(encoding="utf8") - return get_nb_converter(path, env).func(text) - - result = executor.run_and_cache( - filter_pks=pk_list or None, - converter=_converter, - timeout=timeout, - allow_errors=allow_errors, - run_in_temp=exec_in_temp, - ) - return result - - -def nb_has_all_output( - source_path: str, nb_extensions: Iterable[str] = (".ipynb",) -) -> bool: - """Determine if the path contains a notebook with at least one output.""" - has_outputs = False - ext = os.path.splitext(source_path)[1] - - if ext in nb_extensions: - with open(source_path, "r", encoding="utf8") as f: - ntbk = nbf.read(f, as_version=4) - has_outputs = all( - len(cell.outputs) != 0 - for cell in ntbk.cells - if cell["cell_type"] == "code" - ) - return has_outputs diff --git a/myst_nb/new/execution_tables.py b/myst_nb/execution_tables.py similarity index 97% rename from myst_nb/new/execution_tables.py rename to myst_nb/execution_tables.py index 1c33d408..f9e995bd 100644 --- a/myst_nb/new/execution_tables.py +++ b/myst_nb/execution_tables.py @@ -4,9 +4,9 @@ which is then replaced by a table of statistics in a post-transformation (once all the documents have been executed and these statistics are available). 
""" -import posixpath from datetime import datetime -from typing import Any, Dict +import posixpath +from typing import Any, Callable, Dict from docutils import nodes from sphinx.addnodes import pending_xref @@ -82,14 +82,14 @@ def run(self, **kwargs) -> None: node.replace_self(make_stat_table(self.env.docname, self.env.metadata)) -_key2header = { +_key2header: Dict[str, str] = { "mtime": "Modified", "method": "Method", "runtime": "Run Time (s)", "succeeded": "Status", } -_key2transform = { +_key2transform: Dict[str, Callable[[Any], str]] = { "mtime": lambda x: datetime.fromtimestamp(x).strftime("%Y-%m-%d %H:%M") if x else "", diff --git a/myst_nb/extension.py b/myst_nb/extension.py deleted file mode 100644 index 18abefe9..00000000 --- a/myst_nb/extension.py +++ /dev/null @@ -1,426 +0,0 @@ -"""Sphinx extension setup""" - -import os -from collections.abc import Sequence -from pathlib import Path -from typing import cast - -from docutils import nodes as docnodes -from IPython.lib.lexers import IPython3Lexer, IPythonTracebackLexer -from myst_parser import setup_sphinx as setup_myst_parser -from sphinx.addnodes import download_reference -from sphinx.application import Sphinx -from sphinx.builders.html import StandaloneHTMLBuilder -from sphinx.environment import BuildEnvironment -from sphinx.errors import SphinxError -from sphinx.util import import_object, logging -from sphinx.util.docutils import ReferenceRole, SphinxDirective - -from myst_nb import __version__ -from myst_nb.exec_table import setup_exec_table -from myst_nb.execution import update_execution_cache -from myst_nb.jsphinx import ( - DEFAULT_EMBED_REQUIREJS_URL, - DEFAULT_EMBED_SCRIPT_URL, - REQUIRE_URL_DEFAULT, - sphinx_abs_dir, -) -from myst_nb.lexers import AnsiColorLexer -from myst_nb.nb_glue import glue # noqa: F401 -from myst_nb.nb_glue.domain import ( - NbGlueDomain, - PasteInlineNode, - PasteMathNode, - PasteNode, - PasteTextNode, -) -from myst_nb.nb_glue.transform import PasteNodesToDocutils -from myst_nb.nodes import ( - CellInputNode, - CellNode, - CellOutputBundleNode, - CellOutputNode, - JupyterWidgetStateNode, - JupyterWidgetViewNode, -) -from myst_nb.parser import NotebookParser -from myst_nb.render_outputs import ( - CellOutputsToNodes, - get_default_render_priority, - load_renderer, -) - -LOGGER = logging.getLogger(__name__) - - -def sphinx_setup(app: Sphinx): - """Initialize Sphinx extension.""" - # Allow parsing ipynb files - app.add_source_suffix(".md", "myst-nb") - app.add_source_suffix(".ipynb", "myst-nb") - app.add_source_parser(NotebookParser) - app.setup_extension("sphinx_togglebutton") - - # Helper functions for the registry, pulled from jupyter-sphinx - def skip(self, node): - raise docnodes.SkipNode - - # Used to render an element node as HTML - def visit_element_html(self, node): - self.body.append(node.html()) - raise docnodes.SkipNode - - # Shortcut for registering our container nodes - render_container = ( - lambda self, node: self.visit_container(node), - lambda self, node: self.depart_container(node), - ) - - # Register our container nodes, these should behave just like a regular container - for node in [CellNode, CellInputNode, CellOutputNode]: - app.add_node( - node, - override=True, - html=(render_container), - latex=(render_container), - textinfo=(render_container), - text=(render_container), - man=(render_container), - ) - - # Register the output bundle node. 
- # No translators should touch this node because we'll replace it in a post-transform - app.add_node( - CellOutputBundleNode, - override=True, - html=(skip, None), - latex=(skip, None), - textinfo=(skip, None), - text=(skip, None), - man=(skip, None), - ) - - # these nodes hold widget state/view JSON, - # but are only rendered properly in HTML documents. - for node in [JupyterWidgetStateNode, JupyterWidgetViewNode]: - app.add_node( - node, - override=True, - html=(visit_element_html, None), - latex=(skip, None), - textinfo=(skip, None), - text=(skip, None), - man=(skip, None), - ) - - # Register our inline nodes so they can be parsed as a part of titles - # No translators should touch these nodes because we'll replace them in a transform - for node in [PasteMathNode, PasteNode, PasteTextNode, PasteInlineNode]: - app.add_node( - node, - override=True, - html=(skip, None), - latex=(skip, None), - textinfo=(skip, None), - text=(skip, None), - man=(skip, None), - ) - - # Add configuration for the cache - app.add_config_value("jupyter_cache", "", "env") - app.add_config_value("execution_excludepatterns", [], "env") - app.add_config_value("jupyter_execute_notebooks", "auto", "env") - app.add_config_value("execution_timeout", 30, "env") - app.add_config_value("execution_allow_errors", False, "env") - app.add_config_value("execution_in_temp", False, "env") - # show traceback in stdout (in addition to writing to file) - # this is useful in e.g. RTD where one cannot inspect a file - app.add_config_value("execution_show_tb", False, "") - app.add_config_value("nb_custom_formats", {}, "env") - - # render config - app.add_config_value("nb_render_key", "render", "env") - app.add_config_value("nb_render_priority", {}, "env") - app.add_config_value("nb_render_plugin", "default", "env") - app.add_config_value("nb_render_text_lexer", "myst-ansi", "env") - app.add_config_value("nb_output_stderr", "show", "env") - app.add_config_value("nb_merge_streams", False, "env") - - # Register our post-transform which will convert output bundles to nodes - app.add_post_transform(PasteNodesToDocutils) - app.add_post_transform(CellOutputsToNodes) - - # Add myst-parser transforms and configuration - setup_myst_parser(app) - - # Events - app.connect("config-inited", validate_config_values) - app.connect("builder-inited", static_path) - app.connect("builder-inited", set_valid_execution_paths) - app.connect("builder-inited", set_up_execution_data) - app.connect("builder-inited", set_render_priority) - app.connect("env-purge-doc", remove_execution_data) - app.connect("env-get-outdated", update_execution_cache) - app.connect("config-inited", add_exclude_patterns) - app.connect("config-inited", update_togglebutton_classes) - app.connect("env-updated", save_glue_cache) - app.connect("config-inited", add_nb_custom_formats) - app.connect("env-updated", load_ipywidgets_js) - - # For syntax highlighting - app.add_lexer("ipythontb", IPythonTracebackLexer) - app.add_lexer("ipython", IPython3Lexer) - app.add_lexer("myst-ansi", AnsiColorLexer) - - # Add components - app.add_directive("code-cell", CodeCell) - app.add_role("nb-download", JupyterDownloadRole()) - app.add_css_file("mystnb.css") - app.add_domain(NbGlueDomain) - - # execution statistics table - setup_exec_table(app) - - # TODO need to deal with key clashes in NbGlueDomain.merge_domaindata - # before this is parallel_read_safe - return {"version": __version__, "parallel_read_safe": False} - - -class MystNbConfigError(SphinxError): - """Error specific to MyST-NB.""" - - category 
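sphinx_setup above registers each custom node with per-output-format visitor tuples. As a point of reference only, a minimal extension registering one container-like node, rendered in HTML and skipped elsewhere, could look like this (all names are illustrative, not taken from the patch):

from docutils import nodes
from sphinx.application import Sphinx


class MyCellNode(nodes.container):
    """An illustrative container node."""


def visit_my_cell(self, node):
    self.visit_container(node)


def depart_my_cell(self, node):
    self.depart_container(node)


def skip(self, node):
    raise nodes.SkipNode


def setup(app: Sphinx):
    # render as a plain container in HTML, drop the node in other builders
    app.add_node(
        MyCellNode,
        html=(visit_my_cell, depart_my_cell),
        latex=(skip, None),
        text=(skip, None),
    )
    return {"version": "0.1", "parallel_read_safe": True}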
= "MyST NB Configuration Error" - - -def validate_config_values(app: Sphinx, config): - """Validate configuration values.""" - execute_mode = app.config["jupyter_execute_notebooks"] - if execute_mode not in ["force", "auto", "cache", "off"]: - raise MystNbConfigError( - "'jupyter_execute_notebooks' can be: " - f"`force`, `auto`, `cache` or `off`, but got: {execute_mode}", - ) - - if app.config["jupyter_cache"] and execute_mode != "cache": - raise MystNbConfigError( - "'jupyter_cache' is set, " - f"but 'jupyter_execute_notebooks' is not `cache`: {execute_mode}" - ) - - if app.config["jupyter_cache"] and not os.path.isdir(app.config["jupyter_cache"]): - raise MystNbConfigError( - f"'jupyter_cache' is not a directory: {app.config['jupyter_cache']}", - ) - - if not isinstance(app.config["nb_custom_formats"], dict): - raise MystNbConfigError( - "'nb_custom_formats' should be a dictionary: " - f"{app.config['nb_custom_formats']}" - ) - for name, converter in app.config["nb_custom_formats"].items(): - if not isinstance(name, str): - raise MystNbConfigError( - f"'nb_custom_formats' keys should be a string: {name}" - ) - if isinstance(converter, str): - app.config["nb_custom_formats"][name] = (converter, {}) - elif not (isinstance(converter, Sequence) and len(converter) in [2, 3]): - raise MystNbConfigError( - "'nb_custom_formats' values must be " - f"either strings or 2/3-element sequences, got: {converter}" - ) - - converter_str = app.config["nb_custom_formats"][name][0] - caller = import_object( - converter_str, - f"MyST-NB nb_custom_formats: {name}", - ) - if not callable(caller): - raise MystNbConfigError( - f"`nb_custom_formats.{name}` converter is not callable: {caller}" - ) - if len(app.config["nb_custom_formats"][name]) == 2: - app.config["nb_custom_formats"][name].append(None) - elif not isinstance(app.config["nb_custom_formats"][name][2], bool): - raise MystNbConfigError( - f"`nb_custom_formats.{name}.commonmark_only` arg is not boolean" - ) - - if not isinstance(app.config["nb_render_key"], str): - raise MystNbConfigError("`nb_render_key` is not a string") - - if app.config["nb_output_stderr"] not in [ - "show", - "remove", - "remove-warn", - "warn", - "error", - "severe", - ]: - raise MystNbConfigError( - "`nb_output_stderr` not one of: " - "'show', 'remove', 'remove-warn', 'warn', 'error', 'severe'" - ) - - # try loading notebook output renderer - load_renderer(app.config["nb_render_plugin"]) - - -def static_path(app: Sphinx): - static_path = Path(__file__).absolute().with_name("_static") - app.config.html_static_path.append(str(static_path)) - - -def load_ipywidgets_js(app: Sphinx, env: BuildEnvironment) -> None: - """Add ipywidget JavaScript to HTML pages. - - We adapt the code in sphinx.ext.mathjax, - to only add this JS if widgets have been found in any notebooks. - (ideally we would only add it to the pages containing widgets, - but this is not trivial in sphinx) - - There are 2 cases: - - - ipywidgets 7, with require - - ipywidgets 7, no require - - We reuse settings, if available, for jupyter-sphinx - """ - if app.builder.format != "html" or not app.env.nb_contains_widgets: - return - builder = cast(StandaloneHTMLBuilder, app.builder) - - # TODO change this logic? 
- require_url_default = ( - REQUIRE_URL_DEFAULT - if "jupyter_sphinx_require_url" not in app.config - else app.config.jupyter_sphinx_require_url - ) - embed_url_default = ( - None - if "jupyter_sphinx_embed_url" not in app.config - else app.config.jupyter_sphinx_embed_url - ) - - if require_url_default: - builder.add_js_file(require_url_default) - embed_url = embed_url_default or DEFAULT_EMBED_REQUIREJS_URL - else: - embed_url = embed_url_default or DEFAULT_EMBED_SCRIPT_URL - if embed_url: - builder.add_js_file(embed_url) - - -def set_render_priority(app: Sphinx): - """Set the render priority for the particular builder.""" - builder = app.builder.name - if app.config.nb_render_priority and builder in app.config.nb_render_priority: - app.env.nb_render_priority = app.config.nb_render_priority[builder] - else: - app.env.nb_render_priority = get_default_render_priority(builder) - - if app.env.nb_render_priority is None: - raise MystNbConfigError(f"`nb_render_priority` not set for builder: {builder}") - try: - for item in app.env.nb_render_priority: - assert isinstance(item, str) - except Exception: - raise MystNbConfigError( - f"`nb_render_priority` is not a list of str: {app.env.nb_render_priority}" - ) - - -def set_valid_execution_paths(app: Sphinx): - """Set files excluded from execution, and valid file suffixes - - Patterns given in execution_excludepatterns conf variable from executing. - """ - app.env.nb_excluded_exec_paths = { - str(path) - for pat in app.config["execution_excludepatterns"] - for path in Path().cwd().rglob(pat) - } - LOGGER.verbose("MyST-NB: Excluded Paths: %s", app.env.nb_excluded_exec_paths) - app.env.nb_allowed_exec_suffixes = { - suffix - for suffix, parser_type in app.config["source_suffix"].items() - if parser_type in ("myst-nb",) - } - app.env.nb_contains_widgets = False - - -def set_up_execution_data(app: Sphinx): - if not hasattr(app.env, "nb_execution_data"): - app.env.nb_execution_data = {} - if not hasattr(app.env, "nb_execution_data_changed"): - app.env.nb_execution_data_changed = False - app.env.nb_execution_data_changed = False - - -def remove_execution_data(app: Sphinx, env, docname): - if docname in app.env.nb_execution_data: - app.env.nb_execution_data.pop(docname) - app.env.nb_execution_data_changed = True - - -def add_nb_custom_formats(app: Sphinx, config): - """Add custom conversion formats.""" - for suffix in config.nb_custom_formats: - app.add_source_suffix(suffix, "myst-nb") - - -def add_exclude_patterns(app: Sphinx, config): - """Add default exclude patterns (if not already present).""" - if "**.ipynb_checkpoints" not in config.exclude_patterns: - config.exclude_patterns.append("**.ipynb_checkpoints") - - -def update_togglebutton_classes(app: Sphinx, config): - to_add = [ - ".tag_hide_input div.cell_input", - ".tag_hide-input div.cell_input", - ".tag_hide_output div.cell_output", - ".tag_hide-output div.cell_output", - ".tag_hide_cell.cell", - ".tag_hide-cell.cell", - ] - for selector in to_add: - config.togglebutton_selector += f", {selector}" - - -def save_glue_cache(app: Sphinx, env): - NbGlueDomain.from_env(env).write_cache() - - -class JupyterDownloadRole(ReferenceRole): - def run(self): - reftarget = sphinx_abs_dir(self.env, self.target) - node = download_reference(self.rawtext, reftarget=reftarget) - self.set_source_info(node) - title = self.title if self.has_explicit_title else self.target - node += docnodes.literal( - self.rawtext, title, classes=["xref", "download", "myst-nb"] - ) - return [node], [] - - -class 
CodeCell(SphinxDirective): - """Raises a warning if it is triggered, it should not make it to the doctree.""" - - optional_arguments = 1 - final_argument_whitespace = True - has_content = True - - def run(self): - LOGGER.warning( - ( - "Found an unexpected `code-cell` directive. " - "Either this file was not converted to a notebook, " - "because Jupytext header content was missing, " - "or the `code-cell` was not converted, because it is nested. " - "See https://myst-nb.readthedocs.io/en/latest/use/markdown.html " - "for more information." - ), - location=(self.env.docname, self.lineno), - ) - return [] diff --git a/myst_nb/jsphinx.py b/myst_nb/jsphinx.py deleted file mode 100644 index 065e6103..00000000 --- a/myst_nb/jsphinx.py +++ /dev/null @@ -1,125 +0,0 @@ -"""Replacements for jupyter-sphinx""" -import os -import warnings -from pathlib import Path - -# TODO pin nbconvert version? -import nbconvert -import nbformat -from nbconvert.preprocessors import ExtractOutputPreprocessor -from nbconvert.writers import FilesWriter - -# from https://github.com/jupyter-widgets/ipywidgets v7.6.5 -_HTML_MANGER_URL = "https://cdn.jsdelivr.net/npm/@jupyter-widgets/html-manager@^0.20.0" -DEFAULT_EMBED_SCRIPT_URL = f"{_HTML_MANGER_URL}/dist/embed.js" -DEFAULT_EMBED_REQUIREJS_URL = f"{_HTML_MANGER_URL}/dist/embed-amd.js" -snippet_template = """ -{load} -<script type="application/vnd.jupyter.widget-state+json"> -{json_data} -</script> -{widget_views} -""" -widget_view_template = """<script type="application/vnd.jupyter.widget-view+json"> -{view_spec} -</script>""" - -# from jupyter-sphinx (0.3.2) -REQUIRE_URL_DEFAULT = ( - "https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js" -) - -WIDGET_STATE_MIMETYPE = "application/vnd.jupyter.widget-state+json" -WIDGET_VIEW_MIMETYPE = "application/vnd.jupyter.widget-view+json" - - -def sphinx_abs_dir(env, *paths): - # We write the output files into - # output_directory / jupyter_execute / path relative to source directory - # Sphinx expects download links relative to source file or relative to - # source dir and prepended with '/'. We use the latter option. - out_path = ( - output_directory(env) / Path(env.docname).parent / Path(*paths) - ).resolve() - - if os.name == "nt": - # Can't get relative path between drives on Windows - return out_path.as_posix() - - # Path().relative_to() doesn't work when not a direct subpath - return "/" + os.path.relpath(out_path, env.app.srcdir) - - -def output_directory(env): - # Put output images inside the sphinx build directory to avoid - # polluting the current working directory. We don't use a - # temporary directory, as sphinx may cache the doctree with - # references to the images that we write - - # Note: we are using an implicit fact that sphinx output directories are - # direct subfolders of the build directory. - # TODO change this? - return (Path(env.app.outdir) / os.path.pardir / "jupyter_execute").resolve() - - -def strip_latex_delimiters(source): - r"""Remove LaTeX math delimiters that would be rendered by the math block. - - These are: ``\(…\)``, ``\[…\]``, ``$…$``, and ``$$…$$``. - This is necessary because sphinx does not have a dedicated role for - generic LaTeX, while Jupyter only defines generic LaTeX output, see - https://github.com/jupyter/jupyter-sphinx/issues/90 for discussion. 
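sphinx_abs_dir and output_directory above produce a download target that is written under the build directory but referenced as a '/'-prefixed, source-relative path. Stripped of the Sphinx objects, the path arithmetic is roughly the following (both paths are hypothetical):

import os
from pathlib import Path

srcdir = Path("/project/docs").resolve()  # hypothetical Sphinx source dir
out_path = Path("/project/docs/_build/jupyter_execute/page.ipynb").resolve()  # hypothetical output file

if os.name == "nt":
    # relative paths cannot cross drive letters on Windows, so fall back to an absolute posix path
    link = out_path.as_posix()
else:
    # a '/'-prefixed path is treated by Sphinx as relative to the source directory
    link = "/" + os.path.relpath(out_path, srcdir)

print(link)  # "/_build/jupyter_execute/page.ipynb"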
- """ - source = source.strip() - delimiter_pairs = (pair.split() for pair in r"\( \),\[ \],$$ $$,$ $".split(",")) - for start, end in delimiter_pairs: - if source.startswith(start) and source.endswith(end): - return source[len(start) : -len(end)] - - return source - - -def get_widgets(notebook): - try: - return notebook.metadata.widgets[WIDGET_STATE_MIMETYPE] - except AttributeError: - # Don't catch KeyError, as it's a bug if 'widgets' does - # not contain 'WIDGET_STATE_MIMETYPE' - return None - - -def contains_widgets(notebook): - widgets = get_widgets(notebook) - return widgets and widgets["state"] - - -def write_notebook_output(notebook, output_dir, notebook_name, location=None): - """Extract output from notebook cells and write to files in output_dir. - - This also modifies 'notebook' in-place, adding metadata to each cell that - maps output mime-types to the filenames the output was saved under. - """ - resources = dict(unique_key=os.path.join(output_dir, notebook_name), outputs={}) - - # Modifies 'resources' in-place - ExtractOutputPreprocessor().preprocess(notebook, resources) - # Write the cell outputs to files where we can (images and PDFs), - # as well as the notebook file. - FilesWriter(build_directory=output_dir).write( - nbformat.writes(notebook), - resources, - os.path.join(output_dir, notebook_name + ".ipynb"), - ) - - exporter = nbconvert.exporters.ScriptExporter( - # TODO:log=LoggerAdapterWrapper(js.logger) - ) - with warnings.catch_warnings(): - # See https://github.com/jupyter/nbconvert/issues/1388 - warnings.simplefilter("ignore", DeprecationWarning) - contents, resources = exporter.from_notebook_node(notebook) - - notebook_file = notebook_name + resources["output_extension"] - output_dir = Path(output_dir) - # utf-8 is the de-facto standard encoding for notebooks. 
- (output_dir / notebook_file).write_text(contents, encoding="utf8") diff --git a/myst_nb/lexers.py b/myst_nb/lexers.py index ea2c2bf3..83e691a5 100644 --- a/myst_nb/lexers.py +++ b/myst_nb/lexers.py @@ -2,11 +2,10 @@ """Pygments lexers""" import re -import pygments.lexer -import pygments.token - # this is not added as an entry point in ipython, so we add it in this package from IPython.lib.lexers import IPythonTracebackLexer # noqa: F401 +import pygments.lexer +import pygments.token _ansi_code_to_color = { 0: "Black", diff --git a/myst_nb/new/loggers.py b/myst_nb/loggers.py similarity index 100% rename from myst_nb/new/loggers.py rename to myst_nb/loggers.py diff --git a/myst_nb/nb_glue/domain.py b/myst_nb/nb_glue/domain.py index df2bec77..cd2e5a57 100644 --- a/myst_nb/nb_glue/domain.py +++ b/myst_nb/nb_glue/domain.py @@ -8,11 +8,13 @@ from sphinx.domains import Domain from sphinx.domains.math import MathDomain from sphinx.util import logging -from sphinx.util.docutils import SphinxDirective +from sphinx.util.docutils import SphinxDirective, SphinxRole from myst_nb.nb_glue import GLUE_PREFIX from myst_nb.nb_glue.utils import find_all_keys -from myst_nb.nodes import CellOutputBundleNode, CellOutputNode + +# from myst_nb.nodes import CellOutputBundleNode, CellOutputNode +from sphinx.ext.autodoc.directive import DummyOptionSpec SPHINX_LOGGER = logging.getLogger(__name__) @@ -248,6 +250,25 @@ def paste_text_role(name, rawtext, text, lineno, inliner, options=None, content= return [PasteTextNode(key, formatting=formatting, location=(path, lineno))], [] +class DummyDirective(SphinxDirective): + required_arguments = 1 + final_argument_whitespace = True + has_content = False + option_spec = DummyOptionSpec() + + def run(self): + return [] + + +class DummyDirective2(DummyDirective): + has_content = True + + +class DummyRole(SphinxRole): + def run(self): + return [nodes.inline(text=self.text)], [] + + class NbGlueDomain(Domain): """A sphinx domain for handling glue data""" @@ -260,9 +281,16 @@ class NbGlueDomain(Domain): # - docmap is the mapping of docnames to the set of keys it contains initial_data = {"cache": {}, "docmap": {}} - directives = {"": Paste, "any": Paste, "figure": PasteFigure, "math": PasteMath} - - roles = {"": paste_any_role, "any": paste_any_role, "text": paste_text_role} + # TODO placeholders for glue roles/directives which need re-working + # directives = {"": Paste, "any": Paste, "figure": PasteFigure, "math": PasteMath} + # roles = {"": paste_any_role, "any": paste_any_role, "text": paste_text_role} + directives = { + "": DummyDirective, + "any": DummyDirective, + "figure": DummyDirective2, + "math": DummyDirective, + } + roles = {"": DummyRole(), "any": DummyRole(), "text": DummyRole()} @property def cache(self) -> dict: @@ -329,7 +357,7 @@ def merge_domaindata(self, docnames: List[str], otherdata: Dict) -> None: inventory (coming from a subprocess in parallel builds). """ # TODO need to deal with key clashes - raise NotImplementedError( - "merge_domaindata must be implemented in %s " - "to be able to do parallel builds!" % self.__class__ - ) + # raise NotImplementedError( + # "merge_domaindata must be implemented in %s " + # "to be able to do parallel builds!" 
% self.__class__ + # ) diff --git a/myst_nb/new/__init__.py b/myst_nb/new/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/myst_nb/nodes.py b/myst_nb/nodes.py deleted file mode 100644 index 06e645bb..00000000 --- a/myst_nb/nodes.py +++ /dev/null @@ -1,106 +0,0 @@ -"""AST nodes to designate notebook components.""" -import json -from typing import Any, List - -from docutils import nodes -from nbformat import NotebookNode - -from myst_nb.jsphinx import snippet_template, widget_view_template - - -class CellNode(nodes.container): - """Represent a cell in the Sphinx AST.""" - - -class CellInputNode(nodes.container): - """Represent an input cell in the Sphinx AST.""" - - -class CellOutputNode(nodes.container): - """Represent an output cell in the Sphinx AST.""" - - -class CellOutputBundleNode(nodes.container): - """Represent a MimeBundle in the Sphinx AST, to be transformed later.""" - - def __init__(self, outputs, renderer: str, metadata=None, **attributes): - self._outputs = outputs - self._renderer = renderer - self._metadata = metadata or NotebookNode() - attributes["output_count"] = len(outputs) # for debugging with pformat - super().__init__("", **attributes) - - @property - def outputs(self) -> List[NotebookNode]: - """The outputs associated with this cell.""" - return self._outputs - - @property - def metadata(self) -> NotebookNode: - """The cell level metadata for this output.""" - return self._metadata - - @property - def renderer(self) -> str: - """The renderer for this output cell.""" - return self._renderer - - def copy(self): - obj = self.__class__( - outputs=self._outputs, - renderer=self._renderer, - metadata=self._metadata, - **self.attributes, - ) - obj.document = self.document - obj.source = self.source - obj.line = self.line - return obj - - -class JupyterWidgetStateNode(nodes.Element): - """Appended to doctree if any Jupyter cell produced a widget as output. - - Contains the state needed to render a collection of Jupyter widgets. - - Per doctree there is 1 JupyterWidgetStateNode per kernel that produced - Jupyter widgets when running. This is fine as (presently) the - 'html-manager' Javascript library, which embeds widgets, loads the state - from all script tags on the page of the correct mimetype. - """ - - def __init__( - self, rawsource: str = "", *children: nodes.Element, **attributes: Any - ): - if "state" not in attributes: - raise ValueError("No 'state' specified") - super().__init__(rawsource, *children, **attributes) - - def html(self): - """Set in extension setup for html rendering visits.""" - # TODO: render into a separate file if 'html-manager' starts fully - # parsing script tags, and not just grabbing their innerHTML - # https://github.com/jupyter-widgets/ipywidgets/blob/master/packages/html-manager/src/libembed.ts#L36 - return snippet_template.format( - load="", widget_views="", json_data=json.dumps(self["state"]) - ) - - -class JupyterWidgetViewNode(nodes.Element): - """Inserted into doctree whenever a Jupyter cell produces a widget as output. - - Contains a unique ID for this widget; enough information for the widget - embedding javascript to render it, given the widget state. For non-HTML - outputs this doctree node is rendered generically. 
- """ - - def __init__( - self, rawsource: str = "", *children: nodes.Element, **attributes: Any - ): - if "view_spec" not in attributes: - raise ValueError("No 'view_spec' specified") - super().__init__(rawsource, *children, **attributes) - - def html(self): - """Set in extension setup for html rendering visits.""" - return widget_view_template.format(view_spec=json.dumps(self["view_spec"])) diff --git a/myst_nb/new/parse.py b/myst_nb/parse.py similarity index 100% rename from myst_nb/new/parse.py rename to myst_nb/parse.py diff --git a/myst_nb/parser.py b/myst_nb/parser.py deleted file mode 100644 index ead64e93..00000000 --- a/myst_nb/parser.py +++ /dev/null @@ -1,314 +0,0 @@ -from pathlib import Path -from typing import Any, Dict, List, Tuple - -import nbformat as nbf -from docutils import nodes -from markdown_it import MarkdownIt -from markdown_it.rules_core import StateCore -from markdown_it.token import Token -from markdown_it.tree import SyntaxTreeNode -from myst_parser.main import MdParserConfig, create_md_parser -from myst_parser.sphinx_parser import MystParser -from myst_parser.sphinx_renderer import SphinxRenderer -from sphinx.environment import BuildEnvironment -from sphinx.util import logging - -from myst_nb.converter import get_nb_converter -from myst_nb.execution import generate_notebook_outputs -from myst_nb.jsphinx import contains_widgets, get_widgets, write_notebook_output -from myst_nb.nb_glue import GLUE_PREFIX -from myst_nb.nb_glue.domain import NbGlueDomain -from myst_nb.nodes import ( - CellInputNode, - CellNode, - CellOutputBundleNode, - CellOutputNode, - JupyterWidgetStateNode, -) - -SPHINX_LOGGER = logging.getLogger(__name__) - - -class NotebookParser(MystParser): - """Docutils parser for Markedly Structured Text (MyST) and Jupyter Notebooks.""" - - supported = ("myst-nb",) - translate_section_name = None - - config_section = "myst-nb parser" - config_section_dependencies = ("parsers",) - - def parse(self, inputstring: str, document: nodes.document) -> None: - - # document.settings.smart_quotes = False - - self.reporter = document.reporter - self.env = document.settings.env # type: BuildEnvironment - - converter = get_nb_converter( - self.env.doc2path(self.env.docname, True), - self.env, - inputstring.splitlines(keepends=True), - ) - - if converter is None: - # Read the notebook as a text-document - super().parse(inputstring, document=document) - return - - try: - ntbk = converter.func(inputstring) - except Exception as error: - SPHINX_LOGGER.error( - "MyST-NB: Conversion to notebook failed: %s", - error, - # exc_info=True, - location=(self.env.docname, 1), - ) - return - - # add outputs to notebook from the cache - if self.env.config["jupyter_execute_notebooks"] != "off": - ntbk = generate_notebook_outputs( - self.env, ntbk, show_traceback=self.env.config["execution_show_tb"] - ) - - # Parse the notebook content to a list of syntax tokens and an env - # containing global data like reference definitions - md_parser, env, tokens = nb_to_tokens( - ntbk, - ( - self.env.myst_config # type: ignore[attr-defined] - if converter is None - else converter.config - ), - self.env.config["nb_render_plugin"], - ) - - # Write the notebook's output to disk - path_doc = nb_output_to_disc(ntbk, document) - - # Update our glue key list with new ones defined in this page - glue_domain = NbGlueDomain.from_env(self.env) - glue_domain.add_notebook(ntbk, path_doc) - - # Render the Markdown tokens to docutils AST. 
- tokens_to_docutils(md_parser, env, tokens, document) - - -def nb_to_tokens( - ntbk: nbf.NotebookNode, config: MdParserConfig, renderer_plugin: str -) -> Tuple[MarkdownIt, Dict[str, Any], List[Token]]: - """Parse the notebook content to a list of syntax tokens and an env, - containing global data like reference definitions. - """ - # setup the markdown parser - md = create_md_parser(config, SphinxNBRenderer) - # Note we disable front matter parsing, - # because this is taken from the actual notebook metadata - md.disable("front_matter", ignoreInvalid=True) - # make a sandbox where all the parsing global data, - # like reference definitions will be stored - env: Dict[str, Any] = {} - rules = md.core.ruler.get_active_rules() - - # First only run pre-inline chains - # so we can collect all reference definitions, etc, before assessing references - def parse_block(src, start_line): - with md.reset_rules(): - # enable only rules up to block - md.core.ruler.enableOnly(rules[: rules.index("inline")]) - tokens = md.parse(src, env) - for token in tokens: - if token.map: - token.map = [start_line + token.map[0], start_line + token.map[1]] - for dup_ref in env.get("duplicate_refs", []): - if "fixed" not in dup_ref: - dup_ref["map"] = [ - start_line + dup_ref["map"][0], - start_line + dup_ref["map"][1], - ] - dup_ref["fixed"] = True - return tokens - - block_tokens = [] - source_map = ntbk.metadata.get("source_map", None) - - # get language lexer name - langinfo = ntbk.metadata.get("language_info", {}) - lexer = langinfo.get("pygments_lexer", langinfo.get("name", None)) - if lexer is None: - ntbk.metadata.get("kernelspec", {}).get("language", None) - # TODO log warning if lexer is still None - - for cell_index, nb_cell in enumerate(ntbk.cells): - - # if the the source_map has been stored (for text-based notebooks), - # we use that do define the starting line for each cell - # otherwise, we set a pseudo base that represents the cell index - start_line = source_map[cell_index] if source_map else (cell_index + 1) * 10000 - start_line += 1 # use base 1 rather than 0 - - # Skip empty cells - if len(nb_cell["source"].strip()) == 0: - continue - - # skip cells tagged for removal - # TODO this logic should be deferred to a transform - tags = nb_cell.metadata.get("tags", []) - if ("remove_cell" in tags) or ("remove-cell" in tags): - continue - - if nb_cell["cell_type"] == "markdown": - - # we add the cell index to tokens, - # so they can be included in the error logging, - block_tokens.extend(parse_block(nb_cell["source"], start_line)) - - elif nb_cell["cell_type"] == "code": - # here we do nothing but store the cell as a custom token - block_tokens.append( - Token( - "nb_code_cell", - "", - 0, - meta={"cell": nb_cell, "lexer": lexer, "renderer": renderer_plugin}, - map=[start_line, start_line], - ) - ) - - # Now all definitions have been gathered, - # we run inline and post-inline chains, to expand the text. - # Note we assume here that these rules never require the actual source text, - # only acting on the existing tokens - state = StateCore("", md, env, block_tokens) - with md.reset_rules(): - md.core.ruler.enableOnly(rules[rules.index("inline") :]) - md.core.process(state) - - # Add the front matter. - # Note that myst_parser serialises dict/list like keys, when rendering to - # docutils docinfo. These could be read back with `json.loads`. 
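nb_to_tokens above parses each markdown cell with only the rules up to "inline" enabled, so reference definitions from every cell are collected before any inline syntax is resolved; the inline and post-inline rules are then run once over the combined token stream. The rule-subsetting trick in isolation, with hypothetical cell content, looks roughly like this:

from markdown_it import MarkdownIt
from markdown_it.rules_core import StateCore

md = MarkdownIt()
env: dict = {}
rules = md.core.ruler.get_active_rules()

# pass 1: block-level rules only (everything before "inline"), per markdown cell
with md.reset_rules():
    md.core.ruler.enableOnly(rules[: rules.index("inline")])
    tokens = md.parse("Some *markdown* text", env)  # hypothetical cell source

# pass 2: once all cells are gathered, run the inline and post-inline rules
state = StateCore("", md, env, tokens)
with md.reset_rules():
    md.core.ruler.enableOnly(rules[rules.index("inline") :])
    md.core.process(state)

print([token.type for token in state.tokens])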
- state.tokens = [ - Token( - "front_matter", - "", - 0, - map=[0, 0], - content=({k: v for k, v in ntbk.metadata.items()}), # type: ignore[arg-type] - ) - ] + state.tokens - - # If there are widgets, this will embed the state of all widgets in a script - if contains_widgets(ntbk): - state.tokens.append( - Token( - "jupyter_widget_state", - "", - 0, - map=[0, 0], - meta={"state": get_widgets(ntbk)}, - ) - ) - - return md, env, state.tokens - - -def tokens_to_docutils( - md: MarkdownIt, env: Dict[str, Any], tokens: List[Token], document: nodes.document -) -> None: - """Render the Markdown tokens to docutils AST.""" - md.options["document"] = document - md.renderer.render(tokens, md.options, env) - - -class SphinxNBRenderer(SphinxRenderer): - """A markdown-it token renderer, - which includes special methods for notebook cells. - """ - - def render_jupyter_widget_state(self, token: SyntaxTreeNode) -> None: - if token.meta["state"]: - self.document.settings.env.nb_contains_widgets = True - node = JupyterWidgetStateNode(state=token.meta["state"]) - self.add_line_and_source_path(node, token) - self.document.append(node) - - def render_nb_code_cell(self, token: SyntaxTreeNode) -> None: - """Render a Jupyter notebook cell.""" - cell = token.meta["cell"] # type: nbf.NotebookNode - - # TODO logic involving tags should be deferred to a transform - tags = cell.metadata.get("tags", []) - - # Cell container will wrap whatever is in the cell - classes = ["cell"] - for tag in tags: - classes.append(f"tag_{tag}") - sphinx_cell = CellNode(classes=classes, cell_type=cell["cell_type"]) - self.current_node += sphinx_cell - if ("remove_input" not in tags) and ("remove-input" not in tags): - cell_input = CellInputNode(classes=["cell_input"]) - self.add_line_and_source_path(cell_input, token) - sphinx_cell += cell_input - - # Input block - code_block = nodes.literal_block(text=cell["source"]) - if token.meta.get("lexer", None) is not None: - code_block["language"] = token.meta["lexer"] - cell_input += code_block - - # ================== - # Cell output - # ================== - if ( - ("remove_output" not in tags) - and ("remove-output" not in tags) - and cell["outputs"] - ): - cell_output = CellOutputNode(classes=["cell_output"]) - sphinx_cell += cell_output - - outputs = CellOutputBundleNode( - cell["outputs"], token.meta["renderer"], cell.metadata - ) - self.add_line_and_source_path(outputs, token) - cell_output += outputs - - -def nb_output_to_disc(ntbk: nbf.NotebookNode, document: nodes.document) -> Path: - """Write the notebook's output to disk - - We remove all the mime prefixes from "glue" step. - This way, writing properly captures the glued images - """ - replace_mime = [] - for cell in ntbk.cells: - if hasattr(cell, "outputs"): - for out in cell.outputs: - if "data" in out: - # Only do the mimebundle replacing for the scrapbook outputs - mime_prefix = ( - out.get("metadata", {}).get("scrapbook", {}).get("mime_prefix") - ) - if mime_prefix: - out["data"] = { - key.replace(mime_prefix, ""): val - for key, val in out["data"].items() - } - replace_mime.append(out) - - # Write the notebook's output to disk. 
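nb_output_to_disc above temporarily strips the scrapbook mime prefix so nbconvert writes glued outputs as ordinary mime types, then restores the prefix so they are only rendered when pasted by a glue role/directive. On a single data bundle the rename step is just a pair of dict comprehensions (the prefix value below is an assumption, not taken from this hunk):

GLUE_PREFIX = "application/papermill.record/"  # assumed value of the prefix constant

data = {f"{GLUE_PREFIX}text/plain": "'hello'"}  # hypothetical glued output bundle

# strip the prefix so the output is written to disk as an ordinary mime type
stripped = {key.replace(GLUE_PREFIX, ""): val for key, val in data.items()}

# restore it afterwards so the output is only rendered via the glue role/directive
restored = {f"{GLUE_PREFIX}{key}": val for key, val in stripped.items()}

assert restored == data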
This changes metadata in notebook cells - path_doc = Path(document.settings.env.docname) - doc_relpath = path_doc.parent - doc_filename = path_doc.name - build_dir = Path(document.settings.env.app.outdir).parent - output_dir = build_dir.joinpath("jupyter_execute", doc_relpath) - write_notebook_output(ntbk, str(output_dir), doc_filename) - - # Now add back the mime prefixes to the right outputs so they aren't rendered - # until called from the role/directive - for out in replace_mime: - out["data"] = {f"{GLUE_PREFIX}{key}": val for key, val in out["data"].items()} - - return path_doc diff --git a/myst_nb/new/read.py b/myst_nb/read.py similarity index 99% rename from myst_nb/new/read.py rename to myst_nb/read.py index 5a3fd926..30b38869 100644 --- a/myst_nb/new/read.py +++ b/myst_nb/read.py @@ -1,18 +1,18 @@ """Module for reading notebook formats from a string input.""" -import json from functools import partial +import json from pathlib import Path from typing import Callable, Iterator, Optional, Union import attr -import nbformat as nbf -import yaml from docutils.parsers.rst import Directive from markdown_it.renderer import RendererHTML from myst_parser.main import MdParserConfig, create_md_parser +import nbformat as nbf +import yaml from myst_nb.configuration import NbParserConfig -from myst_nb.new.loggers import DocutilsDocLogger, SphinxDocLogger +from myst_nb.loggers import DocutilsDocLogger, SphinxDocLogger NOTEBOOK_VERSION = 4 """The notebook version that readers should return.""" diff --git a/myst_nb/new/render.py b/myst_nb/render.py similarity index 100% rename from myst_nb/new/render.py rename to myst_nb/render.py index ea949e8f..d0997d76 100644 --- a/myst_nb/new/render.py +++ b/myst_nb/render.py @@ -1,13 +1,13 @@ """Module for rendering notebook components to docutils nodes.""" +from binascii import a2b_base64 +from functools import lru_cache import hashlib import json import logging -import os -import re -from binascii import a2b_base64 -from functools import lru_cache from mimetypes import guess_extension +import os from pathlib import Path +import re from typing import TYPE_CHECKING, List, Union from docutils import nodes diff --git a/myst_nb/render_outputs.py b/myst_nb/render_outputs.py deleted file mode 100644 index 53aa71f6..00000000 --- a/myst_nb/render_outputs.py +++ /dev/null @@ -1,602 +0,0 @@ -"""A Sphinx post-transform, to convert notebook outpus to AST nodes.""" -import os -import re -from abc import ABC, abstractmethod -from typing import List, Optional -from unittest import mock - -import nbconvert -from docutils import nodes -from docutils.parsers.rst import directives -from importlib_metadata import entry_points -from myst_parser.docutils_renderer import make_document -from myst_parser.main import MdParserConfig, create_md_parser -from myst_parser.sphinx_renderer import SphinxRenderer -from nbformat import NotebookNode -from sphinx.environment import BuildEnvironment -from sphinx.environment.collectors.asset import ImageCollector -from sphinx.errors import SphinxError -from sphinx.transforms.post_transforms import SphinxPostTransform -from sphinx.util import logging - -from myst_nb.jsphinx import WIDGET_VIEW_MIMETYPE, sphinx_abs_dir, strip_latex_delimiters -from myst_nb.nodes import CellOutputBundleNode, JupyterWidgetViewNode - -LOGGER = logging.getLogger(__name__) - - -def get_default_render_priority(builder: str) -> Optional[List[str]]: - priority = { - builder: ( - WIDGET_VIEW_MIMETYPE, - "application/javascript", - "text/html", - "image/svg+xml", - "image/png", 
- "image/jpeg", - "text/markdown", - "text/latex", - "text/plain", - ) - for builder in ( - "html", - "readthedocs", - "singlehtml", - "dirhtml", - "linkcheck", - "readthedocsdirhtml", - "readthedocssinglehtml", - "readthedocssinglehtmllocalmedia", - "epub", - ) - } - # TODO: add support for "image/svg+xml" - priority["latex"] = ( - "application/pdf", - "image/png", - "image/jpeg", - "text/latex", - "text/markdown", - "text/plain", - ) - return priority.get(builder, None) - - -class MystNbEntryPointError(SphinxError): - category = "MyST NB Renderer Load" - - -def load_renderer(name: str) -> "CellOutputRendererBase": - """Load a renderer, - given a name within the ``myst_nb.mime_render`` entry point group - """ - all_eps = entry_points() - if hasattr(all_eps, "select"): - # importlib_metadata >= 3.6 or importlib.metadata in python >=3.10 - eps = all_eps.select(group="myst_nb.mime_render", name=name) - found = name in eps.names - else: - eps = {ep.name: ep for ep in all_eps.get("myst_nb.mime_render", [])} - found = name in eps - if found: - klass = eps[name].load() - if not issubclass(klass, CellOutputRendererBase): - raise MystNbEntryPointError( - f"Entry Point for myst_nb.mime_render:{name} " - f"is not a subclass of `CellOutputRendererBase`: {klass}" - ) - return klass - - raise MystNbEntryPointError(f"No Entry Point found for myst_nb.mime_render:{name}") - - -RGX_CARRIAGERETURN = re.compile(r".*\r(?=[^\n])") -RGX_BACKSPACE = re.compile(r"[^\n]\b") - - -def coalesce_streams(outputs: List[NotebookNode]) -> List[NotebookNode]: - """Merge all stream outputs with shared names into single streams. - - This ensure deterministic outputs. - - Adapted from: - https://github.com/computationalmodelling/nbval/blob/master/nbval/plugin.py. - """ - if not outputs: - return [] - - new_outputs = [] - streams = {} - for output in outputs: - if output["output_type"] == "stream": - if output["name"] in streams: - streams[output["name"]]["text"] += output["text"] - else: - new_outputs.append(output) - streams[output["name"]] = output - else: - new_outputs.append(output) - - # process \r and \b characters - for output in streams.values(): - old = output["text"] - while len(output["text"]) < len(old): - old = output["text"] - # Cancel out anything-but-newline followed by backspace - output["text"] = RGX_BACKSPACE.sub("", output["text"]) - # Replace all carriage returns not followed by newline - output["text"] = RGX_CARRIAGERETURN.sub("", output["text"]) - - # We also want to ensure stdout and stderr are always in the same consecutive order, - # because they are asynchronous, so order isn't guaranteed. 
- for i, output in enumerate(new_outputs): - if output["output_type"] == "stream" and output["name"] == "stderr": - if ( - len(new_outputs) >= i + 2 - and new_outputs[i + 1]["output_type"] == "stream" - and new_outputs[i + 1]["name"] == "stdout" - ): - stdout = new_outputs.pop(i + 1) - new_outputs.insert(i, stdout) - - return new_outputs - - -class CellOutputsToNodes(SphinxPostTransform): - """Use the builder context to transform a CellOutputNode into Sphinx nodes.""" - - # process very early, before CitationReferenceTransform (5), ReferencesResolver (10) - # https://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx.application.Sphinx.add_transform - default_priority = 4 - - def run(self): - abs_dir = sphinx_abs_dir(self.env) - renderers = {} # cache renderers - for node in self.document.traverse(CellOutputBundleNode): - try: - renderer_cls = renderers[node.renderer] - except KeyError: - renderer_cls = load_renderer(node.renderer) - renderers[node.renderer] = renderer_cls - renderer = renderer_cls(self.document, node, abs_dir) - if self.config.nb_merge_streams: - node._outputs = coalesce_streams(node.outputs) - output_nodes = renderer.cell_output_to_nodes(self.env.nb_render_priority) - node.replace_self(output_nodes) - - # Image collect extra nodes from cell outputs that we need to process - # this normally gets called as a `doctree-read` event - for node in self.document.traverse(nodes.image): - # If the image node has `candidates` then it's already been processed - # as in-line markdown, so skip it - if "candidates" in node: - continue - col = ImageCollector() - - # use the node docname, where possible, to deal with single document builds - docname = ( - self.app.env.path2doc(node.source) - if node.source - else self.app.env.docname - ) - with mock.patch.dict(self.app.env.temp_data, {"docname": docname}): - col.process_doc(self.app, node) - - -class CellOutputRendererBase(ABC): - """An abstract base class for rendering Notebook outputs to docutils nodes. - - Subclasses should implement the ``render`` method. - """ - - def __init__( - self, document: nodes.document, node: CellOutputBundleNode, sphinx_dir: str - ): - """ - :param sphinx_dir: Sphinx "absolute path" to the output folder, - so it is a relative path to the source folder prefixed with ``/``. - """ - self.document = document - self.env = document.settings.env # type: BuildEnvironment - self.node = node - self.sphinx_dir = sphinx_dir - - def cell_output_to_nodes(self, data_priority: List[str]) -> List[nodes.Node]: - """Convert a jupyter cell with outputs and filenames to doctree nodes. - - :param outputs: a list of outputs from a Jupyter cell - :param data_priority: media type by priority. - - :returns: list of docutils nodes - - """ - output_nodes = [] - for idx, output in enumerate(self.node.outputs): - output_type = output["output_type"] - if output_type == "stream": - if output["name"] == "stderr": - output_nodes.extend(self.render("stderr", output, idx)) - else: - output_nodes.extend(self.render("stdout", output, idx)) - elif output_type == "error": - output_nodes.extend(self.render("traceback", output, idx)) - - elif output_type in ("display_data", "execute_result"): - try: - # First mime_type by priority that occurs in output. - mime_type = next(x for x in data_priority if x in output["data"]) - except StopIteration: - # TODO this is incompatible with glue outputs - # perhaps have sphinx config to turn on/off this error reporting? 
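coalesce_streams above first merges consecutive stream outputs that share a name, then normalises \r and \b characters and stabilises the stderr/stdout ordering. The merging step alone, demonstrated with nbformat output nodes:

from nbformat.v4 import new_output

outputs = [
    new_output("stream", name="stdout", text="first line\n"),
    new_output("stream", name="stdout", text="second line\n"),
    new_output("stream", name="stderr", text="a warning\n"),
]

merged = []
streams = {}
for output in outputs:
    if output["output_type"] == "stream":
        if output["name"] in streams:
            # append to the first stream output seen with this name
            streams[output["name"]]["text"] += output["text"]
        else:
            merged.append(output)
            streams[output["name"]] = output
    else:
        merged.append(output)

assert len(merged) == 2
assert merged[0]["text"] == "first line\nsecond line\n"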
- # and/or only warn if "scrapbook" not in output.metadata - # (then enable tests/test_render_outputs.py::test_unknown_mimetype) - # LOGGER.warning( - # "MyST-NB: output contains no MIME type in priority list: %s", - # list(output["data"].keys()), - # location=location, - # ) - continue - output_nodes.extend(self.render(mime_type, output, idx)) - - return output_nodes - - def add_source_and_line(self, *nodes: List[nodes.Node]): - """Add the source and line recursively to all nodes.""" - location = self.node.source, self.node.line - for node in nodes: - node.source, node.line = location - for child in node.traverse(): - child.source, child.line = location - - def make_warning(self, error_msg: str) -> nodes.system_message: - """Raise an exception or generate a warning if appropriate, - and return a system_message node""" - return self.document.reporter.warning( - "output render: {}".format(error_msg), - line=self.node.line, - ) - - def make_error(self, error_msg: str) -> nodes.system_message: - """Raise an exception or generate a warning if appropriate, - and return a system_message node""" - return self.document.reporter.error( - "output render: {}".format(error_msg), - line=self.node.line, - ) - - def make_severe(self, error_msg: str) -> nodes.system_message: - """Raise an exception or generate a warning if appropriate, - and return a system_message node""" - return self.document.reporter.severe( - "output render: {}".format(error_msg), - line=self.node.line, - ) - - def add_name(self, node: nodes.Node, name: str): - """Append name to node['names']. - - Also normalize the name string and register it as explicit target. - """ - name = nodes.fully_normalize_name(name) - if "name" in node: - del node["name"] - node["names"].append(name) - self.document.note_explicit_target(node, node) - return name - - def parse_markdown( - self, text: str, parent: Optional[nodes.Node] = None - ) -> List[nodes.Node]: - """Parse text as CommonMark, in a new document.""" - parser = create_md_parser(MdParserConfig(commonmark_only=True), SphinxRenderer) - - # setup parent node - if parent is None: - parent = nodes.container() - self.add_source_and_line(parent) - parser.options["current_node"] = parent - - # setup containing document - new_doc = make_document(self.node.source) - new_doc.settings = self.document.settings - new_doc.reporter = self.document.reporter - parser.options["document"] = new_doc - - # use the node docname, where possible, to deal with single document builds - with mock.patch.dict( - self.env.temp_data, {"docname": self.env.path2doc(self.node.source)} - ): - parser.render(text) - - # TODO is there any transforms we should retroactively carry out? - return parent.children - - @abstractmethod - def render( - self, mime_type: str, output: NotebookNode, index: int - ) -> List[nodes.Node]: - """Take a MIME bundle and MIME type, and return zero or more nodes.""" - pass - - -class CellOutputRenderer(CellOutputRendererBase): - def __init__( - self, document: nodes.document, node: CellOutputBundleNode, sphinx_dir: str - ): - """ - :param sphinx_dir: Sphinx "absolute path" to the output folder, - so it is a relative path to the source folder prefixed with ``/``. 
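cell_output_to_nodes above selects, for each display_data/execute_result output, the first mime type in the builder's priority list that the output actually provides, and silently skips outputs with no match. The selection in isolation (priority list and data bundle are hypothetical):

data_priority = [
    "application/vnd.jupyter.widget-view+json",
    "text/html",
    "image/png",
    "text/plain",
]
output_data = {"image/png": "iVBORw0KGgo...", "text/plain": "<Figure>"}  # hypothetical bundle

try:
    mime_type = next(x for x in data_priority if x in output_data)
except StopIteration:
    mime_type = None  # nothing renderable for this builder, so the output is skipped

assert mime_type == "image/png"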
- """ - super().__init__(document, node, sphinx_dir) - self._render_map = { - "stderr": self.render_stderr, - "stdout": self.render_stdout, - "traceback": self.render_traceback, - "text/plain": self.render_text_plain, - "text/markdown": self.render_text_markdown, - "text/html": self.render_text_html, - "text/latex": self.render_text_latex, - "application/javascript": self.render_application_javascript, - WIDGET_VIEW_MIMETYPE: self.render_widget, - } - - def render( - self, mime_type: str, output: NotebookNode, index: int - ) -> List[nodes.Node]: - """Take a MIME bundle and MIME type, and return zero or more nodes.""" - if mime_type.startswith("image"): - nodes = self.create_render_image(mime_type)(output, index) - self.add_source_and_line(*nodes) - return nodes - if mime_type in self._render_map: - nodes = self._render_map[mime_type](output, index) - self.add_source_and_line(*nodes) - return nodes - - LOGGER.warning( - "MyST-NB: No renderer found for output MIME: %s", - mime_type, - location=(self.node.source, self.node.line), - ) - return [] - - def render_stderr(self, output: NotebookNode, index: int): - """Output a container with an unhighlighted literal block.""" - text = output["text"] - - if self.env.config.nb_output_stderr == "show": - pass - elif self.env.config.nb_output_stderr == "remove-warn": - self.make_warning(f"stderr was found in the cell outputs: {text}") - return [] - elif self.env.config.nb_output_stderr == "warn": - self.make_warning(f"stderr was found in the cell outputs: {text}") - elif self.env.config.nb_output_stderr == "error": - self.make_error(f"stderr was found in the cell outputs: {text}") - elif self.env.config.nb_output_stderr == "severe": - self.make_severe(f"stderr was found in the cell outputs: {text}") - - if ( - "remove-stderr" in self.node.metadata.get("tags", []) - or self.env.config.nb_output_stderr == "remove" - ): - return [] - - node = nodes.literal_block( - text=output["text"], - rawsource=output["text"], - language=self.env.config.nb_render_text_lexer, - classes=["output", "stderr"], - ) - return [node] - - def render_stdout(self, output: NotebookNode, index: int): - - if "remove-stdout" in self.node.metadata.get("tags", []): - return [] - - return [ - nodes.literal_block( - text=output["text"], - rawsource=output["text"], - language=self.env.config.nb_render_text_lexer, - classes=["output", "stream"], - ) - ] - - def render_traceback(self, output: NotebookNode, index: int): - traceback = "\n".join(output["traceback"]) - text = nbconvert.filters.strip_ansi(traceback) - return [ - nodes.literal_block( - text=text, - rawsource=text, - language="ipythontb", - classes=["output", "traceback"], - ) - ] - - def render_text_markdown(self, output: NotebookNode, index: int): - text = output["data"]["text/markdown"] - return self.parse_markdown(text) - - def render_text_html(self, output: NotebookNode, index: int): - text = output["data"]["text/html"] - return [nodes.raw(text=text, format="html", classes=["output", "text_html"])] - - def render_text_latex(self, output: NotebookNode, index: int): - text = output["data"]["text/latex"] - self.env.get_domain("math").data["has_equations"][self.env.docname] = True - return [ - nodes.math_block( - text=strip_latex_delimiters(text), - nowrap=False, - number=None, - classes=["output", "text_latex"], - ) - ] - - def render_text_plain(self, output: NotebookNode, index: int): - text = output["data"]["text/plain"] - return [ - nodes.literal_block( - text=text, - rawsource=text, - 
language=self.env.config.nb_render_text_lexer, - classes=["output", "text_plain"], - ) - ] - - def render_application_javascript(self, output: NotebookNode, index: int): - data = output["data"]["application/javascript"] - return [ - nodes.raw( - text='<script type="{mime_type}">{data}</script>'.format( - mime_type="application/javascript", data=data - ), - format="html", - ) - ] - - def render_widget(self, output: NotebookNode, index: int): - data = output["data"][WIDGET_VIEW_MIMETYPE] - return [JupyterWidgetViewNode(view_spec=data)] - - def create_render_image(self, mime_type: str): - def _render_image(output: NotebookNode, index: int): - # Sphinx treats absolute paths as being rooted at the source - # directory, so make a relative path, which Sphinx treats - # as being relative to the current working directory. - filename = os.path.basename(output.metadata["filenames"][mime_type]) - # checks if file dir path is inside a subdir of dir - filedir = os.path.dirname(output.metadata["filenames"][mime_type]) - outbasedir = os.path.abspath(self.sphinx_dir) - subpaths = filedir.split(outbasedir) - final_dir = self.sphinx_dir - if subpaths and len(subpaths) > 1: - subpath = subpaths[1] - final_dir += subpath - - uri = os.path.join(final_dir, filename) - # TODO I'm not quite sure why, but as soon as you give it a width, - # it becomes clickable?! (i.e. will open the image in the browser) - image_node = nodes.image(uri=uri) - - myst_meta_img = self.node.metadata.get( - self.env.config.nb_render_key, {} - ).get("image", {}) - - for key, spec in [ - ("classes", directives.class_option), - ("alt", directives.unchanged), - ("height", directives.length_or_unitless), - ("width", directives.length_or_percentage_or_unitless), - ("scale", directives.percentage), - ("align", align), - ]: - if key in myst_meta_img: - value = myst_meta_img[key] - try: - image_node[key] = spec(value) - except (ValueError, TypeError) as error: - error_msg = ( - "Invalid image attribute: " - "(key: '{}'; value: {})\n{}".format(key, value, error) - ) - return [self.make_error(error_msg)] - - myst_meta_fig = self.node.metadata.get( - self.env.config.nb_render_key, {} - ).get("figure", {}) - if "caption" not in myst_meta_fig: - return [image_node] - - figure_node = nodes.figure("", image_node) - caption = nodes.caption(myst_meta_fig["caption"], "") - figure_node += caption - # TODO only contents of one paragraph? 
(and second should be a legend) - self.parse_markdown(myst_meta_fig["caption"], caption) - if "name" in myst_meta_fig: - name = myst_meta_fig["name"] - self.add_source_and_line(figure_node) - self.add_name(figure_node, name) - # The target should have already been processed by now, with - # sphinx.transforms.references.SphinxDomains, which calls - # sphinx.domains.std.StandardDomain.process_doc, - # so we have to replicate that here - std = self.env.get_domain("std") - nametypes = self.document.nametypes.items() - self.document.nametypes = {name: True} - try: - std.process_doc(self.env, self.env.docname, self.document) - finally: - self.document.nametypes = nametypes - - return [figure_node] - - return _render_image - - -def align(argument): - return directives.choice(argument, ("left", "center", "right")) - - -class CellOutputRendererInline(CellOutputRenderer): - """Replaces literal/math blocks with non-block versions""" - - def render_stderr(self, output: NotebookNode, index: int): - """Output a container with an unhighlighted literal""" - return [ - nodes.literal( - text=output["text"], - rawsource="", # disables Pygment highlighting - language="none", - classes=["stderr"], - ) - ] - - def render_stdout(self, output: NotebookNode, index: int): - """Output a container with an unhighlighted literal""" - return [ - nodes.literal( - text=output["text"], - rawsource="", # disables Pygment highlighting - language="none", - classes=["output", "stream"], - ) - ] - - def render_traceback(self, output: NotebookNode, index: int): - traceback = "\n".join(output["traceback"]) - text = nbconvert.filters.strip_ansi(traceback) - return [ - nodes.literal( - text=text, - rawsource=text, - language="ipythontb", - classes=["output", "traceback"], - ) - ] - - def render_text_latex(self, output: NotebookNode, index: int): - data = output["data"]["text/latex"] - self.env.get_domain("math").data["has_equations"][self.env.docname] = True - return [ - nodes.math( - text=strip_latex_delimiters(data), - nowrap=False, - number=None, - classes=["output", "text_latex"], - ) - ] - - def render_text_plain(self, output: NotebookNode, index: int): - data = output["data"]["text/plain"] - return [ - nodes.literal( - text=data, - rawsource=data, - language="none", - classes=["output", "text_plain"], - ) - ] diff --git a/myst_nb/new/sphinx_.py b/myst_nb/sphinx_.py similarity index 97% rename from myst_nb/new/sphinx_.py rename to myst_nb/sphinx_.py index 3fdde3c6..f5fa1736 100644 --- a/myst_nb/new/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -4,7 +4,6 @@ from pathlib import Path from typing import Any, Dict, List, Optional, Sequence -import nbformat from docutils import nodes from markdown_it.token import Token from markdown_it.tree import SyntaxTreeNode @@ -13,22 +12,24 @@ from myst_parser.main import MdParserConfig, create_md_parser from myst_parser.sphinx_parser import MystParser from myst_parser.sphinx_renderer import SphinxRenderer +import nbformat from nbformat import NotebookNode from sphinx.addnodes import download_reference from sphinx.application import Sphinx from sphinx.environment import BuildEnvironment from sphinx.transforms.post_transforms import SphinxPostTransform from sphinx.util import logging as sphinx_logging -from sphinx.util.docutils import ReferenceRole +from sphinx.util.docutils import ReferenceRole, SphinxDirective from myst_nb import __version__ from myst_nb.configuration import NbParserConfig -from myst_nb.new.execute import update_notebook -from myst_nb.new.execution_tables import 
setup_exec_table_extension -from myst_nb.new.loggers import DEFAULT_LOG_TYPE, SphinxDocLogger -from myst_nb.new.parse import notebook_to_tokens -from myst_nb.new.read import UnexpectedCellDirective, create_nb_reader -from myst_nb.new.render import ( +from myst_nb.execute import update_notebook +from myst_nb.execution_tables import setup_exec_table_extension +from myst_nb.loggers import DEFAULT_LOG_TYPE, SphinxDocLogger +from myst_nb.nb_glue.domain import NbGlueDomain +from myst_nb.parse import notebook_to_tokens +from myst_nb.read import UnexpectedCellDirective, create_nb_reader +from myst_nb.render import ( WIDGET_STATE_MIMETYPE, NbElementRenderer, coalesce_streams, @@ -102,6 +103,9 @@ def sphinx_setup(app: Sphinx): # setup extension for execution statistics tables setup_exec_table_extension(app) + # add glue domain + app.add_domain(NbGlueDomain) + return { "version": __version__, "parallel_read_safe": True, @@ -167,7 +171,7 @@ def add_exclude_patterns(app: Sphinx, config): def add_html_static_path(app: Sphinx): """Add static path for HTML resources.""" # TODO better to use importlib_resources here, or perhaps now there is another way? - static_path = Path(__file__).parent.absolute().with_name("_static") + static_path = Path(__file__).absolute().with_name("_static") app.config.html_static_path.append(str(static_path)) diff --git a/pyproject.toml b/pyproject.toml index 81a52e7e..624aaffa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,3 +5,4 @@ build-backend = "setuptools.build_meta" [tool.isort] profile = "black" src_paths = ["myst_nb", "tests"] +force_sort_within_sections = true diff --git a/setup.cfg b/setup.cfg index 5cd605b0..d0a5b49c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -63,11 +63,8 @@ console_scripts = mystnb-docutils-latex = myst_nb.docutils_:cli_latex mystnb-docutils-xml = myst_nb.docutils_:cli_xml mystnb-docutils-pseudoxml = myst_nb.docutils_:cli_pseudoxml -myst_nb.mime_render = - default = myst_nb.render_outputs:CellOutputRenderer - inline = myst_nb.render_outputs:CellOutputRendererInline myst_nb.renderers = - default = myst_nb.new.render:NbElementRenderer + default = myst_nb.render:NbElementRenderer pygments.lexers = myst-ansi = myst_nb.lexers:AnsiColorLexer ipythontb = myst_nb.lexers:IPythonTracebackLexer @@ -125,8 +122,14 @@ follow_imports = skip [mypy-docutils.*] ignore_missing_imports = True -[mypy-jupyter_sphinx.*] +[mypy-nbformat.*] ignore_missing_imports = True -[mypy-nbformat.*] +[mypy-jupyter_cache.*] +ignore_missing_imports = True + +[mypy-IPython.*] +ignore_missing_imports = True + +[mypy-pygments.*] ignore_missing_imports = True diff --git a/tests/conftest.py b/tests/conftest.py index 338ab68c..0f7c6da0 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,11 +1,8 @@ import json import os -import uuid from pathlib import Path +import uuid -import nbformat as nbf -import pytest -import sphinx from docutils.nodes import image as image_node from nbconvert.filters import strip_ansi from nbdime.diffing.notebooks import ( @@ -14,6 +11,9 @@ set_notebook_diff_targets, ) from nbdime.prettyprint import pretty_print_diff +import nbformat as nbf +import pytest +import sphinx from sphinx.util.console import nocolor pytest_plugins = "sphinx.testing.fixtures" diff --git a/tests/test_ansi_lexer.py b/tests/test_ansi_lexer.py index 838e984d..4cc78065 100644 --- a/tests/test_ansi_lexer.py +++ b/tests/test_ansi_lexer.py @@ -1,5 +1,5 @@ -import pytest from pygments.token import Text, Token +import pytest from myst_nb import lexers diff --git a/tests/test_execute.py 
b/tests/test_execute.py index 4a281b63..c0c50bd2 100644 --- a/tests/test_execute.py +++ b/tests/test_execute.py @@ -12,7 +12,7 @@ def regress_nb_doc(file_regression, sphinx_run, check_nbs): @pytest.mark.sphinx_params( - "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "auto"} + "basic_unrun.ipynb", conf={"nb_execution_mode": "auto"} ) def test_basic_unrun_auto(sphinx_run, file_regression, check_nbs): sphinx_run.build() @@ -31,7 +31,7 @@ def test_basic_unrun_auto(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( - "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "cache"} + "basic_unrun.ipynb", conf={"nb_execution_mode": "cache"} ) def test_basic_unrun_cache(sphinx_run, file_regression, check_nbs): """The outputs should be populated.""" @@ -50,7 +50,7 @@ def test_basic_unrun_cache(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( - "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "cache"} + "basic_unrun.ipynb", conf={"nb_execution_mode": "cache"} ) def test_rebuild_cache(sphinx_run): """The notebook should only be executed once.""" @@ -62,7 +62,7 @@ def test_rebuild_cache(sphinx_run): @pytest.mark.sphinx_params( - "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "force"} + "basic_unrun.ipynb", conf={"nb_execution_mode": "force"} ) def test_rebuild_force(sphinx_run): """The notebook should be executed twice.""" @@ -76,8 +76,8 @@ def test_rebuild_force(sphinx_run): @pytest.mark.sphinx_params( "basic_unrun.ipynb", conf={ - "jupyter_execute_notebooks": "cache", - "execution_excludepatterns": ["basic_*"], + "nb_execution_mode": "cache", + "nb_execution_excludepatterns": ["basic_*"], }, ) def test_exclude_path(sphinx_run, file_regression): @@ -91,7 +91,7 @@ def test_exclude_path(sphinx_run, file_regression): @pytest.mark.sphinx_params( - "basic_failing.ipynb", conf={"jupyter_execute_notebooks": "cache"} + "basic_failing.ipynb", conf={"nb_execution_mode": "cache"} ) def test_basic_failing_cache(sphinx_run, file_regression, check_nbs): sphinx_run.build() @@ -111,7 +111,7 @@ def test_basic_failing_cache(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( - "basic_failing.ipynb", conf={"jupyter_execute_notebooks": "auto"} + "basic_failing.ipynb", conf={"nb_execution_mode": "auto"} ) def test_basic_failing_auto(sphinx_run, file_regression, check_nbs): sphinx_run.build() @@ -129,7 +129,7 @@ def test_basic_failing_auto(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( "basic_failing.ipynb", - conf={"jupyter_execute_notebooks": "cache", "execution_allow_errors": True}, + conf={"nb_execution_mode": "cache", "nb_execution_allow_errors": True}, ) def test_allow_errors_cache(sphinx_run, file_regression, check_nbs): sphinx_run.build() @@ -140,7 +140,7 @@ def test_allow_errors_cache(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( "basic_failing.ipynb", - conf={"jupyter_execute_notebooks": "auto", "execution_allow_errors": True}, + conf={"nb_execution_mode": "auto", "nb_execution_allow_errors": True}, ) def test_allow_errors_auto(sphinx_run, file_regression, check_nbs): sphinx_run.build() @@ -150,7 +150,7 @@ def test_allow_errors_auto(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( - "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "force"} + "basic_unrun.ipynb", conf={"nb_execution_mode": "force"} ) def test_outputs_present(sphinx_run, file_regression, check_nbs): sphinx_run.build() @@ -161,7 +161,7 @@ def test_outputs_present(sphinx_run, file_regression, 
check_nbs): @pytest.mark.sphinx_params( - "complex_outputs_unrun.ipynb", conf={"jupyter_execute_notebooks": "cache"} + "complex_outputs_unrun.ipynb", conf={"nb_execution_mode": "cache"} ) def test_complex_outputs_unrun_cache(sphinx_run, file_regression, check_nbs): sphinx_run.build() @@ -176,7 +176,7 @@ def test_complex_outputs_unrun_cache(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( - "complex_outputs_unrun.ipynb", conf={"jupyter_execute_notebooks": "auto"} + "complex_outputs_unrun.ipynb", conf={"nb_execution_mode": "auto"} ) def test_complex_outputs_unrun_auto(sphinx_run, file_regression, check_nbs): sphinx_run.build() @@ -191,7 +191,7 @@ def test_complex_outputs_unrun_auto(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( - "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "off"} + "basic_unrun.ipynb", conf={"nb_execution_mode": "off"} ) def test_no_execute(sphinx_run, file_regression, check_nbs): sphinx_run.build() @@ -201,7 +201,7 @@ def test_no_execute(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( - "basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "cache"} + "basic_unrun.ipynb", conf={"nb_execution_mode": "cache"} ) def test_jupyter_cache_path(sphinx_run, file_regression, check_nbs): sphinx_run.build() @@ -212,7 +212,7 @@ def test_jupyter_cache_path(sphinx_run, file_regression, check_nbs): # Testing relative paths within the notebook @pytest.mark.sphinx_params( - "basic_relative.ipynb", conf={"jupyter_execute_notebooks": "cache"} + "basic_relative.ipynb", conf={"nb_execution_mode": "cache"} ) def test_relative_path_cache(sphinx_run, file_regression, check_nbs): sphinx_run.build() @@ -220,7 +220,7 @@ def test_relative_path_cache(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( - "basic_relative.ipynb", conf={"jupyter_execute_notebooks": "force"} + "basic_relative.ipynb", conf={"nb_execution_mode": "force"} ) def test_relative_path_force(sphinx_run, file_regression, check_nbs): sphinx_run.build() @@ -230,7 +230,7 @@ def test_relative_path_force(sphinx_run, file_regression, check_nbs): # Execution timeout configuration @pytest.mark.sphinx_params( "sleep_10.ipynb", - conf={"jupyter_execute_notebooks": "cache", "execution_timeout": 1}, + conf={"nb_execution_mode": "cache", "nb_execution_timeout": 1}, ) def test_execution_timeout(sphinx_run, file_regression, check_nbs): """execution should fail given the low timeout value""" @@ -241,7 +241,7 @@ def test_execution_timeout(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( "sleep_10_metadata_timeout.ipynb", - conf={"jupyter_execute_notebooks": "cache", "execution_timeout": 60}, + conf={"nb_execution_mode": "cache", "nb_execution_timeout": 60}, ) def test_execution_metadata_timeout(sphinx_run, file_regression, check_nbs): """notebook timeout metadata has higher preference then execution_timeout config""" @@ -251,7 +251,7 @@ def test_execution_metadata_timeout(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( "nb_exec_table.md", - conf={"jupyter_execute_notebooks": "auto"}, + conf={"nb_execution_mode": "auto"}, ) def test_nb_exec_table(sphinx_run, file_regression, check_nbs): """Test that the table gets output into the HTML, @@ -268,7 +268,7 @@ def test_nb_exec_table(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( "custom-formats.Rmd", conf={ - "jupyter_execute_notebooks": "auto", + "nb_execution_mode": "auto", "nb_custom_formats": {".Rmd": ["jupytext.reads", {"fmt": "Rmd"}]}, }, ) @@ 
-287,7 +287,7 @@ def test_custom_convert_auto(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( "custom-formats.Rmd", conf={ - "jupyter_execute_notebooks": "cache", + "nb_execution_mode": "cache", "nb_custom_formats": {".Rmd": ["jupytext.reads", {"fmt": "Rmd"}]}, }, ) diff --git a/tests/test_execute/test_allow_errors_auto.xml b/tests/test_execute/test_allow_errors_auto.xml index 591a7d5b..d1f28253 100644 --- a/tests/test_execute/test_allow_errors_auto.xml +++ b/tests/test_execute/test_allow_errors_auto.xml @@ -4,9 +4,15 @@ a title <paragraph> some text - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> raise Exception('oopsie!') - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output traceback" language="ipythontb" xml:space="preserve"> + --------------------------------------------------------------------------- + Exception Traceback (most recent call last) + <ipython-input-1-714b2b556897> in <module> + ----> 1 raise Exception('oopsie!') + + Exception: oopsie! diff --git a/tests/test_execute/test_allow_errors_cache.xml b/tests/test_execute/test_allow_errors_cache.xml index 591a7d5b..d1f28253 100644 --- a/tests/test_execute/test_allow_errors_cache.xml +++ b/tests/test_execute/test_allow_errors_cache.xml @@ -4,9 +4,15 @@ a title <paragraph> some text - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> raise Exception('oopsie!') - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output traceback" language="ipythontb" xml:space="preserve"> + --------------------------------------------------------------------------- + Exception Traceback (most recent call last) + <ipython-input-1-714b2b556897> in <module> + ----> 1 raise Exception('oopsie!') + + Exception: oopsie! 
diff --git a/tests/test_execute/test_basic_unrun_auto.xml b/tests/test_execute/test_basic_unrun_auto.xml index 4459cd69..65d43c23 100644 --- a/tests/test_execute/test_basic_unrun_auto.xml +++ b/tests/test_execute/test_basic_unrun_auto.xml @@ -4,10 +4,11 @@ a title <paragraph> some text - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> a=1 print(a) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output stream" language="myst-ansi" xml:space="preserve"> + 1 diff --git a/tests/test_execute/test_basic_unrun_cache.xml b/tests/test_execute/test_basic_unrun_cache.xml index 4459cd69..65d43c23 100644 --- a/tests/test_execute/test_basic_unrun_cache.xml +++ b/tests/test_execute/test_basic_unrun_cache.xml @@ -4,10 +4,11 @@ a title <paragraph> some text - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> a=1 print(a) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output stream" language="myst-ansi" xml:space="preserve"> + 1 diff --git a/tests/test_execute/test_complex_outputs_unrun_auto.ipynb b/tests/test_execute/test_complex_outputs_unrun_auto.ipynb index 03800786..0da9ffde 100644 --- a/tests/test_execute/test_complex_outputs_unrun_auto.ipynb +++ b/tests/test_execute/test_complex_outputs_unrun_auto.ipynb @@ -428,11 +428,7 @@ ] }, "execution_count": 5, - "metadata": { - "filenames": { - "image/png": "/private/var/folders/_w/bsp9j6414gs4gdlnhhcnqm9c0000gn/T/pytest-of-matthewmckay/pytest-37/test_complex_outputs_unrun_aut0/source/_build/jupyter_execute/complex_outputs_unrun_22_0.png" - } - }, + "metadata": {}, "output_type": "execute_result" } ], @@ -560,7 +556,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.7.12" }, "latex_envs": { "LaTeX_envs_menu_present": true, @@ -696,4 +692,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/tests/test_execute/test_complex_outputs_unrun_auto.xml b/tests/test_execute/test_complex_outputs_unrun_auto.xml index b64ca34f..71f32c15 100644 --- a/tests/test_execute/test_complex_outputs_unrun_auto.xml +++ b/tests/test_execute/test_complex_outputs_unrun_auto.xml @@ -1,6 +1,6 @@ <document source="complex_outputs_unrun"> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="0" cell_metadata="{'init_cell': True, 'slideshow': {'slide_type': 'skip'}}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> import matplotlib.pyplot as plt import pandas as pd @@ -87,15 +87,19 @@ <section classes="tex2jax_ignore mathjax_ignore" ids="text-output" names="text\ output"> <title> Text Output - <CellNode cell_type="code" classes="cell"> - <CellInputNode 
classes="cell_input"> + <container cell_index="11" cell_metadata="{'ipub': {'text': {'format': {'backgroundcolor': '\\color{blue!10}'}}}}" classes="cell" exec_count="2" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> print(""" This is some printed text, with a nicely formatted output. """) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output stream" language="myst-ansi" xml:space="preserve"> + + This is some printed text, + with a nicely formatted output. + <section classes="tex2jax_ignore mathjax_ignore" ids="images-and-figures" names="images\ and\ figures"> <title> Images and Figures @@ -111,8 +115,8 @@ Tables (with pandas) <paragraph> The plotting code for a pandas Dataframe table (\cref{tbl:example}). - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="18" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_pd', 'placement': 'H', 'widefigure': False}, 'table': {'alternate': 'gray!20', 'caption': 'An example of a table created with pandas dataframe.', 'label': 'tbl:example', 'placement': 'H'}}}" classes="cell" exec_count="3" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> np.random.seed(0) df = pd.DataFrame(np.random.rand(3,4),columns=['a','b','c','d']) @@ -120,46 +124,144 @@ df.b = ['l','m','n'] df.set_index(['a','b']) df.round(3) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + a b c d + 0 $\delta$ l 0.603 0.545 + 1 x m 0.438 0.892 + 2 y n 0.792 0.529 + <container mime_type="text/html"> + <raw classes="output text_html" format="html" xml:space="preserve"> + <div> + <style scoped> + .dataframe tbody tr th:only-of-type { + vertical-align: middle; + } + + .dataframe tbody tr th { + vertical-align: top; + } + + .dataframe thead th { + text-align: right; + } + </style> + <table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>a</th> + <th>b</th> + <th>c</th> + <th>d</th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>$\delta$</td> + <td>l</td> + <td>0.603</td> + <td>0.545</td> + </tr> + <tr> + <th>1</th> + <td>x</td> + <td>m</td> + <td>0.438</td> + <td>0.892</td> + </tr> + <tr> + <th>2</th> + <td>y</td> + <td>n</td> + <td>0.792</td> + <td>0.529</td> + </tr> + </tbody> + </table> + </div> + <container mime_type="text/latex"> + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + \begin{tabular}{lllrr} + \toprule + {} & a & b & c & d \\ + \midrule + 0 & \$\textbackslash delta\$ & l & 0.603 & 0.545 \\ + 1 & x & m & 0.438 & 0.892 \\ + 2 & y & n & 0.792 & 0.529 \\ + \bottomrule + \end{tabular} <section classes="tex2jax_ignore mathjax_ignore" ids="equations-with-ipython-or-sympy" names="equations\ (with\ ipython\ or\ sympy)"> <title> Equations (with ipython or sympy) - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="20" cell_metadata="{'ipub': {'equation': {'label': 
'eqn:example_ipy'}}}" classes="cell" exec_count="4" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> Latex('$$ a = b+c $$') - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + <IPython.core.display.Latex object> + <container mime_type="text/latex"> + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + a = b+c <paragraph> The plotting code for a sympy equation (=@eqn:example_sympy). - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="22" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_sym', 'placement': 'H', 'widefigure': False}, 'equation': {'environment': 'equation', 'label': 'eqn:example_sympy'}}}" classes="cell" exec_count="5" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> y = sym.Function('y') n = sym.symbols(r'\alpha') f = y(n)-2*y(n-1/sym.pi)-5*y(n-2) sym.rsolve(f,y(n),[1,4]) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + \alpha ⎛1 2⋅√5⋅ⅈ⎞ \alpha ⎛1 2⋅√5⋅ⅈ⎞ + (√5⋅ⅈ) ⋅⎜─ - ──────⎟ + (-√5⋅ⅈ) ⋅⎜─ + ──────⎟ + ⎝2 5 ⎠ ⎝2 5 ⎠ + <container mime_type="image/png"> + <image candidates="{'*': '_build/jupyter_execute/e2dfbe330154316cfb6f3186e8f57fc4df8aee03b0303ed1345fc22cd51f66de.png'}" uri="_build/jupyter_execute/e2dfbe330154316cfb6f3186e8f57fc4df8aee03b0303ed1345fc22cd51f66de.png"> + <container mime_type="text/latex"> + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + \displaystyle \left(\sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} - \frac{2 \sqrt{5} i}{5}\right) + \left(- \sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} + \frac{2 \sqrt{5} i}{5}\right) <section classes="tex2jax_ignore mathjax_ignore" ids="interactive-outputs" names="interactive\ outputs"> <title> Interactive outputs <section ids="ipywidgets" names="ipywidgets"> <title> ipywidgets - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="24" cell_metadata="{}" classes="cell" exec_count="6" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> import ipywidgets as widgets widgets.Layout(model_id="1337h4x0R") - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + Layout() + <container mime_type="application/vnd.jupyter.widget-view+json"> + <raw format="html" xml:space="preserve"> + <script type="application/vnd.jupyter.widget-view+json">{"version_major": 2, 
"version_minor": 0, "model_id": "1337h4x0R"}</script> + <container cell_index="25" cell_metadata="{}" classes="cell" exec_count="7" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> from IPython.display import display, Markdown display(Markdown('**_some_ markdown**')) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> - <JupyterWidgetStateNode state="{'state': {'1337h4x0R': {'model_name': 'LayoutModel', 'model_module': '@jupyter-widgets/base', 'model_module_version': '1.2.0', 'state': {'_model_module': '@jupyter-widgets/base', '_model_module_version': '1.2.0', '_model_name': 'LayoutModel', '_view_count': None, '_view_module': '@jupyter-widgets/base', '_view_module_version': '1.2.0', '_view_name': 'LayoutView', 'align_content': None, 'align_items': None, 'align_self': None, 'border': None, 'bottom': None, 'display': None, 'flex': None, 'flex_flow': None, 'grid_area': None, 'grid_auto_columns': None, 'grid_auto_flow': None, 'grid_auto_rows': None, 'grid_column': None, 'grid_gap': None, 'grid_row': None, 'grid_template_areas': None, 'grid_template_columns': None, 'grid_template_rows': None, 'height': None, 'justify_content': None, 'justify_items': None, 'left': None, 'margin': None, 'max_height': None, 'max_width': None, 'min_height': None, 'min_width': None, 'object_fit': None, 'object_position': None, 'order': None, 'overflow': None, 'overflow_x': None, 'overflow_y': None, 'padding': None, 'right': None, 'top': None, 'visibility': None, 'width': None}}}, 'version_major': 2, 'version_minor': 0}"> + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + <IPython.core.display.Markdown object> + <container mime_type="text/markdown"> + <paragraph> + <strong> + <emphasis> + some + markdown diff --git a/tests/test_execute/test_complex_outputs_unrun_cache.ipynb b/tests/test_execute/test_complex_outputs_unrun_cache.ipynb index 441446a3..0da9ffde 100644 --- a/tests/test_execute/test_complex_outputs_unrun_cache.ipynb +++ b/tests/test_execute/test_complex_outputs_unrun_cache.ipynb @@ -428,11 +428,7 @@ ] }, "execution_count": 5, - "metadata": { - "filenames": { - "image/png": "/private/var/folders/_w/bsp9j6414gs4gdlnhhcnqm9c0000gn/T/pytest-of-matthewmckay/pytest-37/test_complex_outputs_unrun_cac0/source/_build/jupyter_execute/complex_outputs_unrun_22_0.png" - } - }, + "metadata": {}, "output_type": "execute_result" } ], @@ -560,7 +556,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.7.12" }, "latex_envs": { "LaTeX_envs_menu_present": true, @@ -696,4 +692,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/tests/test_execute/test_complex_outputs_unrun_cache.xml b/tests/test_execute/test_complex_outputs_unrun_cache.xml index 58c571dd..71f32c15 100644 --- a/tests/test_execute/test_complex_outputs_unrun_cache.xml +++ b/tests/test_execute/test_complex_outputs_unrun_cache.xml @@ -1,6 +1,6 @@ <document source="complex_outputs_unrun"> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="0" cell_metadata="{'init_cell': True, 'slideshow': {'slide_type': 'skip'}}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" 
nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> import matplotlib.pyplot as plt import pandas as pd @@ -87,15 +87,19 @@ <section classes="tex2jax_ignore mathjax_ignore" ids="text-output" names="text\ output"> <title> Text Output - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="11" cell_metadata="{'ipub': {'text': {'format': {'backgroundcolor': '\\color{blue!10}'}}}}" classes="cell" exec_count="2" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> print(""" This is some printed text, with a nicely formatted output. """) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output stream" language="myst-ansi" xml:space="preserve"> + + This is some printed text, + with a nicely formatted output. + <section classes="tex2jax_ignore mathjax_ignore" ids="images-and-figures" names="images\ and\ figures"> <title> Images and Figures @@ -111,8 +115,8 @@ Tables (with pandas) <paragraph> The plotting code for a pandas Dataframe table (\cref{tbl:example}). - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="18" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_pd', 'placement': 'H', 'widefigure': False}, 'table': {'alternate': 'gray!20', 'caption': 'An example of a table created with pandas dataframe.', 'label': 'tbl:example', 'placement': 'H'}}}" classes="cell" exec_count="3" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> np.random.seed(0) df = pd.DataFrame(np.random.rand(3,4),columns=['a','b','c','d']) @@ -120,46 +124,144 @@ df.b = ['l','m','n'] df.set_index(['a','b']) df.round(3) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + a b c d + 0 $\delta$ l 0.603 0.545 + 1 x m 0.438 0.892 + 2 y n 0.792 0.529 + <container mime_type="text/html"> + <raw classes="output text_html" format="html" xml:space="preserve"> + <div> + <style scoped> + .dataframe tbody tr th:only-of-type { + vertical-align: middle; + } + + .dataframe tbody tr th { + vertical-align: top; + } + + .dataframe thead th { + text-align: right; + } + </style> + <table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>a</th> + <th>b</th> + <th>c</th> + <th>d</th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>$\delta$</td> + <td>l</td> + <td>0.603</td> + <td>0.545</td> + </tr> + <tr> + <th>1</th> + <td>x</td> + <td>m</td> + <td>0.438</td> + <td>0.892</td> + </tr> + <tr> + <th>2</th> + <td>y</td> + <td>n</td> + <td>0.792</td> + <td>0.529</td> + </tr> + </tbody> + </table> + </div> + <container mime_type="text/latex"> + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + \begin{tabular}{lllrr} + \toprule + {} & a & b & c & d \\ + \midrule + 0 & \$\textbackslash delta\$ & l & 0.603 & 0.545 \\ + 1 & x & m & 0.438 & 0.892 \\ + 2 & y & n & 0.792 & 0.529 \\ + \bottomrule + \end{tabular} <section 
classes="tex2jax_ignore mathjax_ignore" ids="equations-with-ipython-or-sympy" names="equations\ (with\ ipython\ or\ sympy)"> <title> Equations (with ipython or sympy) - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="20" cell_metadata="{'ipub': {'equation': {'label': 'eqn:example_ipy'}}}" classes="cell" exec_count="4" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> Latex('$$ a = b+c $$') - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + <IPython.core.display.Latex object> + <container mime_type="text/latex"> + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + a = b+c <paragraph> The plotting code for a sympy equation (=@eqn:example_sympy). - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="22" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_sym', 'placement': 'H', 'widefigure': False}, 'equation': {'environment': 'equation', 'label': 'eqn:example_sympy'}}}" classes="cell" exec_count="5" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> y = sym.Function('y') n = sym.symbols(r'\alpha') f = y(n)-2*y(n-1/sym.pi)-5*y(n-2) sym.rsolve(f,y(n),[1,4]) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + \alpha ⎛1 2⋅√5⋅ⅈ⎞ \alpha ⎛1 2⋅√5⋅ⅈ⎞ + (√5⋅ⅈ) ⋅⎜─ - ──────⎟ + (-√5⋅ⅈ) ⋅⎜─ + ──────⎟ + ⎝2 5 ⎠ ⎝2 5 ⎠ + <container mime_type="image/png"> + <image candidates="{'*': '_build/jupyter_execute/e2dfbe330154316cfb6f3186e8f57fc4df8aee03b0303ed1345fc22cd51f66de.png'}" uri="_build/jupyter_execute/e2dfbe330154316cfb6f3186e8f57fc4df8aee03b0303ed1345fc22cd51f66de.png"> + <container mime_type="text/latex"> + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + \displaystyle \left(\sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} - \frac{2 \sqrt{5} i}{5}\right) + \left(- \sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} + \frac{2 \sqrt{5} i}{5}\right) <section classes="tex2jax_ignore mathjax_ignore" ids="interactive-outputs" names="interactive\ outputs"> <title> Interactive outputs <section ids="ipywidgets" names="ipywidgets"> <title> ipywidgets - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="24" cell_metadata="{}" classes="cell" exec_count="6" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> import ipywidgets as widgets widgets.Layout(model_id="1337h4x0R") - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + 
<container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + Layout() + <container mime_type="application/vnd.jupyter.widget-view+json"> + <raw format="html" xml:space="preserve"> + <script type="application/vnd.jupyter.widget-view+json">{"version_major": 2, "version_minor": 0, "model_id": "1337h4x0R"}</script> + <container cell_index="25" cell_metadata="{}" classes="cell" exec_count="7" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> from IPython.display import display, Markdown display(Markdown('**_some_ markdown**')) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> - <JupyterWidgetStateNode state="{'state': {'1337h4x0R': {'model_module': '@jupyter-widgets/base', 'model_module_version': '1.2.0', 'model_name': 'LayoutModel', 'state': {'_model_module': '@jupyter-widgets/base', '_model_module_version': '1.2.0', '_model_name': 'LayoutModel', '_view_count': None, '_view_module': '@jupyter-widgets/base', '_view_module_version': '1.2.0', '_view_name': 'LayoutView', 'align_content': None, 'align_items': None, 'align_self': None, 'border': None, 'bottom': None, 'display': None, 'flex': None, 'flex_flow': None, 'grid_area': None, 'grid_auto_columns': None, 'grid_auto_flow': None, 'grid_auto_rows': None, 'grid_column': None, 'grid_gap': None, 'grid_row': None, 'grid_template_areas': None, 'grid_template_columns': None, 'grid_template_rows': None, 'height': None, 'justify_content': None, 'justify_items': None, 'left': None, 'margin': None, 'max_height': None, 'max_width': None, 'min_height': None, 'min_width': None, 'object_fit': None, 'object_position': None, 'order': None, 'overflow': None, 'overflow_x': None, 'overflow_y': None, 'padding': None, 'right': None, 'top': None, 'visibility': None, 'width': None}}}, 'version_major': 2, 'version_minor': 0}"> + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + <IPython.core.display.Markdown object> + <container mime_type="text/markdown"> + <paragraph> + <strong> + <emphasis> + some + markdown diff --git a/tests/test_execute/test_custom_convert_auto.ipynb b/tests/test_execute/test_custom_convert_auto.ipynb index d1e24ff2..10731afb 100644 --- a/tests/test_execute/test_custom_convert_auto.ipynb +++ b/tests/test_execute/test_custom_convert_auto.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "raw", - "id": "96daa1fa", + "id": "14a2f009", "metadata": {}, "source": [ "---\n", @@ -14,7 +14,7 @@ }, { "cell_type": "markdown", - "id": "213fcb7e", + "id": "3f47ed7c", "metadata": {}, "source": [ "# Custom Formats" @@ -23,7 +23,7 @@ { "cell_type": "code", "execution_count": 1, - "id": "e4b22e8e", + "id": "b9b921ab", "metadata": { "echo": true }, @@ -36,7 +36,7 @@ { "cell_type": "code", "execution_count": 2, - "id": "7c1ad157", + "id": "a581f2bf", "metadata": { "fig.height": 5, "fig.width": 8, @@ -64,9 +64,6 @@ ] }, "metadata": { - "filenames": { - "image/png": "/private/var/folders/_w/bsp9j6414gs4gdlnhhcnqm9c0000gn/T/pytest-of-matthewmckay/pytest-37/test_custom_convert_auto0/source/_build/jupyter_execute/custom-formats_3_1.png" - }, "needs_background": "light" }, "output_type": "display_data" @@ -99,9 +96,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - 
"version": "3.8.10" + "version": "3.7.12" } }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/tests/test_execute/test_custom_convert_auto.xml b/tests/test_execute/test_custom_convert_auto.xml index 2997d57c..bbfaf2bd 100644 --- a/tests/test_execute/test_custom_convert_auto.xml +++ b/tests/test_execute/test_custom_convert_auto.xml @@ -2,11 +2,20 @@ <section classes="tex2jax_ignore mathjax_ignore" ids="custom-formats" names="custom\ formats"> <title> Custom Formats - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="2" cell_metadata="{'echo': True}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> import pandas as pd x = pd.Series({'A':1, 'B':3, 'C':2}) - <CellNode cell_type="code" classes="cell tag_remove_input"> - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="2"> + <container cell_index="3" cell_metadata="{'name': 'bar_plot', 'tags': ['remove_input'], 'fig.height': 5, 'fig.width': 8}" classes="cell tag_remove_input" exec_count="2" nb_element="cell_code"> + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + <AxesSubplot:title={'center':'Sample plot'}> + <container nb_element="mime_bundle"> + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + <Figure size 432x288 with 1 Axes> + <container mime_type="image/png"> + <image candidates="{'*': '_build/jupyter_execute/cc1d31550c7aaad5128f57d4f4cae576a29174f6cd515e37c0b911f6010659f3.png'}" uri="_build/jupyter_execute/cc1d31550c7aaad5128f57d4f4cae576a29174f6cd515e37c0b911f6010659f3.png"> diff --git a/tests/test_execute/test_custom_convert_cache.ipynb b/tests/test_execute/test_custom_convert_cache.ipynb index 021b0f8d..c5e881cd 100644 --- a/tests/test_execute/test_custom_convert_cache.ipynb +++ b/tests/test_execute/test_custom_convert_cache.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "raw", - "id": "80d3358a", + "id": "a593bc69", "metadata": {}, "source": [ "---\n", @@ -14,7 +14,7 @@ }, { "cell_type": "markdown", - "id": "f6e4ccb1", + "id": "63b55a6a", "metadata": {}, "source": [ "# Custom Formats" @@ -23,7 +23,7 @@ { "cell_type": "code", "execution_count": 1, - "id": "52d195f5", + "id": "447e44cb", "metadata": { "echo": true }, @@ -36,7 +36,7 @@ { "cell_type": "code", "execution_count": 2, - "id": "3c8afd0b", + "id": "884b420a", "metadata": { "fig.height": 5, "fig.width": 8, @@ -64,9 +64,6 @@ ] }, "metadata": { - "filenames": { - "image/png": "/private/var/folders/_w/bsp9j6414gs4gdlnhhcnqm9c0000gn/T/pytest-of-matthewmckay/pytest-37/test_custom_convert_cache0/source/_build/jupyter_execute/custom-formats_3_1.png" - }, "needs_background": "light" }, "output_type": "display_data" @@ -99,9 +96,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.7.12" } }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/tests/test_execute/test_custom_convert_cache.xml b/tests/test_execute/test_custom_convert_cache.xml index 2997d57c..bbfaf2bd 100644 --- a/tests/test_execute/test_custom_convert_cache.xml +++ b/tests/test_execute/test_custom_convert_cache.xml @@ -2,11 +2,20 
@@ <section classes="tex2jax_ignore mathjax_ignore" ids="custom-formats" names="custom\ formats"> <title> Custom Formats - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="2" cell_metadata="{'echo': True}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> import pandas as pd x = pd.Series({'A':1, 'B':3, 'C':2}) - <CellNode cell_type="code" classes="cell tag_remove_input"> - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="2"> + <container cell_index="3" cell_metadata="{'name': 'bar_plot', 'tags': ['remove_input'], 'fig.height': 5, 'fig.width': 8}" classes="cell tag_remove_input" exec_count="2" nb_element="cell_code"> + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + <AxesSubplot:title={'center':'Sample plot'}> + <container nb_element="mime_bundle"> + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + <Figure size 432x288 with 1 Axes> + <container mime_type="image/png"> + <image candidates="{'*': '_build/jupyter_execute/cc1d31550c7aaad5128f57d4f4cae576a29174f6cd515e37c0b911f6010659f3.png'}" uri="_build/jupyter_execute/cc1d31550c7aaad5128f57d4f4cae576a29174f6cd515e37c0b911f6010659f3.png"> diff --git a/tests/test_execute/test_no_execute.xml b/tests/test_execute/test_no_execute.xml index 0fe2eaff..f333eb9c 100644 --- a/tests/test_execute/test_no_execute.xml +++ b/tests/test_execute/test_no_execute.xml @@ -4,8 +4,8 @@ a title <paragraph> some text - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="True" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> a=1 print(a) diff --git a/tests/test_execute/test_outputs_present.xml b/tests/test_execute/test_outputs_present.xml index 4459cd69..65d43c23 100644 --- a/tests/test_execute/test_outputs_present.xml +++ b/tests/test_execute/test_outputs_present.xml @@ -4,10 +4,11 @@ a title <paragraph> some text - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> a=1 print(a) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output stream" language="myst-ansi" xml:space="preserve"> + 1 diff --git a/tests/test_glue.py b/tests/test_glue.py index ffd7fbf5..6191fe36 100644 --- a/tests/test_glue.py +++ b/tests/test_glue.py @@ -1,11 +1,10 @@ -import pytest from IPython.core.displaypub import DisplayPublisher from IPython.core.interactiveshell import InteractiveShell +import pytest from myst_nb.nb_glue import glue, utils from myst_nb.nb_glue.domain import NbGlueDomain from myst_nb.nb_glue.transform import PasteNodesToDocutils -from myst_nb.render_outputs import CellOutputsToNodes class MockDisplayPublisher(DisplayPublisher): @@ -28,6 +27,7 
@@ def mock_ipython(): def test_check_priority(): """Assert that the default transform priority is less than CellOutputsToNodes""" + from myst_nb.render_outputs import CellOutputsToNodes assert PasteNodesToDocutils.default_priority < CellOutputsToNodes.default_priority @@ -104,7 +104,7 @@ def test_find_all_keys(get_test_path): } -@pytest.mark.sphinx_params("with_glue.ipynb", conf={"jupyter_execute_notebooks": "off"}) +@pytest.mark.sphinx_params("with_glue.ipynb", conf={"nb_execution_mode": "off"}) def test_parser(sphinx_run, clean_doctree, file_regression): sphinx_run.build() # print(sphinx_run.status()) diff --git a/tests/test_mystnb_features.py b/tests/test_mystnb_features.py index 74156b05..bc047564 100644 --- a/tests/test_mystnb_features.py +++ b/tests/test_mystnb_features.py @@ -4,7 +4,7 @@ @pytest.mark.sphinx_params( "mystnb_codecell_file.md", - conf={"jupyter_execute_notebooks": "cache", "source_suffix": {".md": "myst-nb"}}, + conf={"nb_execution_mode": "cache", "source_suffix": {".md": "myst-nb"}}, ) def test_codecell_file(sphinx_run, file_regression, check_nbs, get_test_path): asset_path = get_test_path("mystnb_codecell_file.py") @@ -34,7 +34,7 @@ def test_codecell_file(sphinx_run, file_regression, check_nbs, get_test_path): @pytest.mark.sphinx_params( "mystnb_codecell_file_warnings.md", - conf={"jupyter_execute_notebooks": "force", "source_suffix": {".md": "myst-nb"}}, + conf={"nb_execution_mode": "force", "source_suffix": {".md": "myst-nb"}}, ) def test_codecell_file_warnings(sphinx_run, file_regression, check_nbs, get_test_path): asset_path = get_test_path("mystnb_codecell_file.py") diff --git a/tests/test_nb_render.py b/tests/test_nb_render.py index 5dbadd52..838b8593 100644 --- a/tests/test_nb_render.py +++ b/tests/test_nb_render.py @@ -1,14 +1,12 @@ from pathlib import Path -import nbformat -import pytest -import yaml from markdown_it.utils import read_fixture_file from myst_parser.docutils_renderer import make_document from myst_parser.main import MdParserConfig from myst_parser.sphinx_renderer import mock_sphinx_env - -from myst_nb.parser import nb_to_tokens, tokens_to_docutils +import nbformat +import pytest +import yaml FIXTURE_PATH = Path(__file__).parent.joinpath("nb_fixtures") @@ -17,6 +15,7 @@ "line,title,input,expected", read_fixture_file(FIXTURE_PATH.joinpath("basic.txt")) ) def test_render(line, title, input, expected): + from myst_nb.parser import nb_to_tokens, tokens_to_docutils dct = yaml.safe_load(input) dct.setdefault("metadata", {}) ntbk = nbformat.from_dict(dct) @@ -35,6 +34,7 @@ def test_render(line, title, input, expected): read_fixture_file(FIXTURE_PATH.joinpath("reporter_warnings.txt")), ) def test_reporting(line, title, input, expected): + from myst_nb.parser import nb_to_tokens, tokens_to_docutils dct = yaml.safe_load(input) dct.setdefault("metadata", {}) ntbk = nbformat.from_dict(dct) diff --git a/tests/test_parser.py b/tests/test_parser.py index ac64638b..e42526f5 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -1,7 +1,7 @@ import pytest -@pytest.mark.sphinx_params("basic_run.ipynb", conf={"jupyter_execute_notebooks": "off"}) +@pytest.mark.sphinx_params("basic_run.ipynb", conf={"nb_execution_mode": "off"}) def test_basic_run(sphinx_run, file_regression): sphinx_run.build() # print(sphinx_run.status()) @@ -28,7 +28,7 @@ def test_basic_run(sphinx_run, file_regression): @pytest.mark.sphinx_params( - "complex_outputs.ipynb", conf={"jupyter_execute_notebooks": "off"} + "complex_outputs.ipynb", conf={"nb_execution_mode": "off"} ) def 
test_complex_outputs(sphinx_run, file_regression): sphinx_run.build() @@ -76,7 +76,7 @@ def test_complex_outputs(sphinx_run, file_regression): @pytest.mark.sphinx_params( "latex_build/index.ipynb", "latex_build/other.ipynb", - conf={"jupyter_execute_notebooks": "off"}, + conf={"nb_execution_mode": "off"}, buildername="latex", # working_dir="/Users/cjs14/GitHub/MyST-NB-actual/outputs" ) diff --git a/tests/test_parser/test_toctree_in_ipynb.xml b/tests/test_parser/test_toctree_in_ipynb.xml index 07e93fc8..c5503577 100644 --- a/tests/test_parser/test_toctree_in_ipynb.xml +++ b/tests/test_parser/test_toctree_in_ipynb.xml @@ -13,9 +13,10 @@ Title <paragraph> Content - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="3" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> print(1) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output stream" language="myst-ansi" xml:space="preserve"> + 1 diff --git a/tests/test_render_outputs.py b/tests/test_render_outputs.py index 3ecda981..1019dafd 100644 --- a/tests/test_render_outputs.py +++ b/tests/test_render_outputs.py @@ -1,23 +1,23 @@ from unittest.mock import patch -import pytest from importlib_metadata import EntryPoint - -from myst_nb.render_outputs import MystNbEntryPointError, load_renderer +import pytest def test_load_renderer_not_found(): + from myst_nb.render_outputs import MystNbEntryPointError, load_renderer with pytest.raises(MystNbEntryPointError, match="No Entry Point found"): load_renderer("other") @patch.object(EntryPoint, "load", lambda self: EntryPoint) def test_load_renderer_not_subclass(): + from myst_nb.render_outputs import MystNbEntryPointError, load_renderer with pytest.raises(MystNbEntryPointError, match="Entry Point .* not a subclass"): load_renderer("default") -@pytest.mark.sphinx_params("basic_run.ipynb", conf={"jupyter_execute_notebooks": "off"}) +@pytest.mark.sphinx_params("basic_run.ipynb", conf={"nb_execution_mode": "off"}) def test_basic_run(sphinx_run, file_regression): sphinx_run.build() assert sphinx_run.warnings() == "" @@ -26,7 +26,7 @@ def test_basic_run(sphinx_run, file_regression): @pytest.mark.sphinx_params( - "complex_outputs.ipynb", conf={"jupyter_execute_notebooks": "off"} + "complex_outputs.ipynb", conf={"nb_execution_mode": "off"} ) def test_complex_outputs(sphinx_run, clean_doctree, file_regression): sphinx_run.build() @@ -39,7 +39,7 @@ def test_complex_outputs(sphinx_run, clean_doctree, file_regression): @pytest.mark.sphinx_params( "complex_outputs.ipynb", - conf={"jupyter_execute_notebooks": "off"}, + conf={"nb_execution_mode": "off"}, buildername="latex", ) def test_complex_outputs_latex(sphinx_run, clean_doctree, file_regression): @@ -52,7 +52,7 @@ def test_complex_outputs_latex(sphinx_run, clean_doctree, file_regression): @pytest.mark.sphinx_params( - "basic_stderr.ipynb", conf={"jupyter_execute_notebooks": "off"} + "basic_stderr.ipynb", conf={"nb_execution_mode": "off"} ) def test_stderr_tag(sphinx_run, file_regression): sphinx_run.build() @@ -63,7 +63,7 @@ def test_stderr_tag(sphinx_run, file_regression): @pytest.mark.sphinx_params( "basic_stderr.ipynb", - conf={"jupyter_execute_notebooks": "off", "nb_output_stderr": "remove"}, + conf={"nb_execution_mode": "off", 
"nb_output_stderr": "remove"}, ) def test_stderr_remove(sphinx_run, file_regression): sphinx_run.build() @@ -74,7 +74,7 @@ def test_stderr_remove(sphinx_run, file_regression): @pytest.mark.sphinx_params( "merge_streams.ipynb", - conf={"jupyter_execute_notebooks": "off", "nb_merge_streams": True}, + conf={"nb_execution_mode": "off", "nb_merge_streams": True}, ) def test_merge_streams(sphinx_run, file_regression): sphinx_run.build() @@ -85,7 +85,7 @@ def test_merge_streams(sphinx_run, file_regression): @pytest.mark.sphinx_params( "metadata_image.ipynb", - conf={"jupyter_execute_notebooks": "off", "nb_render_key": "myst"}, + conf={"nb_execution_mode": "off", "nb_render_key": "myst"}, ) def test_metadata_image(sphinx_run, clean_doctree, file_regression): sphinx_run.build() @@ -96,8 +96,9 @@ def test_metadata_image(sphinx_run, clean_doctree, file_regression): ) +# TODO re-enable test # @pytest.mark.sphinx_params( -# "unknown_mimetype.ipynb", conf={"jupyter_execute_notebooks": "off"} +# "unknown_mimetype.ipynb", conf={"nb_execution_mode": "off"} # ) # def test_unknown_mimetype(sphinx_run, file_regression): # sphinx_run.build() diff --git a/tests/test_render_outputs/test_basic_run.xml b/tests/test_render_outputs/test_basic_run.xml index 8651e4d8..2146dbad 100644 --- a/tests/test_render_outputs/test_basic_run.xml +++ b/tests/test_render_outputs/test_basic_run.xml @@ -4,11 +4,11 @@ a title <paragraph> some text - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> a=1 print(a) - <CellOutputNode classes="cell_output"> + <container classes="cell_output" nb_element="cell_code_output"> <literal_block classes="output stream" language="myst-ansi" linenos="False" xml:space="preserve"> 1 diff --git a/tests/test_render_outputs/test_complex_outputs.xml b/tests/test_render_outputs/test_complex_outputs.xml index a64174b2..16c177bd 100644 --- a/tests/test_render_outputs/test_complex_outputs.xml +++ b/tests/test_render_outputs/test_complex_outputs.xml @@ -1,6 +1,6 @@ <document source="complex_outputs"> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="0" cell_metadata="{'init_cell': True, 'slideshow': {'slide_type': 'skip'}}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> import matplotlib.pyplot as plt import pandas as pd @@ -87,14 +87,14 @@ <section classes="tex2jax_ignore mathjax_ignore" ids="text-output" names="text\ output"> <title> Text Output - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="11" cell_metadata="{'ipub': {'text': {'format': {'backgroundcolor': '\\color{blue!10}'}}}}" classes="cell" exec_count="2" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> print(""" This is some printed text, with a nicely formatted output. 
""") - <CellOutputNode classes="cell_output"> + <container classes="cell_output" nb_element="cell_code_output"> <literal_block classes="output stream" language="myst-ansi" linenos="False" xml:space="preserve"> This is some printed text, @@ -103,12 +103,13 @@ <section classes="tex2jax_ignore mathjax_ignore" ids="images-and-figures" names="images\ and\ figures"> <title> Images and Figures - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="13" cell_metadata="{'ipub': {'figure': {'caption': 'A nice picture.', 'label': 'fig:example', 'placement': '!bh'}}}" classes="cell" exec_count="3" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> Image('example.jpg',height=400) - <CellOutputNode classes="cell_output"> - <image candidates="{'*': '_build/jupyter_execute/complex_outputs_13_0.jpg'}" uri="_build/jupyter_execute/complex_outputs_13_0.jpg"> + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="image/jpeg"> + <image candidates="{'*': '_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg'}" uri="_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg"> <section ids="displaying-a-plot-with-its-code" names="displaying\ a\ plot\ with\ its\ code"> <title> Displaying a plot with its code @@ -116,108 +117,113 @@ A matplotlib figure, with the caption set in the markdowncell above the figure. <paragraph> The plotting code for a matplotlib figure (\cref{fig:example_mpl}). - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="17" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': 'a', 'label': 'code:example_mpl', 'widefigure': False}, 'figure': {'caption': '', 'label': 'fig:example_mpl', 'widefigure': False}}}" classes="cell" exec_count="4" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> plt.scatter(np.random.rand(10), np.random.rand(10), label='data label') plt.ylabel(r'a y label with latex $\alpha$') plt.legend(); - <CellOutputNode classes="cell_output"> - <image candidates="{'*': '_build/jupyter_execute/complex_outputs_17_0.png'}" uri="_build/jupyter_execute/complex_outputs_17_0.png"> + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="image/png"> + <image candidates="{'*': '_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png'}" uri="_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png"> <section classes="tex2jax_ignore mathjax_ignore" ids="tables-with-pandas" names="tables\ (with\ pandas)"> <title> Tables (with pandas) <paragraph> The plotting code for a pandas Dataframe table (\cref{tbl:example}). 
- <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="20" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_pd', 'placement': 'H', 'widefigure': False}, 'table': {'alternate': 'gray!20', 'caption': 'An example of a table created with pandas dataframe.', 'label': 'tbl:example', 'placement': 'H'}}}" classes="cell" exec_count="5" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> df = pd.DataFrame(np.random.rand(3,4),columns=['a','b','c','d']) df.a = ['$\delta$','x','y'] df.b = ['l','m','n'] df.set_index(['a','b']) df.round(3) - <CellOutputNode classes="cell_output"> - <raw classes="output text_html" format="html" xml:space="preserve"> - <div> - <style scoped> - .dataframe tbody tr th:only-of-type { - vertical-align: middle; - } - - .dataframe tbody tr th { - vertical-align: top; - } - - .dataframe thead th { - text-align: right; - } - </style> - <table border="1" class="dataframe"> - <thead> - <tr style="text-align: right;"> - <th></th> - <th>a</th> - <th>b</th> - <th>c</th> - <th>d</th> - </tr> - </thead> - <tbody> - <tr> - <th>0</th> - <td>$\delta$</td> - <td>l</td> - <td>0.391</td> - <td>0.607</td> - </tr> - <tr> - <th>1</th> - <td>x</td> - <td>m</td> - <td>0.132</td> - <td>0.205</td> - </tr> - <tr> - <th>2</th> - <td>y</td> - <td>n</td> - <td>0.969</td> - <td>0.726</td> - </tr> - </tbody> - </table> - </div> + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="text/html"> + <raw classes="output text_html" format="html" xml:space="preserve"> + <div> + <style scoped> + .dataframe tbody tr th:only-of-type { + vertical-align: middle; + } + + .dataframe tbody tr th { + vertical-align: top; + } + + .dataframe thead th { + text-align: right; + } + </style> + <table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>a</th> + <th>b</th> + <th>c</th> + <th>d</th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>$\delta$</td> + <td>l</td> + <td>0.391</td> + <td>0.607</td> + </tr> + <tr> + <th>1</th> + <td>x</td> + <td>m</td> + <td>0.132</td> + <td>0.205</td> + </tr> + <tr> + <th>2</th> + <td>y</td> + <td>n</td> + <td>0.969</td> + <td>0.726</td> + </tr> + </tbody> + </table> + </div> <section classes="tex2jax_ignore mathjax_ignore" ids="equations-with-ipython-or-sympy" names="equations\ (with\ ipython\ or\ sympy)"> <title> Equations (with ipython or sympy) - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="22" cell_metadata="{'ipub': {'equation': {'label': 'eqn:example_ipy'}}}" classes="cell" exec_count="6" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> Latex('$$ a = b+c $$') - <CellOutputNode classes="cell_output"> - <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> - a = b+c + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="text/latex"> + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + a = b+c <paragraph> The plotting code for a sympy equation (=@eqn:example_sympy). 
- <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="24" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_sym', 'placement': 'H', 'widefigure': False}, 'equation': {'environment': 'equation', 'label': 'eqn:example_sympy'}}}" classes="cell" exec_count="7" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> y = sym.Function('y') n = sym.symbols(r'\alpha') f = y(n)-2*y(n-1/sym.pi)-5*y(n-2) sym.rsolve(f,y(n),[1,4]) - <CellOutputNode classes="cell_output"> - <image candidates="{'*': '_build/jupyter_execute/complex_outputs_24_0.png'}" uri="_build/jupyter_execute/complex_outputs_24_0.png"> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="image/png"> + <image candidates="{'*': '_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png'}" uri="_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png"> + <container cell_index="25" cell_metadata="{}" classes="cell" exec_count="7" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> from IPython.display import display, Markdown display(Markdown('**_some_ markdown**')) - <CellOutputNode classes="cell_output"> - <paragraph> - <strong> - <emphasis> - some - markdown + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="text/markdown"> + <paragraph> + <strong> + <emphasis> + some + markdown diff --git a/tests/test_render_outputs/test_complex_outputs_latex.xml b/tests/test_render_outputs/test_complex_outputs_latex.xml index 0be1d57d..28d2175a 100644 --- a/tests/test_render_outputs/test_complex_outputs_latex.xml +++ b/tests/test_render_outputs/test_complex_outputs_latex.xml @@ -1,6 +1,6 @@ <document source="complex_outputs"> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="0" cell_metadata="{'init_cell': True, 'slideshow': {'slide_type': 'skip'}}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> import matplotlib.pyplot as plt import pandas as pd @@ -87,14 +87,14 @@ <section classes="tex2jax_ignore mathjax_ignore" ids="text-output" names="text\ output"> <title> Text Output - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="11" cell_metadata="{'ipub': {'text': {'format': {'backgroundcolor': '\\color{blue!10}'}}}}" classes="cell" exec_count="2" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> print(""" This is some printed text, with a nicely formatted output. 
""") - <CellOutputNode classes="cell_output"> + <container classes="cell_output" nb_element="cell_code_output"> <literal_block classes="output stream" language="myst-ansi" linenos="False" xml:space="preserve"> This is some printed text, @@ -103,12 +103,13 @@ <section classes="tex2jax_ignore mathjax_ignore" ids="images-and-figures" names="images\ and\ figures"> <title> Images and Figures - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="13" cell_metadata="{'ipub': {'figure': {'caption': 'A nice picture.', 'label': 'fig:example', 'placement': '!bh'}}}" classes="cell" exec_count="3" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> Image('example.jpg',height=400) - <CellOutputNode classes="cell_output"> - <image candidates="{'*': '_build/jupyter_execute/complex_outputs_13_0.jpg'}" uri="_build/jupyter_execute/complex_outputs_13_0.jpg"> + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="image/jpeg"> + <image candidates="{'*': '_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg'}" uri="_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg"> <section ids="displaying-a-plot-with-its-code" names="displaying\ a\ plot\ with\ its\ code"> <title> Displaying a plot with its code @@ -116,68 +117,73 @@ A matplotlib figure, with the caption set in the markdowncell above the figure. <paragraph> The plotting code for a matplotlib figure (\cref{fig:example_mpl}). - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="17" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': 'a', 'label': 'code:example_mpl', 'widefigure': False}, 'figure': {'caption': '', 'label': 'fig:example_mpl', 'widefigure': False}}}" classes="cell" exec_count="4" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> plt.scatter(np.random.rand(10), np.random.rand(10), label='data label') plt.ylabel(r'a y label with latex $\alpha$') plt.legend(); - <CellOutputNode classes="cell_output"> - <image candidates="{'*': '_build/jupyter_execute/complex_outputs_17_0.png'}" uri="_build/jupyter_execute/complex_outputs_17_0.png"> + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="image/png"> + <image candidates="{'*': '_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png'}" uri="_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png"> <section classes="tex2jax_ignore mathjax_ignore" ids="tables-with-pandas" names="tables\ (with\ pandas)"> <title> Tables (with pandas) <paragraph> The plotting code for a pandas Dataframe table (\cref{tbl:example}). 
- <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="20" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_pd', 'placement': 'H', 'widefigure': False}, 'table': {'alternate': 'gray!20', 'caption': 'An example of a table created with pandas dataframe.', 'label': 'tbl:example', 'placement': 'H'}}}" classes="cell" exec_count="5" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> df = pd.DataFrame(np.random.rand(3,4),columns=['a','b','c','d']) df.a = ['$\delta$','x','y'] df.b = ['l','m','n'] df.set_index(['a','b']) df.round(3) - <CellOutputNode classes="cell_output"> - <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> - \begin{tabular}{lllrr} - \toprule - {} & a & b & c & d \\ - \midrule - 0 & \$\textbackslash delta\$ & l & 0.391 & 0.607 \\ - 1 & x & m & 0.132 & 0.205 \\ - 2 & y & n & 0.969 & 0.726 \\ - \bottomrule - \end{tabular} + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="text/latex"> + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + \begin{tabular}{lllrr} + \toprule + {} & a & b & c & d \\ + \midrule + 0 & \$\textbackslash delta\$ & l & 0.391 & 0.607 \\ + 1 & x & m & 0.132 & 0.205 \\ + 2 & y & n & 0.969 & 0.726 \\ + \bottomrule + \end{tabular} <section classes="tex2jax_ignore mathjax_ignore" ids="equations-with-ipython-or-sympy" names="equations\ (with\ ipython\ or\ sympy)"> <title> Equations (with ipython or sympy) - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="22" cell_metadata="{'ipub': {'equation': {'label': 'eqn:example_ipy'}}}" classes="cell" exec_count="6" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> Latex('$$ a = b+c $$') - <CellOutputNode classes="cell_output"> - <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> - a = b+c + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="text/latex"> + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + a = b+c <paragraph> The plotting code for a sympy equation (=@eqn:example_sympy). 
- <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="24" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_sym', 'placement': 'H', 'widefigure': False}, 'equation': {'environment': 'equation', 'label': 'eqn:example_sympy'}}}" classes="cell" exec_count="7" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> y = sym.Function('y') n = sym.symbols(r'\alpha') f = y(n)-2*y(n-1/sym.pi)-5*y(n-2) sym.rsolve(f,y(n),[1,4]) - <CellOutputNode classes="cell_output"> - <image candidates="{'*': '_build/jupyter_execute/complex_outputs_24_0.png'}" uri="_build/jupyter_execute/complex_outputs_24_0.png"> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="image/png"> + <image candidates="{'*': '_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png'}" uri="_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png"> + <container cell_index="25" cell_metadata="{}" classes="cell" exec_count="7" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> from IPython.display import display, Markdown display(Markdown('**_some_ markdown**')) - <CellOutputNode classes="cell_output"> - <paragraph> - <strong> - <emphasis> - some - markdown + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="text/markdown"> + <paragraph> + <strong> + <emphasis> + some + markdown diff --git a/tests/test_render_outputs/test_merge_streams.xml b/tests/test_render_outputs/test_merge_streams.xml index 40c8c7dc..75499729 100644 --- a/tests/test_render_outputs/test_merge_streams.xml +++ b/tests/test_render_outputs/test_merge_streams.xml @@ -1,6 +1,6 @@ <document source="merge_streams"> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="0" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> import sys print('stdout1', file=sys.stdout) @@ -10,7 +10,7 @@ print('stdout3', file=sys.stdout) print('stderr3', file=sys.stderr) 1 - <CellOutputNode classes="cell_output"> + <container classes="cell_output" nb_element="cell_code_output"> <literal_block classes="output stream" language="myst-ansi" linenos="False" xml:space="preserve"> stdout1 stdout2 @@ -19,5 +19,6 @@ stderr1 stderr2 stderr3 - <literal_block classes="output text_plain" language="myst-ansi" linenos="False" xml:space="preserve"> - 1 + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" linenos="False" xml:space="preserve"> + 1 diff --git a/tests/test_render_outputs/test_stderr_remove.xml b/tests/test_render_outputs/test_stderr_remove.xml index 427fedd7..60d9b44f 100644 --- a/tests/test_render_outputs/test_stderr_remove.xml +++ b/tests/test_render_outputs/test_stderr_remove.xml @@ -1,13 +1,13 @@ <document source="basic_stderr"> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="0" cell_metadata="{}" classes="cell" exec_count="1" 
nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> import sys print('hallo', file=sys.stderr) - <CellOutputNode classes="cell_output"> - <CellNode cell_type="code" classes="cell tag_remove-stderr"> - <CellInputNode classes="cell_input"> + <container classes="cell_output" nb_element="cell_code_output"> + <container cell_index="1" cell_metadata="{'tags': ['remove-stderr']}" classes="cell tag_remove-stderr" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> import sys print('hallo', file=sys.stderr) - <CellOutputNode classes="cell_output"> + <container classes="cell_output" nb_element="cell_code_output"> diff --git a/tests/test_render_outputs/test_stderr_tag.xml b/tests/test_render_outputs/test_stderr_tag.xml index be47c52a..394eb08a 100644 --- a/tests/test_render_outputs/test_stderr_tag.xml +++ b/tests/test_render_outputs/test_stderr_tag.xml @@ -1,15 +1,17 @@ <document source="basic_stderr"> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="0" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> import sys print('hallo', file=sys.stderr) - <CellOutputNode classes="cell_output"> + <container classes="cell_output" nb_element="cell_code_output"> <literal_block classes="output stderr" language="myst-ansi" linenos="False" xml:space="preserve"> hallo - <CellNode cell_type="code" classes="cell tag_remove-stderr"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{'tags': ['remove-stderr']}" classes="cell tag_remove-stderr" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> import sys print('hallo', file=sys.stderr) - <CellOutputNode classes="cell_output"> + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output stderr" language="myst-ansi" linenos="False" xml:space="preserve"> + hallo diff --git a/tests/test_text_based.py b/tests/test_text_based.py index bf7ab2f6..b98ddcd5 100644 --- a/tests/test_text_based.py +++ b/tests/test_text_based.py @@ -3,7 +3,7 @@ @pytest.mark.sphinx_params( "basic_unrun.md", - conf={"jupyter_execute_notebooks": "cache", "source_suffix": {".md": "myst-nb"}}, + conf={"nb_execution_mode": "cache", "source_suffix": {".md": "myst-nb"}}, ) def test_basic_run(sphinx_run, file_regression, check_nbs): sphinx_run.build() @@ -32,7 +32,7 @@ def test_basic_run(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( "basic_unrun.md", - conf={"jupyter_execute_notebooks": "off", "source_suffix": {".md": "myst-nb"}}, + conf={"nb_execution_mode": "off", "source_suffix": {".md": "myst-nb"}}, ) def test_basic_run_exec_off(sphinx_run, file_regression, check_nbs): sphinx_run.build() @@ -50,7 +50,7 @@ def test_basic_run_exec_off(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params( "basic_nometadata.md", - conf={"jupyter_execute_notebooks": "off", "source_suffix": {".md": "myst-nb"}}, + conf={"nb_execution_mode": "off", "source_suffix": {".md": "myst-nb"}}, ) def test_basic_nometadata(sphinx_run, file_regression, check_nbs): 
"""A myst-markdown notebook with no jupytext metadata should raise a warning.""" From ab030412ee3ee09e889d59483acb75fb3c3704e5 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Wed, 5 Jan 2022 07:06:28 +0100 Subject: [PATCH 25/75] pre-commit fixes --- myst_nb/nb_glue/domain.py | 6 +-- myst_nb/sphinx_.py | 2 +- tests/test_execute.py | 44 +++++-------------- .../test_complex_outputs_unrun_auto.ipynb | 2 +- .../test_complex_outputs_unrun_cache.ipynb | 2 +- .../test_custom_convert_auto.ipynb | 2 +- .../test_custom_convert_cache.ipynb | 2 +- tests/test_glue.py | 1 + tests/test_nb_render.py | 2 + tests/test_parser.py | 4 +- tests/test_render_outputs.py | 10 ++--- 11 files changed, 27 insertions(+), 50 deletions(-) diff --git a/myst_nb/nb_glue/domain.py b/myst_nb/nb_glue/domain.py index cd2e5a57..2ab7a976 100644 --- a/myst_nb/nb_glue/domain.py +++ b/myst_nb/nb_glue/domain.py @@ -7,15 +7,15 @@ from docutils.parsers.rst import directives from sphinx.domains import Domain from sphinx.domains.math import MathDomain + +# from myst_nb.nodes import CellOutputBundleNode, CellOutputNode +from sphinx.ext.autodoc.directive import DummyOptionSpec from sphinx.util import logging from sphinx.util.docutils import SphinxDirective, SphinxRole from myst_nb.nb_glue import GLUE_PREFIX from myst_nb.nb_glue.utils import find_all_keys -# from myst_nb.nodes import CellOutputBundleNode, CellOutputNode -from sphinx.ext.autodoc.directive import DummyOptionSpec - SPHINX_LOGGER = logging.getLogger(__name__) diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index f5fa1736..f3c1283f 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -19,7 +19,7 @@ from sphinx.environment import BuildEnvironment from sphinx.transforms.post_transforms import SphinxPostTransform from sphinx.util import logging as sphinx_logging -from sphinx.util.docutils import ReferenceRole, SphinxDirective +from sphinx.util.docutils import ReferenceRole from myst_nb import __version__ from myst_nb.configuration import NbParserConfig diff --git a/tests/test_execute.py b/tests/test_execute.py index c0c50bd2..66650cda 100644 --- a/tests/test_execute.py +++ b/tests/test_execute.py @@ -11,9 +11,7 @@ def regress_nb_doc(file_regression, sphinx_run, check_nbs): file_regression.check(doctree.pformat(), extension=".xml", encoding="utf8") -@pytest.mark.sphinx_params( - "basic_unrun.ipynb", conf={"nb_execution_mode": "auto"} -) +@pytest.mark.sphinx_params("basic_unrun.ipynb", conf={"nb_execution_mode": "auto"}) def test_basic_unrun_auto(sphinx_run, file_regression, check_nbs): sphinx_run.build() # print(sphinx_run.status()) @@ -30,9 +28,7 @@ def test_basic_unrun_auto(sphinx_run, file_regression, check_nbs): assert sphinx_run.env.nb_execution_data["basic_unrun"]["succeeded"] is True -@pytest.mark.sphinx_params( - "basic_unrun.ipynb", conf={"nb_execution_mode": "cache"} -) +@pytest.mark.sphinx_params("basic_unrun.ipynb", conf={"nb_execution_mode": "cache"}) def test_basic_unrun_cache(sphinx_run, file_regression, check_nbs): """The outputs should be populated.""" sphinx_run.build() @@ -49,9 +45,7 @@ def test_basic_unrun_cache(sphinx_run, file_regression, check_nbs): assert sphinx_run.env.nb_execution_data["basic_unrun"]["succeeded"] is True -@pytest.mark.sphinx_params( - "basic_unrun.ipynb", conf={"nb_execution_mode": "cache"} -) +@pytest.mark.sphinx_params("basic_unrun.ipynb", conf={"nb_execution_mode": "cache"}) def test_rebuild_cache(sphinx_run): """The notebook should only be executed once.""" sphinx_run.build() @@ -61,9 +55,7 @@ 
def test_rebuild_cache(sphinx_run): assert "Executing" not in sphinx_run.status(), sphinx_run.status() -@pytest.mark.sphinx_params( - "basic_unrun.ipynb", conf={"nb_execution_mode": "force"} -) +@pytest.mark.sphinx_params("basic_unrun.ipynb", conf={"nb_execution_mode": "force"}) def test_rebuild_force(sphinx_run): """The notebook should be executed twice.""" sphinx_run.build() @@ -90,9 +82,7 @@ def test_exclude_path(sphinx_run, file_regression): ) -@pytest.mark.sphinx_params( - "basic_failing.ipynb", conf={"nb_execution_mode": "cache"} -) +@pytest.mark.sphinx_params("basic_failing.ipynb", conf={"nb_execution_mode": "cache"}) def test_basic_failing_cache(sphinx_run, file_regression, check_nbs): sphinx_run.build() assert "Execution Failed" in sphinx_run.warnings() @@ -110,9 +100,7 @@ def test_basic_failing_cache(sphinx_run, file_regression, check_nbs): assert "error_log" in sphinx_run.env.nb_execution_data["basic_failing"] -@pytest.mark.sphinx_params( - "basic_failing.ipynb", conf={"nb_execution_mode": "auto"} -) +@pytest.mark.sphinx_params("basic_failing.ipynb", conf={"nb_execution_mode": "auto"}) def test_basic_failing_auto(sphinx_run, file_regression, check_nbs): sphinx_run.build() # print(sphinx_run.status()) @@ -149,9 +137,7 @@ def test_allow_errors_auto(sphinx_run, file_regression, check_nbs): regress_nb_doc(file_regression, sphinx_run, check_nbs) -@pytest.mark.sphinx_params( - "basic_unrun.ipynb", conf={"nb_execution_mode": "force"} -) +@pytest.mark.sphinx_params("basic_unrun.ipynb", conf={"nb_execution_mode": "force"}) def test_outputs_present(sphinx_run, file_regression, check_nbs): sphinx_run.build() # print(sphinx_run.status()) @@ -190,9 +176,7 @@ def test_complex_outputs_unrun_auto(sphinx_run, file_regression, check_nbs): assert '<script type="application/vnd.jupyter.widget-state+json">' in html -@pytest.mark.sphinx_params( - "basic_unrun.ipynb", conf={"nb_execution_mode": "off"} -) +@pytest.mark.sphinx_params("basic_unrun.ipynb", conf={"nb_execution_mode": "off"}) def test_no_execute(sphinx_run, file_regression, check_nbs): sphinx_run.build() # print(sphinx_run.status()) @@ -200,9 +184,7 @@ def test_no_execute(sphinx_run, file_regression, check_nbs): regress_nb_doc(file_regression, sphinx_run, check_nbs) -@pytest.mark.sphinx_params( - "basic_unrun.ipynb", conf={"nb_execution_mode": "cache"} -) +@pytest.mark.sphinx_params("basic_unrun.ipynb", conf={"nb_execution_mode": "cache"}) def test_jupyter_cache_path(sphinx_run, file_regression, check_nbs): sphinx_run.build() assert "Execution Succeeded" in sphinx_run.status() @@ -211,17 +193,13 @@ def test_jupyter_cache_path(sphinx_run, file_regression, check_nbs): # Testing relative paths within the notebook -@pytest.mark.sphinx_params( - "basic_relative.ipynb", conf={"nb_execution_mode": "cache"} -) +@pytest.mark.sphinx_params("basic_relative.ipynb", conf={"nb_execution_mode": "cache"}) def test_relative_path_cache(sphinx_run, file_regression, check_nbs): sphinx_run.build() assert "Execution Failed" not in sphinx_run.status(), sphinx_run.status() -@pytest.mark.sphinx_params( - "basic_relative.ipynb", conf={"nb_execution_mode": "force"} -) +@pytest.mark.sphinx_params("basic_relative.ipynb", conf={"nb_execution_mode": "force"}) def test_relative_path_force(sphinx_run, file_regression, check_nbs): sphinx_run.build() assert "Execution Failed" not in sphinx_run.status(), sphinx_run.status() diff --git a/tests/test_execute/test_complex_outputs_unrun_auto.ipynb b/tests/test_execute/test_complex_outputs_unrun_auto.ipynb index 0da9ffde..7063843b 
100644 --- a/tests/test_execute/test_complex_outputs_unrun_auto.ipynb +++ b/tests/test_execute/test_complex_outputs_unrun_auto.ipynb @@ -692,4 +692,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/tests/test_execute/test_complex_outputs_unrun_cache.ipynb b/tests/test_execute/test_complex_outputs_unrun_cache.ipynb index 0da9ffde..7063843b 100644 --- a/tests/test_execute/test_complex_outputs_unrun_cache.ipynb +++ b/tests/test_execute/test_complex_outputs_unrun_cache.ipynb @@ -692,4 +692,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/tests/test_execute/test_custom_convert_auto.ipynb b/tests/test_execute/test_custom_convert_auto.ipynb index 10731afb..f95ca31a 100644 --- a/tests/test_execute/test_custom_convert_auto.ipynb +++ b/tests/test_execute/test_custom_convert_auto.ipynb @@ -101,4 +101,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/tests/test_execute/test_custom_convert_cache.ipynb b/tests/test_execute/test_custom_convert_cache.ipynb index c5e881cd..8ff9bd49 100644 --- a/tests/test_execute/test_custom_convert_cache.ipynb +++ b/tests/test_execute/test_custom_convert_cache.ipynb @@ -101,4 +101,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/tests/test_glue.py b/tests/test_glue.py index 6191fe36..f8367f2a 100644 --- a/tests/test_glue.py +++ b/tests/test_glue.py @@ -28,6 +28,7 @@ def mock_ipython(): def test_check_priority(): """Assert that the default transform priority is less than CellOutputsToNodes""" from myst_nb.render_outputs import CellOutputsToNodes + assert PasteNodesToDocutils.default_priority < CellOutputsToNodes.default_priority diff --git a/tests/test_nb_render.py b/tests/test_nb_render.py index 838b8593..3ae5b7a3 100644 --- a/tests/test_nb_render.py +++ b/tests/test_nb_render.py @@ -16,6 +16,7 @@ ) def test_render(line, title, input, expected): from myst_nb.parser import nb_to_tokens, tokens_to_docutils + dct = yaml.safe_load(input) dct.setdefault("metadata", {}) ntbk = nbformat.from_dict(dct) @@ -35,6 +36,7 @@ def test_render(line, title, input, expected): ) def test_reporting(line, title, input, expected): from myst_nb.parser import nb_to_tokens, tokens_to_docutils + dct = yaml.safe_load(input) dct.setdefault("metadata", {}) ntbk = nbformat.from_dict(dct) diff --git a/tests/test_parser.py b/tests/test_parser.py index e42526f5..d5525f89 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -27,9 +27,7 @@ def test_basic_run(sphinx_run, file_regression): assert filenames == {"basic_run.py", "basic_run.ipynb"} -@pytest.mark.sphinx_params( - "complex_outputs.ipynb", conf={"nb_execution_mode": "off"} -) +@pytest.mark.sphinx_params("complex_outputs.ipynb", conf={"nb_execution_mode": "off"}) def test_complex_outputs(sphinx_run, file_regression): sphinx_run.build() assert sphinx_run.warnings() == "" diff --git a/tests/test_render_outputs.py b/tests/test_render_outputs.py index 1019dafd..fed99aab 100644 --- a/tests/test_render_outputs.py +++ b/tests/test_render_outputs.py @@ -6,6 +6,7 @@ def test_load_renderer_not_found(): from myst_nb.render_outputs import MystNbEntryPointError, load_renderer + with pytest.raises(MystNbEntryPointError, match="No Entry Point found"): load_renderer("other") @@ -13,6 +14,7 @@ def test_load_renderer_not_found(): @patch.object(EntryPoint, "load", lambda self: EntryPoint) def test_load_renderer_not_subclass(): from myst_nb.render_outputs import MystNbEntryPointError, load_renderer 
+ with pytest.raises(MystNbEntryPointError, match="Entry Point .* not a subclass"): load_renderer("default") @@ -25,9 +27,7 @@ def test_basic_run(sphinx_run, file_regression): file_regression.check(doctree.pformat(), extension=".xml", encoding="utf8") -@pytest.mark.sphinx_params( - "complex_outputs.ipynb", conf={"nb_execution_mode": "off"} -) +@pytest.mark.sphinx_params("complex_outputs.ipynb", conf={"nb_execution_mode": "off"}) def test_complex_outputs(sphinx_run, clean_doctree, file_regression): sphinx_run.build() assert sphinx_run.warnings() == "" @@ -51,9 +51,7 @@ def test_complex_outputs_latex(sphinx_run, clean_doctree, file_regression): ) -@pytest.mark.sphinx_params( - "basic_stderr.ipynb", conf={"nb_execution_mode": "off"} -) +@pytest.mark.sphinx_params("basic_stderr.ipynb", conf={"nb_execution_mode": "off"}) def test_stderr_tag(sphinx_run, file_regression): sphinx_run.build() assert sphinx_run.warnings() == "" From 3e54270c2252ebfd119ea7b80460765fe5693d0d Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Wed, 5 Jan 2022 15:23:06 +0100 Subject: [PATCH 26/75] re-implement `execution_excludepatterns` --- myst_nb/configuration.py | 2 +- myst_nb/execute.py | 22 ++++++++++++++++------ 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index 5dea66d6..94b62054 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -218,7 +218,7 @@ class NbParserConfig: default=(), validator=deep_iterable(instance_of(str)), metadata={ - "help": "Exclude patterns for notebooks", + "help": "Exclude (POSIX) glob patterns for notebooks", "legacy_name": "execution_excludepatterns", "docutils_exclude": True, }, diff --git a/myst_nb/execute.py b/myst_nb/execute.py index 2d3f645d..654767bd 100644 --- a/myst_nb/execute.py +++ b/myst_nb/execute.py @@ -3,7 +3,7 @@ from datetime import datetime from logging import Logger import os -from pathlib import Path +from pathlib import Path, PurePosixPath from tempfile import TemporaryDirectory from typing import Optional, Tuple @@ -42,7 +42,7 @@ def update_notebook( ) -> Tuple[NotebookNode, Optional[ExecutionResult]]: """Update a notebook using the given configuration. - This function may execute the notebook if necessary. + This function may execute the notebook if necessary, to update its outputs. :param notebook: The notebook to update. :param source: Path to or description of the input source being processed. @@ -51,21 +51,31 @@ def update_notebook( :returns: The updated notebook, and the (optional) execution metadata. """ + # TODO should any of the logging messages be debug instead of info? + # path should only be None when using docutils programmatically, # e.g. 
source="<string>" path = Path(source) if Path(source).is_file() else None exec_metadata: Optional[ExecutionResult] = None - # TODO deal with nb_config.execution_excludepatterns + # check if the notebook is excluded from execution by pattern + if path is not None and nb_config.execution_excludepatterns: + posix_path = PurePosixPath(path.as_posix()) + for pattern in nb_config.execution_excludepatterns: + if posix_path.match(pattern): + logger.info(f"Excluded from execution by pattern: {pattern!r}") + return notebook, exec_metadata + # 'auto' mode only executes the notebook if it is missing at least one output missing_outputs = ( len(cell.outputs) == 0 for cell in notebook.cells if cell["cell_type"] == "code" ) + if nb_config.execution_mode == "auto" and not any(missing_outputs): + logger.info("Skipped execution in 'auto' mode (all outputs present)") + return notebook, exec_metadata - if nb_config.execution_mode == "force" or ( - nb_config.execution_mode == "auto" and any(missing_outputs) - ): + if nb_config.execution_mode in ("auto", "force"): # setup the execution current working directory if nb_config.execution_in_temp: From 672fea056e307beed1c8bb0796ee542395043070 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Wed, 5 Jan 2022 19:58:41 +0100 Subject: [PATCH 27/75] skip glue outputs for now --- myst_nb/sphinx_.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index f3c1283f..c45aaa92 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -74,7 +74,8 @@ def sphinx_setup(app: Sphinx): app.connect("config-inited", add_exclude_patterns) # TODO add an event which, if any files have been removed, - # all stage records with a non-existent path are removed + # all jupyter-cache stage records with a non-existent path are removed + # (just to keep it "tidy", but won't affect run) # add directive to ensure all notebook cells are converted app.add_directive("code-cell", UnexpectedCellDirective) @@ -474,15 +475,21 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: # cache all rendered outputs, then choose one from the priority list # in a post-transform, once we know which builder is required. 
mime_bundle = nodes.container(nb_element="mime_bundle") - with self.current_node_context(mime_bundle, append=True): + with self.current_node_context(mime_bundle): for mime_type, data in output["data"].items(): + if mime_type.startswith("application/papermill.record/"): + # TODO this is the glue prefix, just ignore this for now + continue container = nodes.container(mime_type=mime_type) with self.current_node_context(container, append=True): _nodes = self.nb_renderer.render_mime_type( mime_type, data, cell_index, line ) self.current_node.extend(_nodes) - self.add_line_and_source_path_r([mime_bundle], token) + if mime_bundle.children: + # only add if we have something to render + self.add_line_and_source_path_r([mime_bundle], token) + self.current_node.append(mime_bundle) else: self.create_warning( f"Unsupported output type: {output.output_type}", From 4dfb54b153cd444fbc3b32b61486a2fc2fe2a9c7 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 6 Jan 2022 02:38:00 +0100 Subject: [PATCH 28/75] Fix execution tests --- docs/use/formatting_outputs.md | 2 +- myst_nb/execute.py | 21 ++- myst_nb/execution_tables.py | 36 +++-- myst_nb/sphinx_.py | 137 ++++++++++++----- tests/conftest.py | 5 +- tests/notebooks/nb_exec_table.md | 4 + tests/test_execute.py | 140 ++++++++++-------- .../test_execute/test_basic_failing_auto.xml | 14 +- .../test_basic_failing_cache.ipynb | 18 ++- .../test_execute/test_basic_failing_cache.xml | 12 +- tests/test_execute/test_exclude_path.xml | 4 +- .../test_execute/test_jupyter_cache_path.xml | 9 +- tests/test_execute/test_nb_exec_table.xml | 7 + tests/test_sphinx_builds.py | 4 +- tests/test_text_based.py | 31 ++-- tests/test_text_based/test_basic_run.xml | 9 +- .../test_basic_run_exec_off.xml | 6 +- 17 files changed, 297 insertions(+), 162 deletions(-) diff --git a/docs/use/formatting_outputs.md b/docs/use/formatting_outputs.md index 8ee938a6..f921bf47 100644 --- a/docs/use/formatting_outputs.md +++ b/docs/use/formatting_outputs.md @@ -226,7 +226,7 @@ This is currently not supported, but we hope to introduce it at a later date (use/format/cutomise)= ## Customise the render process -The render process is goverened by subclasses of {py:class}`myst_nb.render_outputs.CellOutputRendererBase`, which dictate how to create the `docutils` AST nodes for a particular MIME type. the default implementation is {py:class}`~myst_nb.render_outputs.CellOutputRenderer`. +The render process is governed by subclasses of {py:class}`myst_nb.render_outputs.CellOutputRendererBase`, which dictate how to create the `docutils` AST nodes for a particular MIME type. the default implementation is {py:class}`~myst_nb.render_outputs.CellOutputRenderer`. Implementations are loaded *via* Python [entry points](https://packaging.python.org/guides/distributing-packages-using-setuptools/#entry-points), in the `myst_nb.mime_render` group. So it is possible to inject your own subclass to handle rendering. 
diff --git a/myst_nb/execute.py b/myst_nb/execute.py index 654767bd..d1ee9152 100644 --- a/myst_nb/execute.py +++ b/myst_nb/execute.py @@ -90,20 +90,25 @@ def update_notebook( # execute in the context of the current working directory with cwd_context as cwd: cwd = os.path.abspath(cwd) - logger.info(f"Executing notebook: CWD={cwd!r}") + logger.info( + "Executing notebook using" + + ("tempdir" if nb_config.execution_in_temp else "local") + + " CWD" + ) result = single_nb_execution( notebook, cwd=cwd, allow_errors=nb_config.execution_allow_errors, timeout=nb_config.execution_timeout, ) - logger.info(f"Executed notebook in {result.time:.2f} seconds") - if result.err: + if result.err is not None: msg = f"Executing notebook failed: {result.err.__class__.__name__}" if nb_config.execution_show_tb: msg += f"\n{result.exc_string}" logger.warning(msg, subtype="exec") + else: + logger.info(f"Executed notebook in {result.time:.2f} seconds") exec_metadata = { "mtime": datetime.now().timestamp(), @@ -155,24 +160,28 @@ def update_notebook( ) with cwd_context as cwd: cwd = os.path.abspath(cwd) - logger.info(f"Executing notebook: CWD={cwd!r}") + logger.info( + "Executing notebook using" + + ("tempdir" if nb_config.execution_in_temp else "local") + + " CWD" + ) result = single_nb_execution( notebook, cwd=cwd, allow_errors=nb_config.execution_allow_errors, timeout=nb_config.execution_timeout, ) - logger.info(f"Executed notebook in {result.time:.2f} seconds") # handle success / failure cases # TODO do in try/except to be careful (in case of database write errors? - if result.err: + if result.err is not None: msg = f"Executing notebook failed: {result.err.__class__.__name__}" if nb_config.execution_show_tb: msg += f"\n{result.exc_string}" logger.warning(msg, subtype="exec") NbStageRecord.set_traceback(stage_record.uri, result.exc_string, cache.db) else: + logger.info(f"Executed notebook in {result.time:.2f} seconds") cache_record = cache.cache_notebook_bundle( NbBundleIn( notebook, stage_record.uri, data={"execution_seconds": result.time} diff --git a/myst_nb/execution_tables.py b/myst_nb/execution_tables.py index f9e995bd..07ef46d3 100644 --- a/myst_nb/execution_tables.py +++ b/myst_nb/execution_tables.py @@ -6,7 +6,7 @@ """ from datetime import datetime import posixpath -from typing import Any, Callable, Dict +from typing import Any, Callable, DefaultDict, Dict from docutils import nodes from sphinx.addnodes import pending_xref @@ -15,14 +15,17 @@ from sphinx.util import logging from sphinx.util.docutils import SphinxDirective +from myst_nb.sphinx_ import NbMetadataCollector + SPHINX_LOGGER = logging.getLogger(__name__) +METADATA_KEY = "has_exec_table" + def setup_exec_table_extension(app: Sphinx) -> None: """Add the Sphinx extension to the Sphinx application.""" app.add_node(ExecutionStatsNode) app.add_directive("nb-exec-table", ExecutionStatsTable) - app.connect("env-before-read-docs", check_if_executing) app.connect("env-updated", update_exec_tables) app.add_post_transform(ExecutionStatsPostTransform) @@ -39,30 +42,21 @@ class ExecutionStatsTable(SphinxDirective): def run(self): """Add a placeholder node to the document, and mark it as having a table.""" - self.env.metadata[self.env.docname]["__mystnb__has_exec_table"] = True + NbMetadataCollector.set_doc_data(self.env, self.env.docname, METADATA_KEY, True) return [ExecutionStatsNode()] -def check_if_executing(app: Sphinx, env, docnames) -> None: - """Check if a document might be executed.""" - # TODO this is a sub-optimal solution, since it only stops 
exec tables from being - # updated if any document is reparsed. - # Ideally we would only update the tables if a document is re-executed, but - # but we need to store this on the env, whilst accounting for parallel env merges. - env.mystnb_update_exec_tables = True if docnames else False - - def update_exec_tables(app: Sphinx, env): """If a document has been re-executed, return all documents containing tables. These documents will be updated with the new statistics. """ - if not env.mystnb_update_exec_tables: + if not NbMetadataCollector.new_exec_data(env): return None to_update = [ docname - for docname in env.metadata - if "__mystnb__has_exec_table" in env.metadata[docname] + for docname, data in NbMetadataCollector.get_doc_data(env).items() + if data.get(METADATA_KEY) ] if to_update: SPHINX_LOGGER.info( @@ -79,7 +73,11 @@ class ExecutionStatsPostTransform(SphinxPostTransform): def run(self, **kwargs) -> None: """Replace the placeholder node with the final table nodes.""" for node in self.document.traverse(ExecutionStatsNode): - node.replace_self(make_stat_table(self.env.docname, self.env.metadata)) + node.replace_self( + make_stat_table( + self.env.docname, NbMetadataCollector.get_doc_data(self.env) + ) + ) _key2header: Dict[str, str] = { @@ -100,7 +98,7 @@ def run(self, **kwargs) -> None: def make_stat_table( - parent_docname: str, metadata: Dict[str, Dict[str, Any]] + parent_docname: str, metadata: DefaultDict[str, dict] ) -> nodes.table: """Create a table of statistics on executed notebooks.""" @@ -132,9 +130,9 @@ def make_stat_table( tgroup += tbody for docname in sorted(metadata): - if "__mystnb__exec_data" not in metadata[docname]: + data = metadata[docname].get("exec_data") + if not data: continue - data = metadata[docname]["__mystnb__exec_data"] row = nodes.row() tbody += row diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index c45aaa92..e291e1e9 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -1,8 +1,9 @@ """An extension for sphinx""" +from collections import defaultdict import json import os from pathlib import Path -from typing import Any, Dict, List, Optional, Sequence +from typing import Any, DefaultDict, Dict, List, Optional, Sequence, Set, cast from docutils import nodes from markdown_it.token import Token @@ -17,14 +18,14 @@ from sphinx.addnodes import download_reference from sphinx.application import Sphinx from sphinx.environment import BuildEnvironment +from sphinx.environment.collectors import EnvironmentCollector from sphinx.transforms.post_transforms import SphinxPostTransform from sphinx.util import logging as sphinx_logging from sphinx.util.docutils import ReferenceRole from myst_nb import __version__ from myst_nb.configuration import NbParserConfig -from myst_nb.execute import update_notebook -from myst_nb.execution_tables import setup_exec_table_extension +from myst_nb.execute import ExecutionResult, update_notebook from myst_nb.loggers import DEFAULT_LOG_TYPE, SphinxDocLogger from myst_nb.nb_glue.domain import NbGlueDomain from myst_nb.parse import notebook_to_tokens @@ -102,6 +103,8 @@ def sphinx_setup(app: Sphinx): # and so do not need to be added here. 
# setup extension for execution statistics tables + from myst_nb.execution_tables import setup_exec_table_extension # circular import + setup_exec_table_extension(app) # add glue domain @@ -180,7 +183,9 @@ def install_ipywidgets(app: Sphinx, pagename: str, *args: Any, **kwargs: Any) -> """Install ipywidgets Javascript, if required on the page.""" if app.builder.format != "html": return - ipywidgets_state = get_doc_metadata(app.env, pagename, "ipywidgets_state") + ipywidgets_state = NbMetadataCollector.get_doc_data(app.env)[pagename].get( + "ipywidgets_state", None + ) if ipywidgets_state is not None: # see: https://ipywidgets.readthedocs.io/en/7.6.5/embedding.html @@ -210,27 +215,6 @@ def update_togglebutton_classes(app: Sphinx, config): config.togglebutton_selector += f", {selector}" -def store_doc_metadata(env: BuildEnvironment, docname: str, key: str, value: Any): - """Store myst-nb metadata for a document.""" - # Data in env.metadata is correctly handled, by sphinx.MetadataCollector, - # for clearing removed documents and for merging on parallel builds - - # however, one drawback is that it also extracts all docinfo to here, - # so we prepend the key name to hopefully avoid it being overwritten - - # TODO is it worth implementing a custom MetadataCollector? - if docname not in env.metadata: - env.metadata[docname] = {} - env.metadata[docname][f"__mystnb__{key}"] = value - - -def get_doc_metadata( - env: BuildEnvironment, docname: str, key: str, default=None -) -> Any: - """Get myst-nb metadata for a document.""" - return env.metadata.get(docname, {}).get(f"__mystnb__{key}", default) - - class MystNbParser(MystParser): """Sphinx parser for Jupyter Notebook formats, containing MyST Markdown.""" @@ -272,9 +256,18 @@ def parse(self, inputstring: str, document: nodes.document) -> None: notebook, document_path, nb_config, logger ) if exec_data: - store_doc_metadata(self.env, self.env.docname, "exec_data", exec_data) - - # TODO store error traceback in outdir and log its path + NbMetadataCollector.set_exec_data(self.env, self.env.docname, exec_data) + if exec_data["traceback"]: + # store error traceback in outdir and log its path + reports_file = Path(self.env.app.outdir).joinpath( + "reports", *(self.env.docname + ".err.log").split("/") + ) + reports_file.parent.mkdir(parents=True, exist_ok=True) + reports_file.write_text(exec_data["traceback"], encoding="utf8") + logger.warning( + f"Notebook exception traceback saved in: {reports_file}", + subtype="exec", + ) # Setup the parser mdit_parser = create_md_parser(nb_reader.md_config, SphinxNbRenderer) @@ -327,11 +320,13 @@ def get_nb_config(self, key: str, cell_index: Optional[int]) -> Any: def render_nb_metadata(self, token: SyntaxTreeNode) -> None: """Render the notebook metadata.""" metadata = dict(token.meta) + env = cast(BuildEnvironment, self.sphinx_env) # save these special keys on the metadata, rather than as docinfo - env = self.sphinx_env - env.metadata[env.docname]["kernelspec"] = metadata.pop("kernelspec", None) - env.metadata[env.docname]["language_info"] = metadata.pop("language_info", None) + for key in ("kernelspec", "language_info"): + NbMetadataCollector.set_doc_data( + env, env.docname, key, metadata.pop(key, None) + ) # TODO should we provide hook for NbElementRenderer? 
@@ -343,8 +338,8 @@ def render_nb_metadata(self, token: SyntaxTreeNode) -> None: ipywidgets_mime = (ipywidgets or {}).get(WIDGET_STATE_MIMETYPE, {}) if ipywidgets_mime.get("state", None): string = sanitize_script_content(json.dumps(ipywidgets_mime)) - store_doc_metadata( - self.sphinx_env, self.sphinx_env.docname, "ipywidgets_state", string + NbMetadataCollector.set_doc_data( + env, env.docname, "ipywidgets_state", string ) # forward the rest to the front_matter renderer @@ -578,3 +573,79 @@ def run(self): self.rawtext, title, classes=["xref", "download", "myst-nb"] ) return [node], [] + + +class NbMetadataCollector(EnvironmentCollector): + """Collect myst-nb specific metdata, and handle merging of parallel builds.""" + + @staticmethod + def set_doc_data(env: BuildEnvironment, docname: str, key: str, value: Any) -> None: + """Add nb metadata for a docname to the environment.""" + if not hasattr(env, "nb_metadata"): + env.nb_metadata = defaultdict(dict) + env.nb_metadata.setdefault(docname, {})[key] = value + + @staticmethod + def get_doc_data(env: BuildEnvironment) -> DefaultDict[str, dict]: + """Get myst-nb docname -> metadata dict.""" + if not hasattr(env, "nb_metadata"): + env.nb_metadata = defaultdict(dict) + return env.nb_metadata + + @classmethod + def set_exec_data( + cls, env: BuildEnvironment, docname: str, value: ExecutionResult + ) -> None: + """Add nb metadata for a docname to the environment.""" + cls.set_doc_data(env, docname, "exec_data", value) + # TODO this does not take account of cache data + cls.note_exec_update(env) + + @classmethod + def get_exec_data( + cls, env: BuildEnvironment, docname: str + ) -> Optional[ExecutionResult]: + """Get myst-nb docname -> execution data.""" + return cls.get_doc_data(env)[docname].get("exec_data") + + def get_outdated_docs( + self, + app: "Sphinx", + env: BuildEnvironment, + added: Set[str], + changed: Set[str], + removed: Set[str], + ) -> List[str]: + # called before any docs are read + env.nb_new_exec_data = False + return [] + + @staticmethod + def note_exec_update(env: BuildEnvironment) -> None: + """Note that a notebook has been executed.""" + env.nb_new_exec_data = True + + @staticmethod + def new_exec_data(env: BuildEnvironment) -> bool: + """Return whether any notebooks have updated execution data.""" + return getattr(env, "nb_new_exec_data", False) + + def clear_doc(self, app: Sphinx, env: BuildEnvironment, docname: str) -> None: + if not hasattr(env, "nb_metadata"): + env.nb_metadata = defaultdict(dict) + env.nb_metadata.pop(docname, None) + + def merge_other( + self, + app: Sphinx, + env: BuildEnvironment, + docnames: Set[str], + other: BuildEnvironment, + ) -> None: + if not hasattr(env, "nb_metadata"): + env.nb_metadata = defaultdict(dict) + other_metadata = getattr(other, "nb_metadata", defaultdict(dict)) + for docname in docnames: + env.nb_metadata[docname] = other_metadata[docname] + if other.nb_new_exec_data: + env.nb_new_exec_data = True diff --git a/tests/conftest.py b/tests/conftest.py index 0f7c6da0..f5af8a9d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -3,6 +3,7 @@ from pathlib import Path import uuid +import bs4 from docutils.nodes import image as image_node from nbconvert.filters import strip_ansi from nbdime.diffing.notebooks import ( @@ -107,7 +108,7 @@ def get_html(self, index=0): _path = self.app.outdir / (name + ".html") if not _path.exists(): pytest.fail("html not output") - return read_text(_path) + return bs4.BeautifulSoup(read_text(_path), "html.parser") def get_nb(self, index=0): """Return 
the output notebook (after any execution).""" @@ -120,7 +121,7 @@ def get_nb(self, index=0): def get_report_file(self, index=0): """Return the report file for a failed execution.""" name = self.files[index][0] - _path = self.app.outdir / "reports" / (name + ".log") + _path = self.app.outdir / "reports" / (name + ".err.log") if not _path.exists(): pytest.fail("report log not output") return read_text(_path) diff --git a/tests/notebooks/nb_exec_table.md b/tests/notebooks/nb_exec_table.md index 0242ad26..4f3c2575 100644 --- a/tests/notebooks/nb_exec_table.md +++ b/tests/notebooks/nb_exec_table.md @@ -14,6 +14,10 @@ author: Chris # Test the `nb-exec-table` directive +```{code-cell} ipython3 +print("hi") +``` + This directive should generate a table of executed notebook statistics. ```{nb-exec-table} diff --git a/tests/test_execute.py b/tests/test_execute.py index 66650cda..7ab56aea 100644 --- a/tests/test_execute.py +++ b/tests/test_execute.py @@ -1,14 +1,17 @@ -import os - +"""Test sphinx builds which execute notebooks.""" import pytest +from myst_nb.sphinx_ import NbMetadataCollector + def regress_nb_doc(file_regression, sphinx_run, check_nbs): - file_regression.check( - sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb", encoding="utf8" - ) - doctree = sphinx_run.get_doctree() - file_regression.check(doctree.pformat(), extension=".xml", encoding="utf8") + try: + file_regression.check( + sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb", encoding="utf8" + ) + finally: + doctree = sphinx_run.get_doctree() + file_regression.check(doctree.pformat(), extension=".xml", encoding="utf8") @pytest.mark.sphinx_params("basic_unrun.ipynb", conf={"nb_execution_mode": "auto"}) @@ -19,13 +22,11 @@ def test_basic_unrun_auto(sphinx_run, file_regression, check_nbs): assert "test_name" in sphinx_run.app.env.metadata["basic_unrun"] regress_nb_doc(file_regression, sphinx_run, check_nbs) - # Test execution statistics, should look like: - # {'basic_unrun': {'mtime': '2020-08-20T03:32:27.061454', 'runtime': 0.964572671, - # 'method': 'auto', 'succeeded': True}} - assert sphinx_run.env.nb_execution_data_changed is True - assert "basic_unrun" in sphinx_run.env.nb_execution_data - assert sphinx_run.env.nb_execution_data["basic_unrun"]["method"] == "auto" - assert sphinx_run.env.nb_execution_data["basic_unrun"]["succeeded"] is True + assert NbMetadataCollector.new_exec_data(sphinx_run.env) + data = NbMetadataCollector.get_exec_data(sphinx_run.env, "basic_unrun") + assert data + assert data["method"] == "auto" + assert data["succeeded"] is True @pytest.mark.sphinx_params("basic_unrun.ipynb", conf={"nb_execution_mode": "cache"}) @@ -36,33 +37,31 @@ def test_basic_unrun_cache(sphinx_run, file_regression, check_nbs): assert "test_name" in sphinx_run.app.env.metadata["basic_unrun"] regress_nb_doc(file_regression, sphinx_run, check_nbs) - # Test execution statistics, should look like: - # {'basic_unrun': {'mtime': '2020-08-20T03:32:27.061454', 'runtime': 0.964572671, - # 'method': 'cache', 'succeeded': True}} - assert sphinx_run.env.nb_execution_data_changed is True - assert "basic_unrun" in sphinx_run.env.nb_execution_data - assert sphinx_run.env.nb_execution_data["basic_unrun"]["method"] == "cache" - assert sphinx_run.env.nb_execution_data["basic_unrun"]["succeeded"] is True + assert NbMetadataCollector.new_exec_data(sphinx_run.env) + data = NbMetadataCollector.get_exec_data(sphinx_run.env, "basic_unrun") + assert data + assert data["method"] == "cache" + assert data["succeeded"] is True 
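The tests above read execution statistics through the new NbMetadataCollector API rather than poking at environment attributes directly. A minimal sketch of how downstream code might consume that data, assuming the collector and key names introduced in this patch series; the event handler itself is hypothetical:

```python
from sphinx.application import Sphinx

from myst_nb.sphinx_ import NbMetadataCollector


def report_failed_notebooks(app: Sphinx, env) -> None:
    """Hypothetical 'env-updated' handler that logs notebooks whose execution failed."""
    for docname, data in NbMetadataCollector.get_doc_data(env).items():
        exec_data = data.get("exec_data")
        if exec_data and not exec_data.get("succeeded", True):
            print(f"{docname}: execution failed (method={exec_data.get('method')})")


def setup(app: Sphinx):
    # Hypothetical wiring; myst-nb registers the collector itself via add_env_collector.
    app.connect("env-updated", report_failed_notebooks)
    return {"parallel_read_safe": True}
```
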
@pytest.mark.sphinx_params("basic_unrun.ipynb", conf={"nb_execution_mode": "cache"}) def test_rebuild_cache(sphinx_run): """The notebook should only be executed once.""" sphinx_run.build() - assert "Executing" in sphinx_run.status(), sphinx_run.status() + assert NbMetadataCollector.new_exec_data(sphinx_run.env) sphinx_run.invalidate_files() sphinx_run.build() - assert "Executing" not in sphinx_run.status(), sphinx_run.status() + assert "Using cached" in sphinx_run.status() @pytest.mark.sphinx_params("basic_unrun.ipynb", conf={"nb_execution_mode": "force"}) def test_rebuild_force(sphinx_run): """The notebook should be executed twice.""" sphinx_run.build() - assert "Executing" in sphinx_run.status(), sphinx_run.status() + assert NbMetadataCollector.new_exec_data(sphinx_run.env) sphinx_run.invalidate_files() sphinx_run.build() - assert "Executing" in sphinx_run.status(), sphinx_run.status() + assert NbMetadataCollector.new_exec_data(sphinx_run.env) @pytest.mark.sphinx_params( @@ -75,7 +74,7 @@ def test_rebuild_force(sphinx_run): def test_exclude_path(sphinx_run, file_regression): """The notebook should not be executed.""" sphinx_run.build() - assert len(sphinx_run.app.env.nb_excluded_exec_paths) == 1 + assert not NbMetadataCollector.new_exec_data(sphinx_run.env) assert "Executing" not in sphinx_run.status(), sphinx_run.status() file_regression.check( sphinx_run.get_doctree().pformat(), extension=".xml", encoding="utf8" @@ -85,34 +84,29 @@ def test_exclude_path(sphinx_run, file_regression): @pytest.mark.sphinx_params("basic_failing.ipynb", conf={"nb_execution_mode": "cache"}) def test_basic_failing_cache(sphinx_run, file_regression, check_nbs): sphinx_run.build() - assert "Execution Failed" in sphinx_run.warnings() - expected_path = "" if os.name == "nt" else "source/basic_failing.ipynb" - assert ( - f"Couldn't find cache key for notebook file {expected_path}" - in sphinx_run.warnings() - ) + # print(sphinx_run.warnings()) + assert "Executing notebook failed" in sphinx_run.warnings() regress_nb_doc(file_regression, sphinx_run, check_nbs) - sphinx_run.get_report_file() - assert "basic_failing" in sphinx_run.env.nb_execution_data - assert sphinx_run.env.nb_execution_data["basic_failing"]["method"] == "cache" - assert sphinx_run.env.nb_execution_data["basic_failing"]["succeeded"] is False - assert "error_log" in sphinx_run.env.nb_execution_data["basic_failing"] + data = NbMetadataCollector.get_exec_data(sphinx_run.env, "basic_failing") + assert data + assert data["method"] == "cache" + assert data["succeeded"] is False + sphinx_run.get_report_file() @pytest.mark.sphinx_params("basic_failing.ipynb", conf={"nb_execution_mode": "auto"}) def test_basic_failing_auto(sphinx_run, file_regression, check_nbs): sphinx_run.build() - # print(sphinx_run.status()) - assert "Execution Failed" in sphinx_run.warnings() - assert "Execution Failed with traceback saved in" in sphinx_run.warnings() + assert "Executing notebook failed" in sphinx_run.warnings() regress_nb_doc(file_regression, sphinx_run, check_nbs) - sphinx_run.get_report_file() - assert "basic_failing" in sphinx_run.env.nb_execution_data - assert sphinx_run.env.nb_execution_data["basic_failing"]["method"] == "auto" - assert sphinx_run.env.nb_execution_data["basic_failing"]["succeeded"] is False - assert "error_log" in sphinx_run.env.nb_execution_data["basic_failing"] + data = NbMetadataCollector.get_exec_data(sphinx_run.env, "basic_failing") + assert data + assert data["method"] == "auto" + assert data["succeeded"] is False + assert data["traceback"] 
+ sphinx_run.get_report_file() @pytest.mark.sphinx_params( @@ -156,9 +150,15 @@ def test_complex_outputs_unrun_cache(sphinx_run, file_regression, check_nbs): regress_nb_doc(file_regression, sphinx_run, check_nbs) # Widget view and widget state should make it into the HTML - html = sphinx_run.get_html() - assert '<script type="application/vnd.jupyter.widget-view+json">' in html - assert '<script type="application/vnd.jupyter.widget-state+json">' in html + scripts = sphinx_run.get_html().select("script") + assert any( + "application/vnd.jupyter.widget-view+json" in script.get("type", "") + for script in scripts + ) + assert any( + "application/vnd.jupyter.widget-state+json" in script.get("type", "") + for script in scripts + ) @pytest.mark.sphinx_params( @@ -171,9 +171,15 @@ def test_complex_outputs_unrun_auto(sphinx_run, file_regression, check_nbs): regress_nb_doc(file_regression, sphinx_run, check_nbs) # Widget view and widget state should make it into the HTML - html = sphinx_run.get_html() - assert '<script type="application/vnd.jupyter.widget-view+json">' in html - assert '<script type="application/vnd.jupyter.widget-state+json">' in html + scripts = sphinx_run.get_html().select("script") + assert any( + "application/vnd.jupyter.widget-view+json" in script.get("type", "") + for script in scripts + ) + assert any( + "application/vnd.jupyter.widget-state+json" in script.get("type", "") + for script in scripts + ) @pytest.mark.sphinx_params("basic_unrun.ipynb", conf={"nb_execution_mode": "off"}) @@ -187,7 +193,7 @@ def test_no_execute(sphinx_run, file_regression, check_nbs): @pytest.mark.sphinx_params("basic_unrun.ipynb", conf={"nb_execution_mode": "cache"}) def test_jupyter_cache_path(sphinx_run, file_regression, check_nbs): sphinx_run.build() - assert "Execution Succeeded" in sphinx_run.status() + assert "Cached executed notebook" in sphinx_run.status() assert sphinx_run.warnings() == "" regress_nb_doc(file_regression, sphinx_run, check_nbs) @@ -213,8 +219,8 @@ def test_relative_path_force(sphinx_run, file_regression, check_nbs): def test_execution_timeout(sphinx_run, file_regression, check_nbs): """execution should fail given the low timeout value""" sphinx_run.build() - # print(sphinx_run.status()) - assert "execution failed" in sphinx_run.warnings() + # print(sphinx_run.warnings()) + assert "Executing notebook failed" in sphinx_run.warnings() @pytest.mark.sphinx_params( @@ -224,7 +230,8 @@ def test_execution_timeout(sphinx_run, file_regression, check_nbs): def test_execution_metadata_timeout(sphinx_run, file_regression, check_nbs): """notebook timeout metadata has higher preference then execution_timeout config""" sphinx_run.build() - assert "execution failed" in sphinx_run.warnings() + # print(sphinx_run.warnings()) + assert "Executing notebook failed" in sphinx_run.warnings() @pytest.mark.sphinx_params( @@ -236,11 +243,14 @@ def test_nb_exec_table(sphinx_run, file_regression, check_nbs): including a row for the executed notebook. 
""" sphinx_run.build() + # print(sphinx_run.status()) assert not sphinx_run.warnings() file_regression.check( sphinx_run.get_doctree().pformat(), extension=".xml", encoding="utf8" ) - assert '<tr class="row-even"><td><p>nb_exec_table</p></td>' in sphinx_run.get_html() + # print(sphinx_run.get_html()) + rows = sphinx_run.get_html().select("table.docutils tr") + assert any("nb_exec_table" in row.text for row in rows) @pytest.mark.sphinx_params( @@ -256,10 +266,11 @@ def test_custom_convert_auto(sphinx_run, file_regression, check_nbs): assert sphinx_run.warnings() == "" regress_nb_doc(file_regression, sphinx_run, check_nbs) - assert sphinx_run.env.nb_execution_data_changed is True - assert "custom-formats" in sphinx_run.env.nb_execution_data - assert sphinx_run.env.nb_execution_data["custom-formats"]["method"] == "auto" - assert sphinx_run.env.nb_execution_data["custom-formats"]["succeeded"] is True + assert NbMetadataCollector.new_exec_data(sphinx_run.env) + data = NbMetadataCollector.get_exec_data(sphinx_run.env, "custom-formats") + assert data + assert data["method"] == "auto" + assert data["succeeded"] is True @pytest.mark.sphinx_params( @@ -275,7 +286,8 @@ def test_custom_convert_cache(sphinx_run, file_regression, check_nbs): assert sphinx_run.warnings() == "" regress_nb_doc(file_regression, sphinx_run, check_nbs) - assert sphinx_run.env.nb_execution_data_changed is True - assert "custom-formats" in sphinx_run.env.nb_execution_data - assert sphinx_run.env.nb_execution_data["custom-formats"]["method"] == "cache" - assert sphinx_run.env.nb_execution_data["custom-formats"]["succeeded"] is True + assert NbMetadataCollector.new_exec_data(sphinx_run.env) + data = NbMetadataCollector.get_exec_data(sphinx_run.env, "custom-formats") + assert data + assert data["method"] == "cache" + assert data["succeeded"] is True diff --git a/tests/test_execute/test_basic_failing_auto.xml b/tests/test_execute/test_basic_failing_auto.xml index 591a7d5b..d1f28253 100644 --- a/tests/test_execute/test_basic_failing_auto.xml +++ b/tests/test_execute/test_basic_failing_auto.xml @@ -4,9 +4,15 @@ a title <paragraph> some text - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> raise Exception('oopsie!') - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output traceback" language="ipythontb" xml:space="preserve"> + --------------------------------------------------------------------------- + Exception Traceback (most recent call last) + <ipython-input-1-714b2b556897> in <module> + ----> 1 raise Exception('oopsie!') + + Exception: oopsie! 
diff --git a/tests/test_execute/test_basic_failing_cache.ipynb b/tests/test_execute/test_basic_failing_cache.ipynb index 6fbd6f7c..fd6b21f2 100644 --- a/tests/test_execute/test_basic_failing_cache.ipynb +++ b/tests/test_execute/test_basic_failing_cache.ipynb @@ -11,9 +11,21 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, - "outputs": [], + "outputs": [ + { + "ename": "Exception", + "evalue": "oopsie!", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mException\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m<ipython-input-1-714b2b556897>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mException\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'oopsie!'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;31mException\u001b[0m: oopsie!" + ] + } + ], "source": [ "raise Exception('oopsie!')" ] @@ -35,7 +47,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.1" + "version": "3.7.12" }, "test_name": "notebook1" }, diff --git a/tests/test_execute/test_basic_failing_cache.xml b/tests/test_execute/test_basic_failing_cache.xml index e554da53..d1f28253 100644 --- a/tests/test_execute/test_basic_failing_cache.xml +++ b/tests/test_execute/test_basic_failing_cache.xml @@ -4,7 +4,15 @@ a title <paragraph> some text - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> raise Exception('oopsie!') + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output traceback" language="ipythontb" xml:space="preserve"> + --------------------------------------------------------------------------- + Exception Traceback (most recent call last) + <ipython-input-1-714b2b556897> in <module> + ----> 1 raise Exception('oopsie!') + + Exception: oopsie! 
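Earlier in this patch the HTML assertions switched from raw substring matching to parsing with BeautifulSoup (conftest's get_html now returns a parsed tree). A standalone sketch of the same widget-script check, run against a hypothetical HTML snippet rather than a real build:

```python
import bs4

# Hypothetical HTML, standing in for a built page that embeds widget state.
html = """
<html><body>
<script type="application/vnd.jupyter.widget-state+json">{"state": {}}</script>
<script type="application/vnd.jupyter.widget-view+json">{"model_id": "abc"}</script>
</body></html>
"""

soup = bs4.BeautifulSoup(html, "html.parser")
scripts = soup.select("script")
assert any(
    "application/vnd.jupyter.widget-state+json" in script.get("type", "")
    for script in scripts
)
assert any(
    "application/vnd.jupyter.widget-view+json" in script.get("type", "")
    for script in scripts
)
```

Parsing the page makes the assertions robust to attribute ordering and whitespace, which plain string matching is not.
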
diff --git a/tests/test_execute/test_exclude_path.xml b/tests/test_execute/test_exclude_path.xml index 0fe2eaff..f333eb9c 100644 --- a/tests/test_execute/test_exclude_path.xml +++ b/tests/test_execute/test_exclude_path.xml @@ -4,8 +4,8 @@ a title <paragraph> some text - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="True" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> a=1 print(a) diff --git a/tests/test_execute/test_jupyter_cache_path.xml b/tests/test_execute/test_jupyter_cache_path.xml index 4459cd69..65d43c23 100644 --- a/tests/test_execute/test_jupyter_cache_path.xml +++ b/tests/test_execute/test_jupyter_cache_path.xml @@ -4,10 +4,11 @@ a title <paragraph> some text - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> a=1 print(a) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output stream" language="myst-ansi" xml:space="preserve"> + 1 diff --git a/tests/test_execute/test_nb_exec_table.xml b/tests/test_execute/test_nb_exec_table.xml index 1b6b8bdc..a7cd2e4e 100644 --- a/tests/test_execute/test_nb_exec_table.xml +++ b/tests/test_execute/test_nb_exec_table.xml @@ -5,6 +5,13 @@ <literal> nb-exec-table directive + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" xml:space="preserve"> + print("hi") + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output stream" language="myst-ansi" xml:space="preserve"> + hi <paragraph> This directive should generate a table of executed notebook statistics. 
<ExecutionStatsNode> diff --git a/tests/test_sphinx_builds.py b/tests/test_sphinx_builds.py index da3e63da..43e27505 100644 --- a/tests/test_sphinx_builds.py +++ b/tests/test_sphinx_builds.py @@ -1,5 +1,4 @@ """Test full sphinx builds.""" -import bs4 import pytest @@ -53,7 +52,6 @@ def test_ipywidgets(sphinx_run): # print(sphinx_run.status()) assert sphinx_run.warnings() == "" assert "__mystnb__ipywidgets_state" in sphinx_run.env.metadata["ipywidgets"] - html = bs4.BeautifulSoup(sphinx_run.get_html(), "html.parser") - head_scripts = html.select("head > script") + head_scripts = sphinx_run.get_html().select("head > script") assert any("require.js" in script.get("src", "") for script in head_scripts) assert any("embed-amd.js" in script.get("src", "") for script in head_scripts) diff --git a/tests/test_text_based.py b/tests/test_text_based.py index b98ddcd5..2d598944 100644 --- a/tests/test_text_based.py +++ b/tests/test_text_based.py @@ -9,19 +9,23 @@ def test_basic_run(sphinx_run, file_regression, check_nbs): sphinx_run.build() # print(sphinx_run.status()) assert sphinx_run.warnings() == "" - assert set(sphinx_run.app.env.metadata["basic_unrun"].keys()) == { + assert set(sphinx_run.env.metadata["basic_unrun"].keys()) == { "jupytext", - "kernelspec", "author", "source_map", - "language_info", "wordcount", } - assert sphinx_run.app.env.metadata["basic_unrun"]["author"] == "Chris" - assert ( - sphinx_run.app.env.metadata["basic_unrun"]["kernelspec"] - == '{"display_name": "Python 3", "language": "python", "name": "python3"}' - ) + assert set(sphinx_run.env.nb_metadata["basic_unrun"].keys()) == { + "exec_data", + "kernelspec", + "language_info", + } + assert sphinx_run.env.metadata["basic_unrun"]["author"] == "Chris" + assert sphinx_run.env.nb_metadata["basic_unrun"]["kernelspec"] == { + "display_name": "Python 3", + "language": "python", + "name": "python3", + } file_regression.check( sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb", encoding="utf8" ) @@ -37,8 +41,11 @@ def test_basic_run(sphinx_run, file_regression, check_nbs): def test_basic_run_exec_off(sphinx_run, file_regression, check_nbs): sphinx_run.build() # print(sphinx_run.status()) - assert "language_info" not in set(sphinx_run.app.env.metadata["basic_unrun"].keys()) - assert sphinx_run.app.env.metadata["basic_unrun"]["author"] == "Chris" + assert set(sphinx_run.env.nb_metadata["basic_unrun"].keys()) == { + "kernelspec", + "language_info", + } + assert sphinx_run.env.metadata["basic_unrun"]["author"] == "Chris" file_regression.check( sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb", encoding="utf8" @@ -52,8 +59,8 @@ def test_basic_run_exec_off(sphinx_run, file_regression, check_nbs): "basic_nometadata.md", conf={"nb_execution_mode": "off", "source_suffix": {".md": "myst-nb"}}, ) -def test_basic_nometadata(sphinx_run, file_regression, check_nbs): +def test_basic_nometadata(sphinx_run): """A myst-markdown notebook with no jupytext metadata should raise a warning.""" sphinx_run.build() # print(sphinx_run.status()) - assert "Found an unexpected `code-cell` directive." 
in sphinx_run.warnings() + assert "Found an unexpected `code-cell`" in sphinx_run.warnings() diff --git a/tests/test_text_based/test_basic_run.xml b/tests/test_text_based/test_basic_run.xml index 2310d7f0..aa1a72d6 100644 --- a/tests/test_text_based/test_basic_run.xml +++ b/tests/test_text_based/test_basic_run.xml @@ -6,10 +6,11 @@ this was created using <literal> jupytext --to myst tests/notebooks/basic_unrun.ipynb - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> a=1 print(a) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output stream" language="myst-ansi" xml:space="preserve"> + 1 diff --git a/tests/test_text_based/test_basic_run_exec_off.xml b/tests/test_text_based/test_basic_run_exec_off.xml index ccef64f7..a24bae2e 100644 --- a/tests/test_text_based/test_basic_run_exec_off.xml +++ b/tests/test_text_based/test_basic_run_exec_off.xml @@ -6,8 +6,8 @@ this was created using <literal> jupytext --to myst tests/notebooks/basic_unrun.ipynb - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> - <literal_block xml:space="preserve"> + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="True" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="python" xml:space="preserve"> a=1 print(a) From 2bb55e203729170a23b838beb822ee1c81fbef0e Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 6 Jan 2022 03:02:52 +0100 Subject: [PATCH 29/75] fix more tests --- tests/test_codecell_file.py | 79 +++++++++ .../test_codecell_file.ipynb | 0 .../test_codecell_file.xml | 4 +- .../test_codecell_file_warnings.ipynb | 0 .../test_codecell_file_warnings.xml | 4 +- tests/test_mystnb_features.py | 69 -------- tests/test_nb_render.py | 4 +- tests/test_parser/test_basic_run.xml | 9 +- tests/test_parser/test_complex_outputs.xml | 167 ++++++++++++++---- 9 files changed, 227 insertions(+), 109 deletions(-) create mode 100644 tests/test_codecell_file.py rename tests/{test_mystnb_features => test_codecell_file}/test_codecell_file.ipynb (100%) rename tests/{test_mystnb_features => test_codecell_file}/test_codecell_file.xml (60%) rename tests/{test_mystnb_features => test_codecell_file}/test_codecell_file_warnings.ipynb (100%) rename tests/{test_mystnb_features => test_codecell_file}/test_codecell_file_warnings.xml (61%) delete mode 100644 tests/test_mystnb_features.py diff --git a/tests/test_codecell_file.py b/tests/test_codecell_file.py new file mode 100644 index 00000000..e65932e4 --- /dev/null +++ b/tests/test_codecell_file.py @@ -0,0 +1,79 @@ +"""Test notebooks containing code cells with the `load` option.""" +import pytest +from sphinx.util.fileutil import copy_asset_file + + +@pytest.mark.sphinx_params( + "mystnb_codecell_file.md", + conf={"nb_execution_mode": "cache", "source_suffix": {".md": "myst-nb"}}, +) +def test_codecell_file(sphinx_run, file_regression, check_nbs, get_test_path): + asset_path = get_test_path("mystnb_codecell_file.py") + copy_asset_file(str(asset_path), str(sphinx_run.app.srcdir)) + sphinx_run.build() + assert sphinx_run.warnings() == "" + assert 
set(sphinx_run.env.metadata["mystnb_codecell_file"].keys()) == { + "jupytext", + "author", + "source_map", + "wordcount", + } + assert set(sphinx_run.env.nb_metadata["mystnb_codecell_file"].keys()) == { + "exec_data", + "kernelspec", + "language_info", + } + assert sphinx_run.env.metadata["mystnb_codecell_file"]["author"] == "Matt" + assert sphinx_run.env.nb_metadata["mystnb_codecell_file"]["kernelspec"] == { + "display_name": "Python 3", + "language": "python", + "name": "python3", + } + try: + file_regression.check( + sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb", encoding="utf8" + ) + finally: + file_regression.check( + sphinx_run.get_doctree().pformat(), extension=".xml", encoding="utf8" + ) + + +@pytest.mark.sphinx_params( + "mystnb_codecell_file_warnings.md", + conf={"nb_execution_mode": "force", "source_suffix": {".md": "myst-nb"}}, +) +def test_codecell_file_warnings(sphinx_run, file_regression, check_nbs, get_test_path): + asset_path = get_test_path("mystnb_codecell_file.py") + copy_asset_file(str(asset_path), str(sphinx_run.app.srcdir)) + sphinx_run.build() + # assert ( + # "mystnb_codecell_file_warnings.md:14 content of code-cell " + # "is being overwritten by :load: mystnb_codecell_file.py" + # in sphinx_run.warnings() + # ) + assert set(sphinx_run.env.metadata["mystnb_codecell_file_warnings"].keys()) == { + "jupytext", + "author", + "source_map", + "wordcount", + } + assert set(sphinx_run.env.nb_metadata["mystnb_codecell_file_warnings"].keys()) == { + "exec_data", + "kernelspec", + "language_info", + } + assert ( + sphinx_run.env.metadata["mystnb_codecell_file_warnings"]["author"] == "Aakash" + ) + assert sphinx_run.env.nb_metadata["mystnb_codecell_file_warnings"][ + "kernelspec" + ] == {"display_name": "Python 3", "language": "python", "name": "python3"} + try: + file_regression.check( + sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb", encoding="utf8" + ) + finally: + file_regression.check( + sphinx_run.get_doctree().pformat(), extension=".xml", encoding="utf8" + ) diff --git a/tests/test_mystnb_features/test_codecell_file.ipynb b/tests/test_codecell_file/test_codecell_file.ipynb similarity index 100% rename from tests/test_mystnb_features/test_codecell_file.ipynb rename to tests/test_codecell_file/test_codecell_file.ipynb diff --git a/tests/test_mystnb_features/test_codecell_file.xml b/tests/test_codecell_file/test_codecell_file.xml similarity index 60% rename from tests/test_mystnb_features/test_codecell_file.xml rename to tests/test_codecell_file/test_codecell_file.xml index ee7404af..b3a1a24a 100644 --- a/tests/test_mystnb_features/test_codecell_file.xml +++ b/tests/test_codecell_file/test_codecell_file.xml @@ -2,8 +2,8 @@ <section classes="tex2jax_ignore mathjax_ignore" ids="a-title" names="a\ title"> <title> a title - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{'load': 'mystnb_codecell_file.py'}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> # flake8: noqa diff --git a/tests/test_mystnb_features/test_codecell_file_warnings.ipynb b/tests/test_codecell_file/test_codecell_file_warnings.ipynb similarity index 100% rename from tests/test_mystnb_features/test_codecell_file_warnings.ipynb rename to tests/test_codecell_file/test_codecell_file_warnings.ipynb diff --git a/tests/test_mystnb_features/test_codecell_file_warnings.xml 
b/tests/test_codecell_file/test_codecell_file_warnings.xml similarity index 61% rename from tests/test_mystnb_features/test_codecell_file_warnings.xml rename to tests/test_codecell_file/test_codecell_file_warnings.xml index ead04f06..f4ad1c68 100644 --- a/tests/test_mystnb_features/test_codecell_file_warnings.xml +++ b/tests/test_codecell_file/test_codecell_file_warnings.xml @@ -2,8 +2,8 @@ <section classes="tex2jax_ignore mathjax_ignore" ids="a-title" names="a\ title"> <title> a title - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{'load': 'mystnb_codecell_file.py'}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> # flake8: noqa diff --git a/tests/test_mystnb_features.py b/tests/test_mystnb_features.py deleted file mode 100644 index bc047564..00000000 --- a/tests/test_mystnb_features.py +++ /dev/null @@ -1,69 +0,0 @@ -import pytest -from sphinx.util.fileutil import copy_asset_file - - -@pytest.mark.sphinx_params( - "mystnb_codecell_file.md", - conf={"nb_execution_mode": "cache", "source_suffix": {".md": "myst-nb"}}, -) -def test_codecell_file(sphinx_run, file_regression, check_nbs, get_test_path): - asset_path = get_test_path("mystnb_codecell_file.py") - copy_asset_file(str(asset_path), str(sphinx_run.app.srcdir)) - sphinx_run.build() - assert sphinx_run.warnings() == "" - assert set(sphinx_run.app.env.metadata["mystnb_codecell_file"].keys()) == { - "jupytext", - "kernelspec", - "author", - "source_map", - "language_info", - "wordcount", - } - assert sphinx_run.app.env.metadata["mystnb_codecell_file"]["author"] == "Matt" - assert ( - sphinx_run.app.env.metadata["mystnb_codecell_file"]["kernelspec"] - == '{"display_name": "Python 3", "language": "python", "name": "python3"}' - ) - file_regression.check( - sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb", encoding="utf8" - ) - file_regression.check( - sphinx_run.get_doctree().pformat(), extension=".xml", encoding="utf8" - ) - - -@pytest.mark.sphinx_params( - "mystnb_codecell_file_warnings.md", - conf={"nb_execution_mode": "force", "source_suffix": {".md": "myst-nb"}}, -) -def test_codecell_file_warnings(sphinx_run, file_regression, check_nbs, get_test_path): - asset_path = get_test_path("mystnb_codecell_file.py") - copy_asset_file(str(asset_path), str(sphinx_run.app.srcdir)) - sphinx_run.build() - assert ( - "mystnb_codecell_file_warnings.md:14 content of code-cell " - "is being overwritten by :load: mystnb_codecell_file.py" - in sphinx_run.warnings() - ) - assert set(sphinx_run.app.env.metadata["mystnb_codecell_file_warnings"].keys()) == { - "jupytext", - "kernelspec", - "author", - "source_map", - "language_info", - "wordcount", - } - assert ( - sphinx_run.app.env.metadata["mystnb_codecell_file_warnings"]["author"] - == "Aakash" - ) - assert ( - sphinx_run.app.env.metadata["mystnb_codecell_file_warnings"]["kernelspec"] - == '{"display_name": "Python 3", "language": "python", "name": "python3"}' - ) - file_regression.check( - sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb", encoding="utf8" - ) - file_regression.check( - sphinx_run.get_doctree().pformat(), extension=".xml", encoding="utf8" - ) diff --git a/tests/test_nb_render.py b/tests/test_nb_render.py index 3ae5b7a3..3bd7529f 100644 --- a/tests/test_nb_render.py +++ b/tests/test_nb_render.py @@ -15,7 +15,7 @@ "line,title,input,expected", 
read_fixture_file(FIXTURE_PATH.joinpath("basic.txt")) ) def test_render(line, title, input, expected): - from myst_nb.parser import nb_to_tokens, tokens_to_docutils + from myst_nb.parse import nb_to_tokens, tokens_to_docutils dct = yaml.safe_load(input) dct.setdefault("metadata", {}) @@ -35,7 +35,7 @@ def test_render(line, title, input, expected): read_fixture_file(FIXTURE_PATH.joinpath("reporter_warnings.txt")), ) def test_reporting(line, title, input, expected): - from myst_nb.parser import nb_to_tokens, tokens_to_docutils + from myst_nb.parse import nb_to_tokens, tokens_to_docutils dct = yaml.safe_load(input) dct.setdefault("metadata", {}) diff --git a/tests/test_parser/test_basic_run.xml b/tests/test_parser/test_basic_run.xml index efdcf57b..668e5841 100644 --- a/tests/test_parser/test_basic_run.xml +++ b/tests/test_parser/test_basic_run.xml @@ -4,10 +4,11 @@ a title <paragraph> some text - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> a=1 print(a) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output stream" language="myst-ansi" xml:space="preserve"> + 1 diff --git a/tests/test_parser/test_complex_outputs.xml b/tests/test_parser/test_complex_outputs.xml index 951e4824..d673be65 100644 --- a/tests/test_parser/test_complex_outputs.xml +++ b/tests/test_parser/test_complex_outputs.xml @@ -1,6 +1,6 @@ <document source="complex_outputs"> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="0" cell_metadata="{'init_cell': True, 'slideshow': {'slide_type': 'skip'}}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> import matplotlib.pyplot as plt import pandas as pd @@ -87,24 +87,33 @@ <section classes="tex2jax_ignore mathjax_ignore" ids="text-output" names="text\ output"> <title> Text Output - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="11" cell_metadata="{'ipub': {'text': {'format': {'backgroundcolor': '\\color{blue!10}'}}}}" classes="cell" exec_count="2" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> print(""" This is some printed text, with a nicely formatted output. """) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output stream" language="myst-ansi" xml:space="preserve"> + + This is some printed text, + with a nicely formatted output. 
+ <section classes="tex2jax_ignore mathjax_ignore" ids="images-and-figures" names="images\ and\ figures"> <title> Images and Figures - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="13" cell_metadata="{'ipub': {'figure': {'caption': 'A nice picture.', 'label': 'fig:example', 'placement': '!bh'}}}" classes="cell" exec_count="3" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> Image('example.jpg',height=400) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="image/jpeg"> + <image candidates="{'*': '_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg'}" uri="_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg"> + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + <IPython.core.display.Image object> <section ids="displaying-a-plot-with-its-code" names="displaying\ a\ plot\ with\ its\ code"> <title> Displaying a plot with its code @@ -112,54 +121,152 @@ A matplotlib figure, with the caption set in the markdowncell above the figure. <paragraph> The plotting code for a matplotlib figure (\cref{fig:example_mpl}). - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="17" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': 'a', 'label': 'code:example_mpl', 'widefigure': False}, 'figure': {'caption': '', 'label': 'fig:example_mpl', 'widefigure': False}}}" classes="cell" exec_count="4" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> plt.scatter(np.random.rand(10), np.random.rand(10), label='data label') plt.ylabel(r'a y label with latex $\alpha$') plt.legend(); - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="image/png"> + <image candidates="{'*': '_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png'}" uri="_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png"> + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + <Figure size 432x288 with 1 Axes> <section classes="tex2jax_ignore mathjax_ignore" ids="tables-with-pandas" names="tables\ (with\ pandas)"> <title> Tables (with pandas) <paragraph> The plotting code for a pandas Dataframe table (\cref{tbl:example}). 
- <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="20" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_pd', 'placement': 'H', 'widefigure': False}, 'table': {'alternate': 'gray!20', 'caption': 'An example of a table created with pandas dataframe.', 'label': 'tbl:example', 'placement': 'H'}}}" classes="cell" exec_count="5" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> df = pd.DataFrame(np.random.rand(3,4),columns=['a','b','c','d']) df.a = ['$\delta$','x','y'] df.b = ['l','m','n'] df.set_index(['a','b']) df.round(3) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="text/html"> + <raw classes="output text_html" format="html" xml:space="preserve"> + <div> + <style scoped> + .dataframe tbody tr th:only-of-type { + vertical-align: middle; + } + + .dataframe tbody tr th { + vertical-align: top; + } + + .dataframe thead th { + text-align: right; + } + </style> + <table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>a</th> + <th>b</th> + <th>c</th> + <th>d</th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>$\delta$</td> + <td>l</td> + <td>0.391</td> + <td>0.607</td> + </tr> + <tr> + <th>1</th> + <td>x</td> + <td>m</td> + <td>0.132</td> + <td>0.205</td> + </tr> + <tr> + <th>2</th> + <td>y</td> + <td>n</td> + <td>0.969</td> + <td>0.726</td> + </tr> + </tbody> + </table> + </div> + <container mime_type="text/latex"> + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + \begin{tabular}{lllrr} + \toprule + {} & a & b & c & d \\ + \midrule + 0 & \$\textbackslash delta\$ & l & 0.391 & 0.607 \\ + 1 & x & m & 0.132 & 0.205 \\ + 2 & y & n & 0.969 & 0.726 \\ + \bottomrule + \end{tabular} + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + a b c d + 0 $\delta$ l 0.391 0.607 + 1 x m 0.132 0.205 + 2 y n 0.969 0.726 <section classes="tex2jax_ignore mathjax_ignore" ids="equations-with-ipython-or-sympy" names="equations\ (with\ ipython\ or\ sympy)"> <title> Equations (with ipython or sympy) - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="22" cell_metadata="{'ipub': {'equation': {'label': 'eqn:example_ipy'}}}" classes="cell" exec_count="6" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> Latex('$$ a = b+c $$') - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="text/latex"> + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + a = b+c + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + <IPython.core.display.Latex object> <paragraph> The plotting code for a sympy equation (=@eqn:example_sympy). 
- <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="24" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_sym', 'placement': 'H', 'widefigure': False}, 'equation': {'environment': 'equation', 'label': 'eqn:example_sympy'}}}" classes="cell" exec_count="7" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> y = sym.Function('y') n = sym.symbols(r'\alpha') f = y(n)-2*y(n-1/sym.pi)-5*y(n-2) sym.rsolve(f,y(n),[1,4]) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="image/png"> + <image candidates="{'*': '_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png'}" uri="_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png"> + <container mime_type="text/latex"> + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + \displaystyle \left(\sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} - \frac{2 \sqrt{5} i}{5}\right) + \left(- \sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} + \frac{2 \sqrt{5} i}{5}\right) + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + \alpha ⎛1 2⋅√5⋅ⅈ⎞ \alpha ⎛1 2⋅√5⋅ⅈ⎞ + (√5⋅ⅈ) ⋅⎜─ - ──────⎟ + (-√5⋅ⅈ) ⋅⎜─ + ──────⎟ + ⎝2 5 ⎠ ⎝2 5 ⎠ + <container cell_index="25" cell_metadata="{}" classes="cell" exec_count="7" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" xml:space="preserve"> from IPython.display import display, Markdown display(Markdown('**_some_ markdown**')) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <container nb_element="mime_bundle"> + <container mime_type="text/markdown"> + <paragraph> + <strong> + <emphasis> + some + markdown + <container mime_type="text/plain"> + <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> + <IPython.core.display.Markdown object> From 95e4cf86ac82b3af57c25e4ca2a82e2621ba2827 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 6 Jan 2022 03:05:03 +0100 Subject: [PATCH 30/75] fix more tests --- tests/test_parser.py | 53 +++++++++++++++++++++++--------------------- 1 file changed, 28 insertions(+), 25 deletions(-) diff --git a/tests/test_parser.py b/tests/test_parser.py index d5525f89..77897306 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -1,3 +1,4 @@ +"""Test parsing of already executed notebooks.""" import pytest @@ -6,17 +7,20 @@ def test_basic_run(sphinx_run, file_regression): sphinx_run.build() # print(sphinx_run.status()) assert sphinx_run.warnings() == "" - assert set(sphinx_run.app.env.metadata["basic_run"].keys()) == { + assert set(sphinx_run.env.metadata["basic_run"].keys()) == { "test_name", + "wordcount", + } + assert set(sphinx_run.env.nb_metadata["basic_run"].keys()) == { "kernelspec", "language_info", - "wordcount", } - assert sphinx_run.app.env.metadata["basic_run"]["test_name"] == "notebook1" - assert ( - sphinx_run.app.env.metadata["basic_run"]["kernelspec"] - 
== '{"display_name": "Python 3", "language": "python", "name": "python3"}' - ) + assert sphinx_run.env.metadata["basic_run"]["test_name"] == "notebook1" + assert sphinx_run.env.nb_metadata["basic_run"]["kernelspec"] == { + "display_name": "Python 3", + "language": "python", + "name": "python3", + } file_regression.check( sphinx_run.get_doctree().pformat(), extension=".xml", encoding="utf8" ) @@ -24,7 +28,7 @@ def test_basic_run(sphinx_run, file_regression): filenames = { p for p in (sphinx_run.app.srcdir / "_build" / "jupyter_execute").listdir() } - assert filenames == {"basic_run.py", "basic_run.ipynb"} + assert filenames == {"basic_run.ipynb"} @pytest.mark.sphinx_params("complex_outputs.ipynb", conf={"nb_execution_mode": "off"}) @@ -32,27 +36,28 @@ def test_complex_outputs(sphinx_run, file_regression): sphinx_run.build() assert sphinx_run.warnings() == "" - assert set(sphinx_run.app.env.metadata["complex_outputs"].keys()) == { + assert set(sphinx_run.env.metadata["complex_outputs"].keys()) == { "ipub", "hide_input", "nav_menu", "celltoolbar", "latex_envs", - "kernelspec", - "language_info", "jupytext", "toc", "varInspector", "wordcount", } - assert ( - sphinx_run.app.env.metadata["complex_outputs"]["celltoolbar"] == "Edit Metadata" - ) - assert sphinx_run.app.env.metadata["complex_outputs"]["hide_input"] == "False" - assert ( - sphinx_run.app.env.metadata["complex_outputs"]["kernelspec"] - == '{"display_name": "Python 3", "language": "python", "name": "python3"}' - ) + assert set(sphinx_run.env.nb_metadata["complex_outputs"].keys()) == { + "kernelspec", + "language_info", + } + assert sphinx_run.env.metadata["complex_outputs"]["celltoolbar"] == "Edit Metadata" + assert sphinx_run.env.metadata["complex_outputs"]["hide_input"] == "False" + assert sphinx_run.env.nb_metadata["complex_outputs"]["kernelspec"] == { + "display_name": "Python 3", + "language": "python", + "name": "python3", + } file_regression.check( sphinx_run.get_doctree().pformat(), extension=".xml", encoding="utf8" ) @@ -61,13 +66,12 @@ def test_complex_outputs(sphinx_run, file_regression): p.replace(".jpeg", ".jpg") for p in (sphinx_run.app.srcdir / "_build" / "jupyter_execute").listdir() } - print(filenames) + # print(filenames) assert filenames == { - "complex_outputs_17_0.png", + "16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png", + "a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg", + "8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png", "complex_outputs.ipynb", - "complex_outputs.py", - "complex_outputs_24_0.png", - "complex_outputs_13_0.jpg", } @@ -76,7 +80,6 @@ def test_complex_outputs(sphinx_run, file_regression): "latex_build/other.ipynb", conf={"nb_execution_mode": "off"}, buildername="latex", - # working_dir="/Users/cjs14/GitHub/MyST-NB-actual/outputs" ) def test_toctree_in_ipynb(sphinx_run, file_regression): sphinx_run.build() From 26d8f40fd07681da1ee4ddb985bd22b6fc268afc Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 6 Jan 2022 03:24:11 +0100 Subject: [PATCH 31/75] fix collector --- myst_nb/sphinx_.py | 5 + tests/test_parser.py | 12 + tests/test_sphinx_builds.py | 57 ---- tests/test_sphinx_builds/test_basic_run.xml | 14 - .../test_sphinx_builds/test_basic_run_md.xml | 13 - .../test_complex_outputs_run.resolved.xml | 229 --------------- .../test_complex_outputs_run.xml | 272 ------------------ 7 files changed, 17 insertions(+), 585 deletions(-) delete mode 100644 tests/test_sphinx_builds.py delete mode 100644 
tests/test_sphinx_builds/test_basic_run.xml delete mode 100644 tests/test_sphinx_builds/test_basic_run_md.xml delete mode 100644 tests/test_sphinx_builds/test_complex_outputs_run.resolved.xml delete mode 100644 tests/test_sphinx_builds/test_complex_outputs_run.xml diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index e291e1e9..f3958930 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -73,6 +73,8 @@ def sphinx_setup(app: Sphinx): app.connect("config-inited", add_nb_custom_formats) # ensure notebook checkpoints are excluded from parsing app.connect("config-inited", add_exclude_patterns) + # add collector for myst nb specific data + app.add_env_collector(NbMetadataCollector) # TODO add an event which, if any files have been removed, # all jupyter-cache stage records with a non-existent path are removed @@ -635,6 +637,9 @@ def clear_doc(self, app: Sphinx, env: BuildEnvironment, docname: str) -> None: env.nb_metadata = defaultdict(dict) env.nb_metadata.pop(docname, None) + def process_doc(self, app: Sphinx, doctree: nodes.document) -> None: + pass + def merge_other( self, app: Sphinx, diff --git a/tests/test_parser.py b/tests/test_parser.py index 77897306..7da379ff 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -89,3 +89,15 @@ def test_toctree_in_ipynb(sphinx_run, file_regression): sphinx_run.get_doctree("latex_build/other").pformat(), extension=".xml" ) assert sphinx_run.warnings() == "" + + +@pytest.mark.sphinx_params("ipywidgets.ipynb", conf={"nb_execution_mode": "off"}) +def test_ipywidgets(sphinx_run): + """Test that ipywidget state is extracted and JS is included in the HTML head.""" + sphinx_run.build() + # print(sphinx_run.status()) + assert sphinx_run.warnings() == "" + assert "ipywidgets_state" in sphinx_run.env.nb_metadata["ipywidgets"] + head_scripts = sphinx_run.get_html().select("head > script") + assert any("require.js" in script.get("src", "") for script in head_scripts) + assert any("embed-amd.js" in script.get("src", "") for script in head_scripts) diff --git a/tests/test_sphinx_builds.py b/tests/test_sphinx_builds.py deleted file mode 100644 index 43e27505..00000000 --- a/tests/test_sphinx_builds.py +++ /dev/null @@ -1,57 +0,0 @@ -"""Test full sphinx builds.""" -import pytest - - -@pytest.mark.sphinx_params("basic_run.ipynb", conf={"nb_execution_mode": "off"}) -def test_basic_run(sphinx_run, file_regression): - sphinx_run.build() - # print(sphinx_run.status()) - assert sphinx_run.warnings() == "" - assert set(sphinx_run.app.env.metadata["basic_run"].keys()) == { - "test_name", - "kernelspec", - "language_info", - "wordcount", - } - file_regression.check( - sphinx_run.get_doctree().pformat(), extension=".xml", encoding="utf8" - ) - - -@pytest.mark.sphinx_params("basic_unrun.md", conf={"nb_execution_mode": "off"}) -def test_basic_run_md(sphinx_run, file_regression): - sphinx_run.build() - # print(sphinx_run.status()) - assert sphinx_run.warnings() == "" - file_regression.check( - sphinx_run.get_doctree().pformat(), extension=".xml", encoding="utf8" - ) - - -@pytest.mark.sphinx_params("complex_outputs.ipynb", conf={"nb_execution_mode": "off"}) -def test_complex_outputs_run(sphinx_run, file_regression): - sphinx_run.build() - # print(sphinx_run.status()) - assert sphinx_run.warnings() == "" - try: - file_regression.check( - sphinx_run.get_doctree().pformat(), extension=".xml", encoding="utf8" - ) - finally: - file_regression.check( - sphinx_run.get_resolved_doctree().pformat(), - extension=".resolved.xml", - encoding="utf8", - ) - - 
-@pytest.mark.sphinx_params("ipywidgets.ipynb", conf={"nb_execution_mode": "off"}) -def test_ipywidgets(sphinx_run): - """Test that ipywidget state is extracted and JS is included in the HTML head.""" - sphinx_run.build() - # print(sphinx_run.status()) - assert sphinx_run.warnings() == "" - assert "__mystnb__ipywidgets_state" in sphinx_run.env.metadata["ipywidgets"] - head_scripts = sphinx_run.get_html().select("head > script") - assert any("require.js" in script.get("src", "") for script in head_scripts) - assert any("embed-amd.js" in script.get("src", "") for script in head_scripts) diff --git a/tests/test_sphinx_builds/test_basic_run.xml b/tests/test_sphinx_builds/test_basic_run.xml deleted file mode 100644 index 668e5841..00000000 --- a/tests/test_sphinx_builds/test_basic_run.xml +++ /dev/null @@ -1,14 +0,0 @@ -<document source="basic_run"> - <section classes="tex2jax_ignore mathjax_ignore" ids="a-title" names="a\ title"> - <title> - a title - <paragraph> - some text - <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> - <container classes="cell_input" nb_element="cell_code_source"> - <literal_block language="ipython3" xml:space="preserve"> - a=1 - print(a) - <container classes="cell_output" nb_element="cell_code_output"> - <literal_block classes="output stream" language="myst-ansi" xml:space="preserve"> - 1 diff --git a/tests/test_sphinx_builds/test_basic_run_md.xml b/tests/test_sphinx_builds/test_basic_run_md.xml deleted file mode 100644 index a24bae2e..00000000 --- a/tests/test_sphinx_builds/test_basic_run_md.xml +++ /dev/null @@ -1,13 +0,0 @@ -<document source="basic_unrun"> - <section classes="tex2jax_ignore mathjax_ignore" ids="a-title" names="a\ title"> - <title> - a title - <paragraph> - this was created using - <literal> - jupytext --to myst tests/notebooks/basic_unrun.ipynb - <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="True" nb_element="cell_code"> - <container classes="cell_input" nb_element="cell_code_source"> - <literal_block language="python" xml:space="preserve"> - a=1 - print(a) diff --git a/tests/test_sphinx_builds/test_complex_outputs_run.resolved.xml b/tests/test_sphinx_builds/test_complex_outputs_run.resolved.xml deleted file mode 100644 index 16c177bd..00000000 --- a/tests/test_sphinx_builds/test_complex_outputs_run.resolved.xml +++ /dev/null @@ -1,229 +0,0 @@ -<document source="complex_outputs"> - <container cell_index="0" cell_metadata="{'init_cell': True, 'slideshow': {'slide_type': 'skip'}}" classes="cell" exec_count="1" nb_element="cell_code"> - <container classes="cell_input" nb_element="cell_code_source"> - <literal_block language="ipython3" linenos="False" xml:space="preserve"> - import matplotlib.pyplot as plt - import pandas as pd - pd.set_option('display.latex.repr', True) - import sympy as sym - sym.init_printing(use_latex=True) - import numpy as np - from IPython.display import Image, Latex - <section classes="tex2jax_ignore mathjax_ignore" ids="markdown" names="markdown"> - <title> - Markdown - <section ids="general" names="general"> - <title> - General - <paragraph> - Some markdown text. 
- <paragraph> - A list: - <bullet_list bullet="-"> - <list_item> - <paragraph> - something - <list_item> - <paragraph> - something else - <paragraph> - A numbered list - <enumerated_list enumtype="arabic" prefix="" suffix="."> - <list_item> - <paragraph> - something - <list_item> - <paragraph> - something else - <paragraph> - non-ascii characters TODO - <paragraph> - This is a long section of text, which we only want in a document (not a presentation) - - some text - - some more text - - some more text - - some more text - - some more text - - some more text - - some more text - - some more text - - some more text - <paragraph> - This is an abbreviated section of the document text, which we only want in a presentation - <bullet_list bullet="-"> - <list_item> - <paragraph> - summary of document text - <section ids="references-and-citations" names="references\ and\ citations"> - <title> - References and Citations - <paragraph> - References to \cref{fig:example}, \cref{tbl:example}, =@eqn:example_sympy and \cref{code:example_mpl}. - <paragraph> - A latex citation.\cite{zelenyak_molecular_2016} - <paragraph> - A html citation. - <raw format="html" xml:space="preserve"> - <cite data-cite="kirkeminde_thermodynamic_2012"> - (Kirkeminde, 2012) - <raw format="html" xml:space="preserve"> - </cite> - <section ids="todo-notes" names="todo\ notes"> - <title> - Todo notes - <paragraph> - \todo[inline]{an inline todo} - <paragraph> - Some text.\todo{a todo in the margins} - <section classes="tex2jax_ignore mathjax_ignore" ids="text-output" names="text\ output"> - <title> - Text Output - <container cell_index="11" cell_metadata="{'ipub': {'text': {'format': {'backgroundcolor': '\\color{blue!10}'}}}}" classes="cell" exec_count="2" nb_element="cell_code"> - <container classes="cell_input" nb_element="cell_code_source"> - <literal_block language="ipython3" linenos="False" xml:space="preserve"> - print(""" - This is some printed text, - with a nicely formatted output. - """) - <container classes="cell_output" nb_element="cell_code_output"> - <literal_block classes="output stream" language="myst-ansi" linenos="False" xml:space="preserve"> - - This is some printed text, - with a nicely formatted output. - - <section classes="tex2jax_ignore mathjax_ignore" ids="images-and-figures" names="images\ and\ figures"> - <title> - Images and Figures - <container cell_index="13" cell_metadata="{'ipub': {'figure': {'caption': 'A nice picture.', 'label': 'fig:example', 'placement': '!bh'}}}" classes="cell" exec_count="3" nb_element="cell_code"> - <container classes="cell_input" nb_element="cell_code_source"> - <literal_block language="ipython3" linenos="False" xml:space="preserve"> - Image('example.jpg',height=400) - <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="image/jpeg"> - <image candidates="{'*': '_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg'}" uri="_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg"> - <section ids="displaying-a-plot-with-its-code" names="displaying\ a\ plot\ with\ its\ code"> - <title> - Displaying a plot with its code - <paragraph> - A matplotlib figure, with the caption set in the markdowncell above the figure. - <paragraph> - The plotting code for a matplotlib figure (\cref{fig:example_mpl}). 
- <container cell_index="17" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': 'a', 'label': 'code:example_mpl', 'widefigure': False}, 'figure': {'caption': '', 'label': 'fig:example_mpl', 'widefigure': False}}}" classes="cell" exec_count="4" nb_element="cell_code"> - <container classes="cell_input" nb_element="cell_code_source"> - <literal_block language="ipython3" linenos="False" xml:space="preserve"> - plt.scatter(np.random.rand(10), np.random.rand(10), - label='data label') - plt.ylabel(r'a y label with latex $\alpha$') - plt.legend(); - <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="image/png"> - <image candidates="{'*': '_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png'}" uri="_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png"> - <section classes="tex2jax_ignore mathjax_ignore" ids="tables-with-pandas" names="tables\ (with\ pandas)"> - <title> - Tables (with pandas) - <paragraph> - The plotting code for a pandas Dataframe table (\cref{tbl:example}). - <container cell_index="20" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_pd', 'placement': 'H', 'widefigure': False}, 'table': {'alternate': 'gray!20', 'caption': 'An example of a table created with pandas dataframe.', 'label': 'tbl:example', 'placement': 'H'}}}" classes="cell" exec_count="5" nb_element="cell_code"> - <container classes="cell_input" nb_element="cell_code_source"> - <literal_block language="ipython3" linenos="False" xml:space="preserve"> - df = pd.DataFrame(np.random.rand(3,4),columns=['a','b','c','d']) - df.a = ['$\delta$','x','y'] - df.b = ['l','m','n'] - df.set_index(['a','b']) - df.round(3) - <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="text/html"> - <raw classes="output text_html" format="html" xml:space="preserve"> - <div> - <style scoped> - .dataframe tbody tr th:only-of-type { - vertical-align: middle; - } - - .dataframe tbody tr th { - vertical-align: top; - } - - .dataframe thead th { - text-align: right; - } - </style> - <table border="1" class="dataframe"> - <thead> - <tr style="text-align: right;"> - <th></th> - <th>a</th> - <th>b</th> - <th>c</th> - <th>d</th> - </tr> - </thead> - <tbody> - <tr> - <th>0</th> - <td>$\delta$</td> - <td>l</td> - <td>0.391</td> - <td>0.607</td> - </tr> - <tr> - <th>1</th> - <td>x</td> - <td>m</td> - <td>0.132</td> - <td>0.205</td> - </tr> - <tr> - <th>2</th> - <td>y</td> - <td>n</td> - <td>0.969</td> - <td>0.726</td> - </tr> - </tbody> - </table> - </div> - <section classes="tex2jax_ignore mathjax_ignore" ids="equations-with-ipython-or-sympy" names="equations\ (with\ ipython\ or\ sympy)"> - <title> - Equations (with ipython or sympy) - <container cell_index="22" cell_metadata="{'ipub': {'equation': {'label': 'eqn:example_ipy'}}}" classes="cell" exec_count="6" nb_element="cell_code"> - <container classes="cell_input" nb_element="cell_code_source"> - <literal_block language="ipython3" linenos="False" xml:space="preserve"> - Latex('$$ a = b+c $$') - <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="text/latex"> - <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> - a = b+c - <paragraph> - The plotting code for a sympy equation (=@eqn:example_sympy). 
- <container cell_index="24" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_sym', 'placement': 'H', 'widefigure': False}, 'equation': {'environment': 'equation', 'label': 'eqn:example_sympy'}}}" classes="cell" exec_count="7" nb_element="cell_code"> - <container classes="cell_input" nb_element="cell_code_source"> - <literal_block language="ipython3" linenos="False" xml:space="preserve"> - y = sym.Function('y') - n = sym.symbols(r'\alpha') - f = y(n)-2*y(n-1/sym.pi)-5*y(n-2) - sym.rsolve(f,y(n),[1,4]) - <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="image/png"> - <image candidates="{'*': '_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png'}" uri="_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png"> - <container cell_index="25" cell_metadata="{}" classes="cell" exec_count="7" nb_element="cell_code"> - <container classes="cell_input" nb_element="cell_code_source"> - <literal_block language="ipython3" linenos="False" xml:space="preserve"> - from IPython.display import display, Markdown - display(Markdown('**_some_ markdown**')) - <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="text/markdown"> - <paragraph> - <strong> - <emphasis> - some - markdown diff --git a/tests/test_sphinx_builds/test_complex_outputs_run.xml b/tests/test_sphinx_builds/test_complex_outputs_run.xml deleted file mode 100644 index d673be65..00000000 --- a/tests/test_sphinx_builds/test_complex_outputs_run.xml +++ /dev/null @@ -1,272 +0,0 @@ -<document source="complex_outputs"> - <container cell_index="0" cell_metadata="{'init_cell': True, 'slideshow': {'slide_type': 'skip'}}" classes="cell" exec_count="1" nb_element="cell_code"> - <container classes="cell_input" nb_element="cell_code_source"> - <literal_block language="ipython3" xml:space="preserve"> - import matplotlib.pyplot as plt - import pandas as pd - pd.set_option('display.latex.repr', True) - import sympy as sym - sym.init_printing(use_latex=True) - import numpy as np - from IPython.display import Image, Latex - <section classes="tex2jax_ignore mathjax_ignore" ids="markdown" names="markdown"> - <title> - Markdown - <section ids="general" names="general"> - <title> - General - <paragraph> - Some markdown text. - <paragraph> - A list: - <bullet_list bullet="-"> - <list_item> - <paragraph> - something - <list_item> - <paragraph> - something else - <paragraph> - A numbered list - <enumerated_list enumtype="arabic" prefix="" suffix="."> - <list_item> - <paragraph> - something - <list_item> - <paragraph> - something else - <paragraph> - non-ascii characters TODO - <paragraph> - This is a long section of text, which we only want in a document (not a presentation) - - some text - - some more text - - some more text - - some more text - - some more text - - some more text - - some more text - - some more text - - some more text - <paragraph> - This is an abbreviated section of the document text, which we only want in a presentation - <bullet_list bullet="-"> - <list_item> - <paragraph> - summary of document text - <section ids="references-and-citations" names="references\ and\ citations"> - <title> - References and Citations - <paragraph> - References to \cref{fig:example}, \cref{tbl:example}, =@eqn:example_sympy and \cref{code:example_mpl}. - <paragraph> - A latex citation.\cite{zelenyak_molecular_2016} - <paragraph> - A html citation. 
- <raw format="html" xml:space="preserve"> - <cite data-cite="kirkeminde_thermodynamic_2012"> - (Kirkeminde, 2012) - <raw format="html" xml:space="preserve"> - </cite> - <section ids="todo-notes" names="todo\ notes"> - <title> - Todo notes - <paragraph> - \todo[inline]{an inline todo} - <paragraph> - Some text.\todo{a todo in the margins} - <section classes="tex2jax_ignore mathjax_ignore" ids="text-output" names="text\ output"> - <title> - Text Output - <container cell_index="11" cell_metadata="{'ipub': {'text': {'format': {'backgroundcolor': '\\color{blue!10}'}}}}" classes="cell" exec_count="2" nb_element="cell_code"> - <container classes="cell_input" nb_element="cell_code_source"> - <literal_block language="ipython3" xml:space="preserve"> - print(""" - This is some printed text, - with a nicely formatted output. - """) - <container classes="cell_output" nb_element="cell_code_output"> - <literal_block classes="output stream" language="myst-ansi" xml:space="preserve"> - - This is some printed text, - with a nicely formatted output. - - <section classes="tex2jax_ignore mathjax_ignore" ids="images-and-figures" names="images\ and\ figures"> - <title> - Images and Figures - <container cell_index="13" cell_metadata="{'ipub': {'figure': {'caption': 'A nice picture.', 'label': 'fig:example', 'placement': '!bh'}}}" classes="cell" exec_count="3" nb_element="cell_code"> - <container classes="cell_input" nb_element="cell_code_source"> - <literal_block language="ipython3" xml:space="preserve"> - Image('example.jpg',height=400) - <container classes="cell_output" nb_element="cell_code_output"> - <container nb_element="mime_bundle"> - <container mime_type="image/jpeg"> - <image candidates="{'*': '_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg'}" uri="_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg"> - <container mime_type="text/plain"> - <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> - <IPython.core.display.Image object> - <section ids="displaying-a-plot-with-its-code" names="displaying\ a\ plot\ with\ its\ code"> - <title> - Displaying a plot with its code - <paragraph> - A matplotlib figure, with the caption set in the markdowncell above the figure. - <paragraph> - The plotting code for a matplotlib figure (\cref{fig:example_mpl}). 
- <container cell_index="17" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': 'a', 'label': 'code:example_mpl', 'widefigure': False}, 'figure': {'caption': '', 'label': 'fig:example_mpl', 'widefigure': False}}}" classes="cell" exec_count="4" nb_element="cell_code"> - <container classes="cell_input" nb_element="cell_code_source"> - <literal_block language="ipython3" xml:space="preserve"> - plt.scatter(np.random.rand(10), np.random.rand(10), - label='data label') - plt.ylabel(r'a y label with latex $\alpha$') - plt.legend(); - <container classes="cell_output" nb_element="cell_code_output"> - <container nb_element="mime_bundle"> - <container mime_type="image/png"> - <image candidates="{'*': '_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png'}" uri="_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png"> - <container mime_type="text/plain"> - <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> - <Figure size 432x288 with 1 Axes> - <section classes="tex2jax_ignore mathjax_ignore" ids="tables-with-pandas" names="tables\ (with\ pandas)"> - <title> - Tables (with pandas) - <paragraph> - The plotting code for a pandas Dataframe table (\cref{tbl:example}). - <container cell_index="20" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_pd', 'placement': 'H', 'widefigure': False}, 'table': {'alternate': 'gray!20', 'caption': 'An example of a table created with pandas dataframe.', 'label': 'tbl:example', 'placement': 'H'}}}" classes="cell" exec_count="5" nb_element="cell_code"> - <container classes="cell_input" nb_element="cell_code_source"> - <literal_block language="ipython3" xml:space="preserve"> - df = pd.DataFrame(np.random.rand(3,4),columns=['a','b','c','d']) - df.a = ['$\delta$','x','y'] - df.b = ['l','m','n'] - df.set_index(['a','b']) - df.round(3) - <container classes="cell_output" nb_element="cell_code_output"> - <container nb_element="mime_bundle"> - <container mime_type="text/html"> - <raw classes="output text_html" format="html" xml:space="preserve"> - <div> - <style scoped> - .dataframe tbody tr th:only-of-type { - vertical-align: middle; - } - - .dataframe tbody tr th { - vertical-align: top; - } - - .dataframe thead th { - text-align: right; - } - </style> - <table border="1" class="dataframe"> - <thead> - <tr style="text-align: right;"> - <th></th> - <th>a</th> - <th>b</th> - <th>c</th> - <th>d</th> - </tr> - </thead> - <tbody> - <tr> - <th>0</th> - <td>$\delta$</td> - <td>l</td> - <td>0.391</td> - <td>0.607</td> - </tr> - <tr> - <th>1</th> - <td>x</td> - <td>m</td> - <td>0.132</td> - <td>0.205</td> - </tr> - <tr> - <th>2</th> - <td>y</td> - <td>n</td> - <td>0.969</td> - <td>0.726</td> - </tr> - </tbody> - </table> - </div> - <container mime_type="text/latex"> - <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> - \begin{tabular}{lllrr} - \toprule - {} & a & b & c & d \\ - \midrule - 0 & \$\textbackslash delta\$ & l & 0.391 & 0.607 \\ - 1 & x & m & 0.132 & 0.205 \\ - 2 & y & n & 0.969 & 0.726 \\ - \bottomrule - \end{tabular} - <container mime_type="text/plain"> - <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> - a b c d - 0 $\delta$ l 0.391 0.607 - 1 x m 0.132 0.205 - 2 y n 0.969 0.726 - <section classes="tex2jax_ignore mathjax_ignore" ids="equations-with-ipython-or-sympy" names="equations\ (with\ ipython\ or\ sympy)"> - <title> - Equations (with 
ipython or sympy) - <container cell_index="22" cell_metadata="{'ipub': {'equation': {'label': 'eqn:example_ipy'}}}" classes="cell" exec_count="6" nb_element="cell_code"> - <container classes="cell_input" nb_element="cell_code_source"> - <literal_block language="ipython3" xml:space="preserve"> - Latex('$$ a = b+c $$') - <container classes="cell_output" nb_element="cell_code_output"> - <container nb_element="mime_bundle"> - <container mime_type="text/latex"> - <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> - a = b+c - <container mime_type="text/plain"> - <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> - <IPython.core.display.Latex object> - <paragraph> - The plotting code for a sympy equation (=@eqn:example_sympy). - <container cell_index="24" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_sym', 'placement': 'H', 'widefigure': False}, 'equation': {'environment': 'equation', 'label': 'eqn:example_sympy'}}}" classes="cell" exec_count="7" nb_element="cell_code"> - <container classes="cell_input" nb_element="cell_code_source"> - <literal_block language="ipython3" xml:space="preserve"> - y = sym.Function('y') - n = sym.symbols(r'\alpha') - f = y(n)-2*y(n-1/sym.pi)-5*y(n-2) - sym.rsolve(f,y(n),[1,4]) - <container classes="cell_output" nb_element="cell_code_output"> - <container nb_element="mime_bundle"> - <container mime_type="image/png"> - <image candidates="{'*': '_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png'}" uri="_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png"> - <container mime_type="text/latex"> - <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> - \displaystyle \left(\sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} - \frac{2 \sqrt{5} i}{5}\right) + \left(- \sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} + \frac{2 \sqrt{5} i}{5}\right) - <container mime_type="text/plain"> - <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> - \alpha ⎛1 2⋅√5⋅ⅈ⎞ \alpha ⎛1 2⋅√5⋅ⅈ⎞ - (√5⋅ⅈ) ⋅⎜─ - ──────⎟ + (-√5⋅ⅈ) ⋅⎜─ + ──────⎟ - ⎝2 5 ⎠ ⎝2 5 ⎠ - <container cell_index="25" cell_metadata="{}" classes="cell" exec_count="7" nb_element="cell_code"> - <container classes="cell_input" nb_element="cell_code_source"> - <literal_block language="ipython3" xml:space="preserve"> - from IPython.display import display, Markdown - display(Markdown('**_some_ markdown**')) - <container classes="cell_output" nb_element="cell_code_output"> - <container nb_element="mime_bundle"> - <container mime_type="text/markdown"> - <paragraph> - <strong> - <emphasis> - some - markdown - <container mime_type="text/plain"> - <literal_block classes="output text_plain" language="myst-ansi" xml:space="preserve"> - <IPython.core.display.Markdown object> From 2c38c16669f5fe034b7b2a9eec4aa81db3dea144 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 6 Jan 2022 03:39:42 +0100 Subject: [PATCH 32/75] typo fix --- myst_nb/execute.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/myst_nb/execute.py b/myst_nb/execute.py index d1ee9152..92a652f6 100644 --- a/myst_nb/execute.py +++ b/myst_nb/execute.py @@ -91,8 +91,8 @@ def update_notebook( with cwd_context as cwd: cwd = os.path.abspath(cwd) logger.info( - "Executing notebook using" - + ("tempdir" if nb_config.execution_in_temp else "local") + "Executing notebook using " 
+ + ("temporary" if nb_config.execution_in_temp else "local") + " CWD" ) result = single_nb_execution( @@ -161,8 +161,8 @@ def update_notebook( with cwd_context as cwd: cwd = os.path.abspath(cwd) logger.info( - "Executing notebook using" - + ("tempdir" if nb_config.execution_in_temp else "local") + "Executing notebook using " + + ("temporary" if nb_config.execution_in_temp else "local") + " CWD" ) result = single_nb_execution( From 0a06ecfdf7dffca56229ce40a7a2ca6fa61905a1 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 6 Jan 2022 17:24:54 +0100 Subject: [PATCH 33/75] And notebook level overrides of configuration --- docs/use/formatting_outputs.md | 2 +- myst_nb/configuration.py | 53 ++++++++++++++++++++++++---------- myst_nb/docutils_.py | 17 +++++++++-- myst_nb/execute.py | 4 ++- myst_nb/render.py | 25 ++++++++++++++-- myst_nb/sphinx_.py | 24 +++++++++++---- 6 files changed, 98 insertions(+), 27 deletions(-) diff --git a/docs/use/formatting_outputs.md b/docs/use/formatting_outputs.md index f921bf47..5293ad50 100644 --- a/docs/use/formatting_outputs.md +++ b/docs/use/formatting_outputs.md @@ -19,7 +19,7 @@ kernelspec: When Jupyter executes a code cell it can produce multiple outputs, and each of these outputs can contain multiple [MIME media types](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types), for use by different output formats (like HTML or LaTeX). -MyST-NB stores a default priority dictionary for most of the common [Sphinx builders](https://www.sphinx-doc.org/en/master/usage/builders/index.html), which you can be also update in your `conf.py`. +MyST-NB stores a default priority dictionary for most of the common [Sphinx builder names](https://www.sphinx-doc.org/en/master/usage/builders/index.html), which you can be also update in your `conf.py`. For example, this is the default priority list for HTML: ```python diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index 94b62054..d4abc641 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -51,7 +51,6 @@ def render_priority_factory() -> Dict[str, Sequence[str]]: # generated with: # [(b.name, b.format, b.supported_image_types) # for b in app.registry.builders.values()] - # TODO potentially could auto-generate html_builders = [ ("epub", "html", ["image/svg+xml", "image/png", "image/gif", "image/jpeg"]), ("html", "html", ["image/svg+xml", "image/png", "image/gif", "image/jpeg"]), @@ -158,14 +157,6 @@ class NbParserConfig: these option names are prepended with ``nb_`` """ - # TODO: nb_render_key - - # TODO jupyter_sphinx_require_url, jupyter_sphinx_embed_url, - # are no longer used by this package, replaced by ipywidgets_js - # do we add any deprecation warnings? 
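# Editor's illustrative sketch (an assumption, not part of the patch): overriding
# the builder-name -> mime-type priority mapping described in the docs change
# above from a project's conf.py. The option name follows the ``nb_`` prefixing
# convention noted for NbParserConfig fields (``render_priority`` becomes
# ``nb_render_priority``); the exact list of mime types is only an example.
nb_render_priority = {
    "html": (
        "application/vnd.jupyter.widget-view+json",
        "text/html",
        "image/svg+xml",
        "image/png",
        "image/jpeg",
        "text/markdown",
        "text/latex",
        "text/plain",
    ),
}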
- - # TODO mark which config are allowed per notebook/cell - # file read options custom_formats: Dict[str, Tuple[str, dict, bool]] = attr.ib( @@ -176,7 +167,7 @@ class NbParserConfig: "docutils_exclude": True, }, ) - # docutils does not support directly the custom format mechanism + # docutils does not support the custom formats mechanism read_as_md: bool = attr.ib( default=False, validator=instance_of(bool), @@ -187,6 +178,25 @@ class NbParserConfig: repr=False, ) + # configuration override keys (applied after file read) + + # TODO mark which config are allowed per notebook/cell + # TODO previously we had `nb_render_key` (default: "render"), + # for cell.metadata.render.image and cell.metadata.render.figure`, + # and also `timeout`/`allow_errors` in notebook.metadata.execution + # do we still support these or deprecate? + # (plus also cell.metadata.tags: + # nbclient: `skip-execution` and `raises-exception`, + # myst_nb: `remove_cell`, `remove-cell`, `remove_input`, `remove-input`, + # `remove_output`, `remove-output`, `remove-stderr` + # ) + # see also: + # https://nbformat.readthedocs.io/en/latest/format_description.html#cell-metadata + metadata_key: str = attr.ib( + default="mystnb", # TODO agree this as the default + validator=instance_of(str), + metadata={"help": "Notebook level metadata key for config overrides"}, + ) # notebook execution options execution_mode: Literal["off", "force", "auto", "cache"] = attr.ib( @@ -266,6 +276,14 @@ class NbParserConfig: "sphinx_exclude": True, # in sphinx we always output to the build folder }, ) + render_plugin: str = attr.ib( + default="default", + validator=instance_of(str), # TODO check it can be loaded? + metadata={ + "help": "The entry point for the execution output render class " + "(in group `myst_nb.output_renderer`)" + }, + ) remove_code_source: bool = attr.ib( default=False, validator=instance_of(bool), @@ -302,6 +320,7 @@ class NbParserConfig: ), metadata={"help": "Behaviour for stderr output"}, ) + # TODO this needs to be implemented embed_markdown_outputs: bool = attr.ib( default=False, validator=instance_of(bool), @@ -349,14 +368,18 @@ class NbParserConfig: validator=optional(instance_of(str)), # TODO check it can be loaded? metadata={"help": "Pygments lexer applied to error/traceback outputs"}, ) - render_plugin: str = attr.ib( - default="default", - validator=instance_of(str), # TODO check it can be loaded? + render_image_options: Dict[str, str] = attr.ib( + factory=dict, + validator=deep_mapping(instance_of(str), instance_of((str, int))), + # see https://docutils.sourceforge.io/docs/ref/rst/directives.html#image metadata={ - "help": "The entry point for the execution output render class " - "(in group `myst_nb.output_renderer`)" + "help": "Options for image outputs (class|alt|height|width|scale|align)", + "docutils_exclude": True, }, ) + # TODO jupyter_sphinx_require_url and jupyter_sphinx_embed_url (undocumented), + # are no longer used by this package, replaced by ipywidgets_js + # do we add any deprecation warnings? 
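# Editor's illustrative sketch (not part of the patch): how the attrs-based
# override mechanism used by NbParserConfig behaves. ``ExampleConfig`` and its
# single field are invented for illustration; the real class defines the many
# fields above and below.
import attr
from attr.validators import instance_of


@attr.s()
class ExampleConfig:
    merge_streams: bool = attr.ib(default=False, validator=instance_of(bool))

    def copy(self, **changes):
        # mirrors NbParserConfig.copy: a new, re-validated instance
        return attr.evolve(self, **changes)


base = ExampleConfig()
overridden = base.copy(merge_streams=True)  # e.g. from notebook metadata["mystnb"]
assert base.merge_streams is False and overridden.merge_streams is True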
ipywidgets_js: Dict[str, Dict[str, str]] = attr.ib( factory=ipywidgets_js_factory, validator=deep_mapping( diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 0b5201b4..d4086d8e 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -18,7 +18,7 @@ from myst_nb.configuration import NbParserConfig from myst_nb.execute import update_notebook from myst_nb.loggers import DEFAULT_LOG_TYPE, DocutilsDocLogger -from myst_nb.parse import notebook_to_tokens +from myst_nb.parse import nb_node_to_dict, notebook_to_tokens from myst_nb.read import ( NbReader, UnexpectedCellDirective, @@ -96,7 +96,20 @@ def parse(self, inputstring: str, document: nodes.document) -> None: nb_reader = NbReader(standard_nb_read, md_config) notebook = nb_reader.read(inputstring) - # TODO update nb_config from notebook metadata + # Update mystnb configuration with notebook level metadata + if nb_config.metadata_key in notebook.metadata: + overrides = nb_node_to_dict(notebook.metadata[nb_config.metadata_key]) + try: + nb_config = nb_config.copy(**overrides) + except Exception as exc: + logger.warning( + f"Failed to update configuration with notebook metadata: {exc}", + subtype="config", + ) + else: + logger.debug( + "Updated configuration with notebook metadata", subtype="config" + ) # potentially execute notebook and/or populate outputs from cache notebook, exec_data = update_notebook( diff --git a/myst_nb/execute.py b/myst_nb/execute.py index 92a652f6..f6f5337c 100644 --- a/myst_nb/execute.py +++ b/myst_nb/execute.py @@ -100,6 +100,7 @@ def update_notebook( cwd=cwd, allow_errors=nb_config.execution_allow_errors, timeout=nb_config.execution_timeout, + meta_override=True, # TODO still support this? ) if result.err is not None: @@ -123,7 +124,7 @@ def update_notebook( # setup the cache cache = get_cache(nb_config.execution_cache_path or ".jupyter_cache") - # TODO config on what notebook/cell metadata to merge + # TODO config on what notebook/cell metadata to hash/merge # attempt to match the notebook to one in the cache cache_record = None @@ -170,6 +171,7 @@ def update_notebook( cwd=cwd, allow_errors=nb_config.execution_allow_errors, timeout=nb_config.execution_timeout, + meta_override=True, # TODO still support this? 
) # handle success / failure cases diff --git a/myst_nb/render.py b/myst_nb/render.py index d0997d76..b8c21434 100644 --- a/myst_nb/render.py +++ b/myst_nb/render.py @@ -11,6 +11,7 @@ from typing import TYPE_CHECKING, List, Union from docutils import nodes +from docutils.parsers.rst import directives as options_spec from importlib_metadata import entry_points from myst_parser.main import MdParserConfig, create_md_parser from nbformat import NotebookNode @@ -208,7 +209,7 @@ def render_stderr( :param source_line: the line number of the cell in the source document """ metadata = self.get_cell_metadata(cell_index) - if "remove-stdout" in metadata.get("tags", []): + if "remove-stderr" in metadata.get("tags", []): return [] output_stderr = self.renderer.get_nb_config("output_stderr", cell_index) msg = f"stderr was found in the cell outputs of cell {cell_index + 1}" @@ -423,8 +424,26 @@ def render_image( data_hash = hashlib.sha256(data_bytes).hexdigest() filename = f"{data_hash}{extension}" uri = self.write_file([filename], data_bytes, overwrite=False, exists_ok=True) - # TODO add additional attributes - return [nodes.image(uri=uri)] + image_node = nodes.image(uri=uri) + # apply attributes to the image node + image_options = self.renderer.get_nb_config("render_image_options", cell_index) + for key, spec in [ + ("classes", options_spec.class_option), # only for back-compatibility + ("class", options_spec.class_option), + ("alt", options_spec.unchanged), + ("height", options_spec.length_or_unitless), + ("width", options_spec.length_or_percentage_or_unitless), + ("scale", options_spec.percentage), + ("align", lambda a: options_spec.choice(a, ("left", "center", "right"))), + ]: + if key not in image_options: + continue + try: + image_node[key] = spec(image_options[key]) + except Exception as exc: + msg = f"Invalid image option ({key!r}; {image_options[key]!r}): {exc}" + self.logger.warning(msg, subtype="image", line=source_line) + return [image_node] def render_javascript( self, data: str, cell_index: int, source_line: int diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index f3958930..a25b37a8 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -28,7 +28,7 @@ from myst_nb.execute import ExecutionResult, update_notebook from myst_nb.loggers import DEFAULT_LOG_TYPE, SphinxDocLogger from myst_nb.nb_glue.domain import NbGlueDomain -from myst_nb.parse import notebook_to_tokens +from myst_nb.parse import nb_node_to_dict, notebook_to_tokens from myst_nb.read import UnexpectedCellDirective, create_nb_reader from myst_nb.render import ( WIDGET_STATE_MIMETYPE, @@ -244,14 +244,26 @@ def parse(self, inputstring: str, document: nodes.document) -> None: # create a reader for the notebook nb_reader = create_nb_reader(document_path, md_config, nb_config, inputstring) - # If the nb_reader is None, then we default to a standard Markdown parser if nb_reader is None: return super().parse(inputstring, document) - notebook = nb_reader.read(inputstring) - # TODO update nb_config from notebook metadata + # Update mystnb configuration with notebook level metadata + if nb_config.metadata_key in notebook.metadata: + overrides = nb_node_to_dict(notebook.metadata[nb_config.metadata_key]) + overrides.pop("output_folder", None) # this should not be overridden + try: + nb_config = nb_config.copy(**overrides) + except Exception as exc: + logger.warning( + f"Failed to update configuration with notebook metadata: {exc}", + subtype="config", + ) + else: + logger.debug( + "Updated configuration with notebook metadata", 
subtype="config" + ) # potentially execute notebook and/or populate outputs from cache notebook, exec_data = update_notebook( @@ -291,7 +303,7 @@ def parse(self, inputstring: str, document: nodes.document) -> None: # convert to docutils AST, which is added to the document mdit_parser.renderer.render(mdit_tokens, mdit_parser.options, mdit_env) - # write updated notebook to output folder + # write final (updated) notebook to output folder # TODO currently this has to be done after the render has been called/setup # utf-8 is the de-facto standard encoding for notebooks. content = nbformat.writes(notebook).encode("utf-8") @@ -315,8 +327,10 @@ def nb_renderer(self) -> NbElementRenderer: def get_nb_config(self, key: str, cell_index: Optional[int]) -> Any: # TODO selection between config/notebook/cell level + # do we also apply the validator here, at least for cell level metadata # (we can maybe update the nb_config with notebook level metadata in parser) # TODO handle KeyError better + # TODO should this be on NbElementRenderer? return self.config["nb_config"][key] def render_nb_metadata(self, token: SyntaxTreeNode) -> None: From 156d389b3a7b320e63250aa989554dae7e96089e Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 6 Jan 2022 21:21:00 +0100 Subject: [PATCH 34/75] Add per-cell configuration --- docs/use/formatting_outputs.md | 4 +- myst_nb/configuration.py | 52 ++++++++++++--- myst_nb/docutils_.py | 51 +++++++++++---- myst_nb/render.py | 37 ++++++++--- myst_nb/sphinx_.py | 64 ++++++++++++------ tests/notebooks/metadata_image.ipynb | 15 +---- tests/notebooks/unknown_mimetype.ipynb | 1 - tests/test_render_outputs.py | 65 ++++++++++--------- .../test_metadata_image.xml | 21 ++---- tests/test_render_outputs/test_stderr_tag.xml | 2 - .../test_unknown_mimetype.xml | 7 ++ 11 files changed, 204 insertions(+), 115 deletions(-) create mode 100644 tests/test_render_outputs/test_unknown_mimetype.xml diff --git a/docs/use/formatting_outputs.md b/docs/use/formatting_outputs.md index 5293ad50..51c7568f 100644 --- a/docs/use/formatting_outputs.md +++ b/docs/use/formatting_outputs.md @@ -104,7 +104,7 @@ Normally, slight differences in timing may result in different orders of `stderr ## Images With the default renderer, for any image types output by the code, we can apply formatting *via* cell metadata. -The top-level metadata key can be set using `nb_render_key` in your `conf.py`, and is set to `render` by default. +The top-level metadata key can be set using `nb_cell_render_key` in your `conf.py`, and is set to `render` by default. Then for the image we can apply all the variables of the standard [image directive](https://docutils.sourceforge.io/docs/ref/rst/directives.html#image): - **width**: length or percentage (%) of the current line width @@ -122,6 +122,7 @@ We can also set a caption (which is rendered as [CommonMark](https://commonmark. 
```{code-cell} ipython3 --- render: + number_source_lines: true image: width: 200px alt: fun-fish @@ -139,6 +140,7 @@ Image("images/fun-fish.png") ```{code-cell} ipython3 --- render: + number_source_lines: true image: width: 300px alt: fun-fish diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index d4abc641..1a0b6ef8 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -197,6 +197,7 @@ class NbParserConfig: validator=instance_of(str), metadata={"help": "Notebook level metadata key for config overrides"}, ) + # notebook execution options execution_mode: Literal["off", "force", "auto", "cache"] = attr.ib( @@ -284,25 +285,36 @@ class NbParserConfig: "(in group `myst_nb.output_renderer`)" }, ) + cell_render_key: str = attr.ib( + default="render", + validator=instance_of(str), + metadata={ + "help": "Cell level metadata key to use for render config", + "legacy_name": "nb_render_key", + }, + ) remove_code_source: bool = attr.ib( default=False, validator=instance_of(bool), - metadata={"help": "Remove code cell source"}, + metadata={"help": "Remove code cell source", "cell_metadata": True}, ) remove_code_outputs: bool = attr.ib( default=False, validator=instance_of(bool), - metadata={"help": "Remove code cell outputs"}, + metadata={"help": "Remove code cell outputs", "cell_metadata": True}, ) number_source_lines: bool = attr.ib( default=False, validator=instance_of(bool), - metadata={"help": "Number code cell source lines"}, + metadata={"help": "Number code cell source lines", "cell_metadata": True}, ) merge_streams: bool = attr.ib( default=False, validator=instance_of(bool), - metadata={"help": "Merge stdout/stderr execution output streams"}, + metadata={ + "help": "Merge stdout/stderr execution output streams", + "cell_metadata": True, + }, ) output_stderr: Literal[ "show", "remove", "remove-warn", "warn", "error", "severe" @@ -318,13 +330,16 @@ class NbParserConfig: "severe", ] ), - metadata={"help": "Behaviour for stderr output"}, + metadata={"help": "Behaviour for stderr output", "cell_metadata": True}, ) # TODO this needs to be implemented embed_markdown_outputs: bool = attr.ib( default=False, validator=instance_of(bool), - metadata={"help": "Embed markdown outputs"}, # TODO better help text + metadata={ + "help": "Embed markdown outputs", # TODO better help text + "cell_metadata": True, + }, ) # docutils does not allow for the dictionaries in its configuration, # and also there is no API for the parser to know the output format, so @@ -342,7 +357,11 @@ class NbParserConfig: "text/plain", ), validator=deep_iterable(instance_of(str)), - metadata={"help": "Render priority for mime types", "sphinx_exclude": True}, + metadata={ + "help": "Render priority for mime types", + "sphinx_exclude": True, + "cell_metadata": True, + }, repr=False, ) render_priority: Dict[str, Sequence[str]] = attr.ib( @@ -359,14 +378,18 @@ class NbParserConfig: # TODO allow None -> "none"? validator=optional(instance_of(str)), # TODO check it can be loaded? metadata={ - "help": "Pygments lexer applied to stdout/stderr and text/plain outputs" + "help": "Pygments lexer applied to stdout/stderr and text/plain outputs", + "cell_metadata": "text_lexer", }, ) render_error_lexer: str = attr.ib( default="ipythontb", # TODO allow None -> "none"? validator=optional(instance_of(str)), # TODO check it can be loaded? 
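# Editor's illustrative sketch (an assumption, not part of the patch): with the
# default ``cell_render_key = "render"``, per-cell overrides such as the options
# marked ``cell_metadata`` above could sit in a cell's metadata like this:
example_cell_metadata = {
    "render": {
        "remove_code_source": True,  # hide the input of this cell only
        "image": {"width": "300px", "alt": "fun-fish"},  # per-cell image options
    },
    "tags": ["remove-stderr"],  # tag-based behaviour still applies
}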
- metadata={"help": "Pygments lexer applied to error/traceback outputs"}, + metadata={ + "help": "Pygments lexer applied to error/traceback outputs", + "cell_metadata": "error_lexer", + }, ) render_image_options: Dict[str, str] = attr.ib( factory=dict, @@ -375,6 +398,8 @@ class NbParserConfig: metadata={ "help": "Options for image outputs (class|alt|height|width|scale|align)", "docutils_exclude": True, + # TODO backward-compatible change to "image_options"? + "cell_metadata": "image", }, ) # TODO jupyter_sphinx_require_url and jupyter_sphinx_embed_url (undocumented), @@ -408,3 +433,12 @@ def as_triple(self) -> Iterable[Tuple[str, Any, attr.Attribute]]: def copy(self, **changes) -> "NbParserConfig": """Return a copy of the configuration with optional changes applied.""" return attr.evolve(self, **changes) + + def __getitem__(self, field: str) -> Any: + """Get a field value by name.""" + if field in ("get_fields", "as_dict", "as_triple", "copy"): + raise KeyError(field) + try: + return getattr(self, field) + except AttributeError: + raise KeyError(field) diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index d4086d8e..3ff8c7a1 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -122,7 +122,7 @@ def parse(self, inputstring: str, document: nodes.document) -> None: mdit_parser = create_md_parser(nb_reader.md_config, DocutilsNbRenderer) mdit_parser.options["document"] = document mdit_parser.options["notebook"] = notebook - mdit_parser.options["nb_config"] = nb_config.as_dict() + mdit_parser.options["nb_config"] = nb_config mdit_env: Dict[str, Any] = {} # load notebook element renderer class from entry-point name @@ -157,15 +157,41 @@ def nb_renderer(self) -> NbElementRenderer: """Get the notebook element renderer.""" return self.config["nb_renderer"] - # TODO maybe move more things to NbOutputRenderer? - # and change name to e.g. NbElementRenderer + def get_nb_config(self, key: str) -> Any: + """Get a notebook level configuration value. - def get_nb_config(self, key: str, cell_index: Optional[int]) -> Any: - # TODO selection between config/notebook/cell level - # (we can maybe update the nb_config with notebook level metadata in parser) - # TODO handle KeyError better + :raises: KeyError if the key is not found + """ return self.config["nb_config"][key] + def get_cell_render_config( + self, + cell_index: int, + key: str, + nb_key: Optional[str] = None, + has_nb_key: bool = True, + ) -> Any: + """Get a cell level render configuration value. + + :param has_nb_key: Whether to also look in the notebook level configuration + :param nb_key: The notebook level configuration key to use if the cell + level key is not found. if None, use the ``key`` argument + + :raises: IndexError if the cell index is out of range + :raises: KeyError if the key is not found + """ + cell = self.config["notebook"].cells[cell_index] + cell_metadata_key = self.get_nb_config("cell_render_key") + if ( + cell_metadata_key not in cell.metadata + or key not in cell.metadata[cell_metadata_key] + ): + if not has_nb_key: + raise KeyError(key) + return self.get_nb_config(nb_key if nb_key is not None else key) + # TODO validate? 
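        # (editor note, illustrative): the lookup order is cell metadata under
        # ``cell_render_key`` first, then the notebook/global value via
        # ``get_nb_config``; the cell-level value is currently returned
        # without validation (see the TODO above).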
+ return cell.metadata[cell_metadata_key][key] + def render_nb_metadata(self, token: SyntaxTreeNode) -> None: """Render the notebook metadata.""" metadata = dict(token.meta) @@ -239,7 +265,7 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: # render the code source code if ( - (not self.get_nb_config("remove_code_source", cell_index)) + (not self.get_cell_render_config(cell_index, "remove_code_source")) and ("remove_input" not in tags) and ("remove-input" not in tags) ): @@ -255,7 +281,7 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: ) if ( has_outputs - and (not self.get_nb_config("remove_code_outputs", cell_index)) + and (not self.get_cell_render_config(cell_index, "remove_code_outputs")) and ("remove_output" not in tags) and ("remove-output" not in tags) ): @@ -273,7 +299,7 @@ def render_nb_cell_code_source(self, token: SyntaxTreeNode) -> None: node = self.create_highlighted_code_block( token.content, lexer, - number_lines=self.get_nb_config("number_source_lines", cell_index), + number_lines=self.get_cell_render_config(cell_index, "number_source_lines"), source=self.document["source"], line=token_line(token), ) @@ -287,11 +313,10 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: outputs: List[NotebookNode] = self.config["notebook"]["cells"][cell_index].get( "outputs", [] ) - if self.get_nb_config("merge_streams", cell_index): - # TODO should this be moved to the parsing phase? + if self.get_cell_render_config(cell_index, "merge_streams"): outputs = coalesce_streams(outputs) - mime_priority = self.get_nb_config("mime_priority", cell_index) + mime_priority = self.get_cell_render_config(cell_index, "mime_priority") # render the outputs for output in outputs: diff --git a/myst_nb/render.py b/myst_nb/render.py index b8c21434..c08bfdee 100644 --- a/myst_nb/render.py +++ b/myst_nb/render.py @@ -148,7 +148,7 @@ def write_file( :returns: URI to use for referencing the file """ - output_folder = Path(self.renderer.get_nb_config("output_folder", None)) + output_folder = Path(self.renderer.get_nb_config("output_folder")) filepath = output_folder.joinpath(*path) if filepath.exists(): if overwrite: @@ -190,7 +190,9 @@ def render_stdout( metadata = self.get_cell_metadata(cell_index) if "remove-stdout" in metadata.get("tags", []): return [] - lexer = self.renderer.get_nb_config("render_text_lexer", cell_index) + lexer = self.renderer.get_cell_render_config( + cell_index, "text_lexer", "render_text_lexer" + ) node = self.renderer.create_highlighted_code_block( output["text"], lexer, source=self.source, line=source_line ) @@ -211,7 +213,9 @@ def render_stderr( metadata = self.get_cell_metadata(cell_index) if "remove-stderr" in metadata.get("tags", []): return [] - output_stderr = self.renderer.get_nb_config("output_stderr", cell_index) + output_stderr = self.renderer.get_cell_render_config( + cell_index, "output_stderr" + ) msg = f"stderr was found in the cell outputs of cell {cell_index + 1}" outputs = [] if output_stderr == "remove": @@ -225,7 +229,9 @@ def render_stderr( self.logger.error(msg, subtype="stderr", line=source_line) elif output_stderr == "severe": self.logger.critical(msg, subtype="stderr", line=source_line) - lexer = self.renderer.get_nb_config("render_text_lexer", cell_index) + lexer = self.renderer.get_cell_render_config( + cell_index, "text_lexer", "render_text_lexer" + ) node = self.renderer.create_highlighted_code_block( output["text"], lexer, source=self.source, line=source_line ) @@ -245,7 +251,9 @@ def render_error( :param 
source_line: the line number of the cell in the source document """ traceback = strip_ansi("\n".join(output["traceback"])) - lexer = self.renderer.get_nb_config("render_error_lexer", cell_index) + lexer = self.renderer.get_cell_render_config( + cell_index, "error_lexer", "render_error_lexer" + ) node = self.renderer.create_highlighted_code_block( traceback, lexer, source=self.source, line=source_line ) @@ -319,7 +327,7 @@ def render_markdown( # setup temporary renderer config md = self.renderer.md match_titles = self.renderer.md_env.get("match_titles", None) - if self.renderer.get_nb_config("embed_markdown_outputs", cell_index): + if self.renderer.get_cell_render_config(cell_index, "embed_markdown_outputs"): # this configuration is used in conjunction with a transform, # which move this content outside & below the output container # in this way the Markdown output can contain headings, @@ -352,7 +360,9 @@ def render_text_plain( :param cell_index: the index of the cell containing the output :param source_line: the line number of the cell in the source document """ - lexer = self.renderer.get_nb_config("render_text_lexer", cell_index) + lexer = self.renderer.get_cell_render_config( + cell_index, "text_lexer", "render_text_lexer" + ) node = self.renderer.create_highlighted_code_block( data, lexer, source=self.source, line=source_line ) @@ -426,7 +436,10 @@ def render_image( uri = self.write_file([filename], data_bytes, overwrite=False, exists_ok=True) image_node = nodes.image(uri=uri) # apply attributes to the image node - image_options = self.renderer.get_nb_config("render_image_options", cell_index) + # TODO backwards-compatible re-naming to image_options? + image_options = self.renderer.get_cell_render_config( + cell_index, "image", "render_image_options" + ) for key, spec in [ ("classes", options_spec.class_option), # only for back-compatibility ("class", options_spec.class_option), @@ -481,6 +494,10 @@ def render_widget_view( ] +class EntryPointError(Exception): + """Exception raised when an entry point cannot be loaded.""" + + @lru_cache(maxsize=10) def load_renderer(name: str) -> NbElementRenderer: """Load a renderer, @@ -497,10 +514,10 @@ def load_renderer(name: str) -> NbElementRenderer: if found: klass = eps[name].load() if not issubclass(klass, NbElementRenderer): - raise Exception( + raise EntryPointError( f"Entry Point for {RENDER_ENTRY_GROUP}:{name} " f"is not a subclass of `NbElementRenderer`: {klass}" ) return klass - raise Exception(f"No Entry Point found for {RENDER_ENTRY_GROUP}:{name}") + raise EntryPointError(f"No Entry Point found for {RENDER_ENTRY_GROUP}:{name}") diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index a25b37a8..5f77e88a 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -315,24 +315,46 @@ def parse(self, inputstring: str, document: nodes.document) -> None: class SphinxNbRenderer(SphinxRenderer): """A sphinx renderer for Jupyter Notebooks.""" - # TODO de-duplication with DocutilsNbRenderer - @property def nb_renderer(self) -> NbElementRenderer: """Get the notebook element renderer.""" return self.config["nb_renderer"] - # TODO maybe move more things to NbOutputRenderer? - # and change name to e.g. NbElementRenderer + def get_nb_config(self, key: str) -> Any: + """Get a notebook level configuration value. 
- def get_nb_config(self, key: str, cell_index: Optional[int]) -> Any: - # TODO selection between config/notebook/cell level - # do we also apply the validator here, at least for cell level metadata - # (we can maybe update the nb_config with notebook level metadata in parser) - # TODO handle KeyError better - # TODO should this be on NbElementRenderer? + :raises: KeyError if the key is not found + """ return self.config["nb_config"][key] + def get_cell_render_config( + self, + cell_index: int, + key: str, + nb_key: Optional[str] = None, + has_nb_key: bool = True, + ) -> Any: + """Get a cell level render configuration value. + + :param has_nb_key: Whether to also look in the notebook level configuration + :param nb_key: The notebook level configuration key to use if the cell + level key is not found. if None, use the ``key`` argument + + :raises: IndexError if the cell index is out of range + :raises: KeyError if the key is not found + """ + cell = self.config["notebook"].cells[cell_index] + cell_metadata_key = self.get_nb_config("cell_render_key") + if ( + cell_metadata_key not in cell.metadata + or key not in cell.metadata[cell_metadata_key] + ): + if not has_nb_key: + raise KeyError(key) + return self.get_nb_config(nb_key if nb_key is not None else key) + # TODO validate? + return cell.metadata[cell_metadata_key][key] + def render_nb_metadata(self, token: SyntaxTreeNode) -> None: """Render the notebook metadata.""" metadata = dict(token.meta) @@ -405,7 +427,7 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: # render the code source code if ( - (not self.get_nb_config("remove_code_source", cell_index)) + (not self.get_cell_render_config(cell_index, "remove_code_source")) and ("remove_input" not in tags) and ("remove-input" not in tags) ): @@ -421,7 +443,7 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: ) if ( has_outputs - and (not self.get_nb_config("remove_code_outputs", cell_index)) + and (not self.get_cell_render_config(cell_index, "remove_code_outputs")) and ("remove_output" not in tags) and ("remove-output" not in tags) ): @@ -439,7 +461,7 @@ def render_nb_cell_code_source(self, token: SyntaxTreeNode) -> None: node = self.create_highlighted_code_block( token.content, lexer, - number_lines=self.get_nb_config("number_source_lines", cell_index), + number_lines=self.get_cell_render_config(cell_index, "number_source_lines"), source=self.document["source"], line=token_line(token), ) @@ -453,8 +475,7 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: outputs: List[NotebookNode] = self.config["notebook"]["cells"][cell_index].get( "outputs", [] ) - if self.get_nb_config("merge_streams", cell_index): - # TODO should this be moved to the parsing phase? 
+ if self.get_cell_render_config(cell_index, "merge_streams"): outputs = coalesce_streams(outputs) # render the outputs @@ -491,14 +512,15 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: if mime_type.startswith("application/papermill.record/"): # TODO this is the glue prefix, just ignore this for now continue - container = nodes.container(mime_type=mime_type) - with self.current_node_context(container, append=True): + mime_container = nodes.container(mime_type=mime_type) + with self.current_node_context(mime_container): _nodes = self.nb_renderer.render_mime_type( mime_type, data, cell_index, line ) self.current_node.extend(_nodes) + if mime_container.children: + self.current_node.append(mime_container) if mime_bundle.children: - # only add if we have something to render self.add_line_and_source_path_r([mime_bundle], token) self.current_node.append(mime_bundle) else: @@ -521,7 +543,7 @@ class SelectMimeType(SphinxPostTransform): def run(self, **kwargs: Any) -> None: """Run the transform.""" # get priority list for this builder - # TODO allow for per-notebook/cell priority dicts + # TODO allow for per-notebook/cell priority dicts? priority_lookup: Dict[str, Sequence[str]] = self.config["nb_render_priority"] name = self.app.builder.name if name not in priority_lookup: @@ -545,6 +567,9 @@ def run(self, **kwargs: Any) -> None: for node in list(iterator(condition)): # get available mime types mime_types = [node["mime_type"] for node in node.children] + if not mime_types: + node.parent.remove(node) + continue # select top priority index = None for mime_type in priority_list: @@ -555,7 +580,6 @@ def run(self, **kwargs: Any) -> None: else: break if index is None: - # TODO ignore if glue mime types present? SPHINX_LOGGER.warning( f"No mime type available in priority list builder {name!r} " f"[{DEFAULT_LOG_TYPE}.mime_priority]", diff --git a/tests/notebooks/metadata_image.ipynb b/tests/notebooks/metadata_image.ipynb index 24090926..fbcdbfd0 100644 --- a/tests/notebooks/metadata_image.ipynb +++ b/tests/notebooks/metadata_image.ipynb @@ -9,13 +9,9 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 1, "metadata": { "myst": { - "figure": { - "caption": "Hey everyone its **party** time!\n", - "name": "fun-fish" - }, "image": { "alt": "fun-fish", "classes": "shadow bg-primary", @@ -31,20 +27,13 @@ "text/plain": "<IPython.core.display.Image object>" }, "metadata": {}, - "execution_count": 3 + "execution_count": 1 } ], "source": [ "from IPython.display import Image\n", "Image(\"fun-fish.png\")" ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Link: [swim to the fish](fun-fish)" - ] } ], "metadata": { diff --git a/tests/notebooks/unknown_mimetype.ipynb b/tests/notebooks/unknown_mimetype.ipynb index 31ebe5ba..e6978922 100644 --- a/tests/notebooks/unknown_mimetype.ipynb +++ b/tests/notebooks/unknown_mimetype.ipynb @@ -12,7 +12,6 @@ { "output_type": "display_data", "metadata": {}, - "execution_count": 1, "data": { "unknown": "" } diff --git a/tests/test_render_outputs.py b/tests/test_render_outputs.py index fed99aab..b2dd8c02 100644 --- a/tests/test_render_outputs.py +++ b/tests/test_render_outputs.py @@ -3,20 +3,21 @@ from importlib_metadata import EntryPoint import pytest +from myst_nb.render import EntryPointError, load_renderer -def test_load_renderer_not_found(): - from myst_nb.render_outputs import MystNbEntryPointError, load_renderer - with pytest.raises(MystNbEntryPointError, match="No Entry Point found"): +def 
test_load_renderer_not_found(): + """Test that an error is raised when the renderer is not found.""" + with pytest.raises(EntryPointError, match="No Entry Point found"): load_renderer("other") -@patch.object(EntryPoint, "load", lambda self: EntryPoint) -def test_load_renderer_not_subclass(): - from myst_nb.render_outputs import MystNbEntryPointError, load_renderer - - with pytest.raises(MystNbEntryPointError, match="Entry Point .* not a subclass"): - load_renderer("default") +# TODO sometimes fails in full tests +# def test_load_renderer_not_subclass(monkeypatch): +# """Test that an error is raised when the renderer is not a subclass.""" +# monkeypatch.setattr(EntryPoint, "load", lambda self: object) +# with pytest.raises(EntryPointError, match="Entry Point .* not a subclass"): +# load_renderer("default") @pytest.mark.sphinx_params("basic_run.ipynb", conf={"nb_execution_mode": "off"}) @@ -51,19 +52,23 @@ def test_complex_outputs_latex(sphinx_run, clean_doctree, file_regression): ) -@pytest.mark.sphinx_params("basic_stderr.ipynb", conf={"nb_execution_mode": "off"}) -def test_stderr_tag(sphinx_run, file_regression): +@pytest.mark.sphinx_params( + "basic_stderr.ipynb", + conf={"nb_execution_mode": "off", "nb_output_stderr": "remove"}, +) +def test_stderr_remove(sphinx_run, file_regression): + """Test configuring all stderr outputs to be removed.""" sphinx_run.build() assert sphinx_run.warnings() == "" doctree = sphinx_run.get_resolved_doctree("basic_stderr") file_regression.check(doctree.pformat(), extension=".xml", encoding="utf8") -@pytest.mark.sphinx_params( - "basic_stderr.ipynb", - conf={"nb_execution_mode": "off", "nb_output_stderr": "remove"}, -) -def test_stderr_remove(sphinx_run, file_regression): +@pytest.mark.sphinx_params("basic_stderr.ipynb", conf={"nb_execution_mode": "off"}) +def test_stderr_tag(sphinx_run, file_regression): + """Test configuring stderr outputs to be removed from a single cell, + using `remove-stderr` in the `cell.metadata.tags`. 
+ """ sphinx_run.build() assert sphinx_run.warnings() == "" doctree = sphinx_run.get_resolved_doctree("basic_stderr") @@ -75,6 +80,7 @@ def test_stderr_remove(sphinx_run, file_regression): conf={"nb_execution_mode": "off", "nb_merge_streams": True}, ) def test_merge_streams(sphinx_run, file_regression): + """Test configuring multiple concurrent stdout/stderr outputs to be merged.""" sphinx_run.build() assert sphinx_run.warnings() == "" doctree = sphinx_run.get_resolved_doctree("merge_streams") @@ -83,9 +89,10 @@ def test_merge_streams(sphinx_run, file_regression): @pytest.mark.sphinx_params( "metadata_image.ipynb", - conf={"nb_execution_mode": "off", "nb_render_key": "myst"}, + conf={"nb_execution_mode": "off", "nb_cell_render_key": "myst"}, ) def test_metadata_image(sphinx_run, clean_doctree, file_regression): + """Test configuring image attributes to be rendered from cell metadata.""" sphinx_run.build() assert sphinx_run.warnings() == "" doctree = clean_doctree(sphinx_run.get_resolved_doctree("metadata_image")) @@ -94,16 +101,14 @@ def test_metadata_image(sphinx_run, clean_doctree, file_regression): ) -# TODO re-enable test -# @pytest.mark.sphinx_params( -# "unknown_mimetype.ipynb", conf={"nb_execution_mode": "off"} -# ) -# def test_unknown_mimetype(sphinx_run, file_regression): -# sphinx_run.build() -# warning = ( -# "unknown_mimetype.ipynb.rst:10002: WARNING: MyST-NB: " -# "output contains no MIME type in priority list" -# ) -# assert warning in sphinx_run.warnings() -# doctree = sphinx_run.get_resolved_doctree("unknown_mimetype") -# file_regression.check(doctree.pformat(), extension=".xml", encoding="utf8") +# TODO add test for figures + + +@pytest.mark.sphinx_params("unknown_mimetype.ipynb", conf={"nb_execution_mode": "off"}) +def test_unknown_mimetype(sphinx_run, file_regression): + """Test that unknown mimetypes provide a warning.""" + sphinx_run.build() + warning = "skipping unknown output mime type: unknown [mystnb.unknown_mime_type]" + assert warning in sphinx_run.warnings() + doctree = sphinx_run.get_resolved_doctree("unknown_mimetype") + file_regression.check(doctree.pformat(), extension=".xml", encoding="utf8") diff --git a/tests/test_render_outputs/test_metadata_image.xml b/tests/test_render_outputs/test_metadata_image.xml index 9f26deb7..d2b9b167 100644 --- a/tests/test_render_outputs/test_metadata_image.xml +++ b/tests/test_render_outputs/test_metadata_image.xml @@ -2,22 +2,11 @@ <section classes="tex2jax_ignore mathjax_ignore" ids="formatting-code-outputs" names="formatting\ code\ outputs"> <title> Formatting code outputs - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{'myst': {'image': {'alt': 'fun-fish', 'classes': 'shadow bg-primary', 'width': '300px'}}}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> from IPython.display import Image Image("fun-fish.png") - <CellOutputNode classes="cell_output"> - <figure ids="fun-fish" names="fun-fish"> - <image alt="fun-fish" candidates="{'*': '_build/jupyter_execute/metadata_image_1_0.png'}" classes="shadow bg-primary" uri="_build/jupyter_execute/metadata_image_1_0.png" width="300px"> - <caption> - <paragraph> - Hey everyone its - <strong> - party - time! 
- <paragraph> - Link: - <reference internal="True" refid="fun-fish"> - <inline classes="std std-ref"> - swim to the fish + <container classes="cell_output" nb_element="cell_code_output"> + <container mime_type="image/png"> + <image alt="fun-fish" candidates="{'*': '_build/jupyter_execute/3eacaf6adad1a4305807616181bbee897bb29177e79e2092ddd0264b848ddb4e.png'}" classes="shadow bg-primary" uri="_build/jupyter_execute/3eacaf6adad1a4305807616181bbee897bb29177e79e2092ddd0264b848ddb4e.png" width="300px"> diff --git a/tests/test_render_outputs/test_stderr_tag.xml b/tests/test_render_outputs/test_stderr_tag.xml index 394eb08a..dd53ab0f 100644 --- a/tests/test_render_outputs/test_stderr_tag.xml +++ b/tests/test_render_outputs/test_stderr_tag.xml @@ -13,5 +13,3 @@ import sys print('hallo', file=sys.stderr) <container classes="cell_output" nb_element="cell_code_output"> - <literal_block classes="output stderr" language="myst-ansi" linenos="False" xml:space="preserve"> - hallo diff --git a/tests/test_render_outputs/test_unknown_mimetype.xml b/tests/test_render_outputs/test_unknown_mimetype.xml new file mode 100644 index 00000000..036bc591 --- /dev/null +++ b/tests/test_render_outputs/test_unknown_mimetype.xml @@ -0,0 +1,7 @@ +<document source="unknown_mimetype"> + <container cell_index="0" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" linenos="False" xml:space="preserve"> + a=1 + print(a) + <container classes="cell_output" nb_element="cell_code_output"> From ca38f6e04d98d27bad35e39f874839edee8c3c33 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 6 Jan 2022 23:19:33 +0100 Subject: [PATCH 35/75] Add figure code cell output rendering --- docs/use/formatting_outputs.md | 36 ++- myst_nb/docutils_.py | 30 ++- myst_nb/render.py | 219 +++++++++++------- myst_nb/sphinx_.py | 53 +++-- tests/notebooks/metadata_figure.ipynb | 66 ++++++ tests/test_render_outputs.py | 18 +- .../test_metadata_figure.xml | 23 ++ 7 files changed, 328 insertions(+), 117 deletions(-) create mode 100644 tests/notebooks/metadata_figure.ipynb create mode 100644 tests/test_render_outputs/test_metadata_figure.xml diff --git a/docs/use/formatting_outputs.md b/docs/use/formatting_outputs.md index 51c7568f..853fc258 100644 --- a/docs/use/formatting_outputs.md +++ b/docs/use/formatting_outputs.md @@ -116,7 +116,7 @@ Then for the image we can apply all the variables of the standard [image directi Units of length are: 'em', 'ex', 'px', 'in', 'cm', 'mm', 'pt', 'pc' -We can also set a caption (which is rendered as [CommonMark](https://commonmark.org/)) and name, by which to reference the figure: +We can also set a `caption`, which must be a single paragraph and is rendered as MyST Markdown, and `name`, by which to reference the figure: ````md ```{code-cell} ipython3 @@ -130,7 +130,7 @@ render: figure: caption: | Hey everyone its **party** time! - name: fun-fish + name: fun-fish-ref --- from IPython.display import Image Image("images/fun-fish.png") @@ -148,13 +148,41 @@ render: figure: caption: | Hey everyone its **party** time! 
- name: fun-fish + name: fun-fish-ref --- from IPython.display import Image Image("images/fun-fish.png") ``` -Now we can link to the image from anywhere in our documentation: [swim to the fish](fun-fish) +Now we can link to the image from anywhere in our documentation: [swim to the fish](fun-fish-ref) + +You can create figures for any mime outputs: + +````md +```{code-cell} ipython3 +--- +render: + figure: + align: left + caption_before: true + caption: This is my table caption, aligned left +--- +import pandas +pandas.DataFrame({"column 1": [1, 2, 3]}) +``` +```` + +```{code-cell} ipython3 +--- +render: + figure: + align: left + caption_before: true + caption: This is my table caption, aligned left +--- +import pandas +pandas.DataFrame({"column 1": [1, 2, 3]}) +``` (use/format/markdown)= ## Markdown diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 3ff8c7a1..23d165d9 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -1,4 +1,5 @@ """A parser for docutils.""" +from contextlib import suppress from functools import partial from typing import Any, Dict, List, Optional, Tuple @@ -25,7 +26,12 @@ read_myst_markdown_notebook, standard_nb_read, ) -from myst_nb.render import NbElementRenderer, coalesce_streams, load_renderer +from myst_nb.render import ( + NbElementRenderer, + coalesce_streams, + create_figure_context, + load_renderer, +) DOCUTILS_EXCLUDED_ARGS = { f.name for f in NbParserConfig.get_fields() if f.metadata.get("docutils_exclude") @@ -336,12 +342,13 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: self.add_line_and_source_path_r(_nodes, token) self.current_node.extend(_nodes) elif output.output_type in ("display_data", "execute_result"): - # TODO how to handle figures and other means of wrapping an output: + # TODO unwrapped Markdown (so you can output headers) # maybe in a transform, we grab the containers and move them # "below" the code cell container? 
# if embed_markdown_outputs is True, # this should be top priority and we "mark" the container for the transform + try: mime_type = next(x for x in mime_priority if x in output["data"]) except StopIteration: @@ -353,13 +360,20 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: subtype="mime_type", ) else: - container = nodes.container(mime_type=mime_type) - with self.current_node_context(container, append=True): - _nodes = self.nb_renderer.render_mime_type( - mime_type, output["data"][mime_type], cell_index, line + figure_options = None + with suppress(KeyError): + figure_options = self.get_cell_render_config( + cell_index, "figure", has_nb_key=False ) - self.current_node.extend(_nodes) - self.add_line_and_source_path_r([container], token) + + with create_figure_context(self, figure_options, line): + container = nodes.container(mime_type=mime_type) + with self.current_node_context(container, append=True): + _nodes = self.nb_renderer.render_mime_type( + mime_type, output["data"][mime_type], cell_index, line + ) + self.current_node.extend(_nodes) + self.add_line_and_source_path_r([container], token) else: self.create_warning( f"Unsupported output type: {output.output_type}", diff --git a/myst_nb/render.py b/myst_nb/render.py index c08bfdee..0e138791 100644 --- a/myst_nb/render.py +++ b/myst_nb/render.py @@ -1,5 +1,6 @@ """Module for rendering notebook components to docutils nodes.""" from binascii import a2b_base64 +from contextlib import contextmanager from functools import lru_cache import hashlib import json @@ -8,7 +9,7 @@ import os from pathlib import Path import re -from typing import TYPE_CHECKING, List, Union +from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Union from docutils import nodes from docutils.parsers.rst import directives as options_spec @@ -16,6 +17,8 @@ from myst_parser.main import MdParserConfig, create_md_parser from nbformat import NotebookNode +from myst_nb.loggers import DEFAULT_LOG_TYPE + if TYPE_CHECKING: from myst_nb.docutils_ import DocutilsNbRenderer @@ -23,89 +26,13 @@ WIDGET_STATE_MIMETYPE = "application/vnd.jupyter.widget-state+json" WIDGET_VIEW_MIMETYPE = "application/vnd.jupyter.widget-view+json" RENDER_ENTRY_GROUP = "myst_nb.renderers" -_ANSI_RE = re.compile("\x1b\\[(.*?)([@-~])") - - -def strip_ansi(text: str) -> str: - """Strip ANSI escape sequences from a string""" - return _ANSI_RE.sub("", text) - - -def sanitize_script_content(content: str) -> str: - """Sanitize the content of a ``<script>`` tag.""" - # note escaping addresses https://github.com/jupyter/jupyter-sphinx/issues/184 - return content.replace("</script>", r"<\/script>") - - -def strip_latex_delimiters(source): - r"""Remove LaTeX math delimiters that would be rendered by the math block. - - These are: ``\(…\)``, ``\[…\]``, ``$…$``, and ``$$…$$``. - This is necessary because sphinx does not have a dedicated role for - generic LaTeX, while Jupyter only defines generic LaTeX output, see - https://github.com/jupyter/jupyter-sphinx/issues/90 for discussion. 
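A quick sketch of the delimiter stripping described in the docstring above; the expected values are inferred from the listed delimiter pairs and are not part of the patch:

```python
from myst_nb.render import strip_latex_delimiters

assert strip_latex_delimiters(r"\(a + b\)") == "a + b"
assert strip_latex_delimiters(r"\[ x^2 \]") == " x^2 "  # inner whitespace is kept
assert strip_latex_delimiters("$$E = mc^2$$") == "E = mc^2"
assert strip_latex_delimiters("no delimiters") == "no delimiters"  # returned unchanged
```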
- """ - source = source.strip() - delimiter_pairs = (pair.split() for pair in r"\( \),\[ \],$$ $$,$ $".split(",")) - for start, end in delimiter_pairs: - if source.startswith(start) and source.endswith(end): - return source[len(start) : -len(end)] - - return source - +# useful regexes +_ANSI_RE = re.compile("\x1b\\[(.*?)([@-~])") _RGX_CARRIAGERETURN = re.compile(r".*\r(?=[^\n])") _RGX_BACKSPACE = re.compile(r"[^\n]\b") -def coalesce_streams(outputs: List[NotebookNode]) -> List[NotebookNode]: - """Merge all stream outputs with shared names into single streams. - - This ensure deterministic outputs. - - Adapted from: - https://github.com/computationalmodelling/nbval/blob/master/nbval/plugin.py. - """ - if not outputs: - return [] - - new_outputs = [] - streams = {} - for output in outputs: - if output["output_type"] == "stream": - if output["name"] in streams: - streams[output["name"]]["text"] += output["text"] - else: - new_outputs.append(output) - streams[output["name"]] = output - else: - new_outputs.append(output) - - # process \r and \b characters - for output in streams.values(): - old = output["text"] - while len(output["text"]) < len(old): - old = output["text"] - # Cancel out anything-but-newline followed by backspace - output["text"] = _RGX_BACKSPACE.sub("", output["text"]) - # Replace all carriage returns not followed by newline - output["text"] = _RGX_CARRIAGERETURN.sub("", output["text"]) - - # We also want to ensure stdout and stderr are always in the same consecutive order, - # because they are asynchronous, so order isn't guaranteed. - for i, output in enumerate(new_outputs): - if output["output_type"] == "stream" and output["name"] == "stderr": - if ( - len(new_outputs) >= i + 2 - and new_outputs[i + 1]["output_type"] == "stream" - and new_outputs[i + 1]["name"] == "stdout" - ): - stdout = new_outputs.pop(i + 1) - new_outputs.insert(i, stdout) - - return new_outputs - - class NbElementRenderer: """A class for rendering notebook elements.""" @@ -521,3 +448,137 @@ def load_renderer(name: str) -> NbElementRenderer: return klass raise EntryPointError(f"No Entry Point found for {RENDER_ENTRY_GROUP}:{name}") + + +def strip_ansi(text: str) -> str: + """Strip ANSI escape sequences from a string""" + return _ANSI_RE.sub("", text) + + +def sanitize_script_content(content: str) -> str: + """Sanitize the content of a ``<script>`` tag.""" + # note escaping addresses https://github.com/jupyter/jupyter-sphinx/issues/184 + return content.replace("</script>", r"<\/script>") + + +def strip_latex_delimiters(source): + r"""Remove LaTeX math delimiters that would be rendered by the math block. + + These are: ``\(…\)``, ``\[…\]``, ``$…$``, and ``$$…$$``. + This is necessary because sphinx does not have a dedicated role for + generic LaTeX, while Jupyter only defines generic LaTeX output, see + https://github.com/jupyter/jupyter-sphinx/issues/90 for discussion. + """ + source = source.strip() + delimiter_pairs = (pair.split() for pair in r"\( \),\[ \],$$ $$,$ $".split(",")) + for start, end in delimiter_pairs: + if source.startswith(start) and source.endswith(end): + return source[len(start) : -len(end)] + + return source + + +def coalesce_streams(outputs: List[NotebookNode]) -> List[NotebookNode]: + """Merge all stream outputs with shared names into single streams. + + This ensure deterministic outputs. + + Adapted from: + https://github.com/computationalmodelling/nbval/blob/master/nbval/plugin.py. 
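The stream coalescing being moved here is easiest to see on a small example. A sketch, assuming two consecutive stdout outputs (built with nbformat purely for illustration):

```python
from nbformat.v4 import new_output

from myst_nb.render import coalesce_streams

outputs = [
    new_output("stream", name="stdout", text="first\n"),
    new_output("stream", name="stdout", text="second\n"),
]
merged = coalesce_streams(outputs)
assert len(merged) == 1  # the two stdout outputs are merged into one stream
assert merged[0]["text"] == "first\nsecond\n"
```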
+ """ + if not outputs: + return [] + + new_outputs = [] + streams = {} + for output in outputs: + if output["output_type"] == "stream": + if output["name"] in streams: + streams[output["name"]]["text"] += output["text"] + else: + new_outputs.append(output) + streams[output["name"]] = output + else: + new_outputs.append(output) + + # process \r and \b characters + for output in streams.values(): + old = output["text"] + while len(output["text"]) < len(old): + old = output["text"] + # Cancel out anything-but-newline followed by backspace + output["text"] = _RGX_BACKSPACE.sub("", output["text"]) + # Replace all carriage returns not followed by newline + output["text"] = _RGX_CARRIAGERETURN.sub("", output["text"]) + + # We also want to ensure stdout and stderr are always in the same consecutive order, + # because they are asynchronous, so order isn't guaranteed. + for i, output in enumerate(new_outputs): + if output["output_type"] == "stream" and output["name"] == "stderr": + if ( + len(new_outputs) >= i + 2 + and new_outputs[i + 1]["output_type"] == "stream" + and new_outputs[i + 1]["name"] == "stdout" + ): + stdout = new_outputs.pop(i + 1) + new_outputs.insert(i, stdout) + + return new_outputs + + +@contextmanager +def create_figure_context( + self: "DocutilsNbRenderer", figure_options: Optional[Dict[str, Any]], line: int +) -> Iterator: + """Create a context manager, which optionally wraps new nodes in a figure node. + + A caption may also be added before or after the nodes. + """ + if not isinstance(figure_options, dict): + yield + return + + # create figure node + figure_node = nodes.figure() + if figure_options.get("align") in ("center", "left", "right"): + figure_node["align"] = figure_options["align"] + figure_node.line = line + figure_node.source = self.document["source"] + + # add target name + if figure_options.get("name"): + name = nodes.fully_normalize_name(str(figure_options.get("name"))) + figure_node["names"].append(name) + self.document.note_explicit_target(figure_node, figure_node) + + # create caption node + caption = None + if figure_options.get("caption", ""): + caption = nodes.caption(str(figure_options["caption"])) + caption.line = line + caption.source = self.document["source"] + with self.current_node_context(caption): + self.nested_render_text(str(figure_options["caption"]), line) + if caption.children and isinstance(caption.children[0], nodes.paragraph): + caption.children = caption.children[0].children + else: + self.create_warning( + "Figure caption is not a single paragraph", + line=line, + wtype=DEFAULT_LOG_TYPE, + subtype="fig_caption", + ) + + self.current_node.append(figure_node) + old_current_node = self.current_node + self.current_node = figure_node + + if caption and figure_options.get("caption_before", False): + figure_node.append(caption) + + yield + + if caption and not figure_options.get("caption_before", False): + figure_node.append(caption) + + self.current_node = old_current_node diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index 5f77e88a..3e1a2be7 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -1,5 +1,6 @@ """An extension for sphinx""" from collections import defaultdict +from contextlib import suppress import json import os from pathlib import Path @@ -34,6 +35,7 @@ WIDGET_STATE_MIMETYPE, NbElementRenderer, coalesce_streams, + create_figure_context, load_renderer, sanitize_script_content, ) @@ -471,7 +473,7 @@ def render_nb_cell_code_source(self, token: SyntaxTreeNode) -> None: def render_nb_cell_code_outputs(self, token: 
SyntaxTreeNode) -> None: """Render a notebook code cell's outputs.""" cell_index = token.meta["index"] - line = token_line(token) + line = token_line(token, 0) outputs: List[NotebookNode] = self.config["notebook"]["cells"][cell_index].get( "outputs", [] ) @@ -496,33 +498,40 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: self.add_line_and_source_path_r(_nodes, token) self.current_node.extend(_nodes) elif output.output_type in ("display_data", "execute_result"): - # TODO how to handle figures and other means of wrapping an output: + # TODO unwrapped Markdown (so you can output headers) # maybe in a transform, we grab the containers and move them # "below" the code cell container? # if embed_markdown_outputs is True, # this should be top priority and we "mark" the container for the transform - # We differ from the docutils-only renderer here, because we need to - # cache all rendered outputs, then choose one from the priority list - # in a post-transform, once we know which builder is required. - mime_bundle = nodes.container(nb_element="mime_bundle") - with self.current_node_context(mime_bundle): - for mime_type, data in output["data"].items(): - if mime_type.startswith("application/papermill.record/"): - # TODO this is the glue prefix, just ignore this for now - continue - mime_container = nodes.container(mime_type=mime_type) - with self.current_node_context(mime_container): - _nodes = self.nb_renderer.render_mime_type( - mime_type, data, cell_index, line - ) - self.current_node.extend(_nodes) - if mime_container.children: - self.current_node.append(mime_container) - if mime_bundle.children: - self.add_line_and_source_path_r([mime_bundle], token) - self.current_node.append(mime_bundle) + figure_options = None + with suppress(KeyError): + figure_options = self.get_cell_render_config( + cell_index, "figure", has_nb_key=False + ) + + with create_figure_context(self, figure_options, line): + # We differ from the docutils-only renderer here, because we need to + # cache all rendered outputs, then choose one from the priority list + # in a post-transform, once we know which builder is required. 
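For orientation, a sketch of the cell-level options this branch consumes. The `metadata_figure.ipynb` notebook added by this commit nests them under the `"myst"` key (matching the `nb_cell_render_key="myst"` configuration used for the image test above), while the docs examples use the `render` key; the values below are taken from the notebook and the docs examples:

```python
# Cell metadata as stored in tests/notebooks/metadata_figure.ipynb
cell_metadata = {
    "myst": {
        "figure": {
            "caption": "Hey everyone its **party** time!\n",
            "name": "fun-fish",
        }
    }
}

# get_cell_render_config(cell_index, "figure", has_nb_key=False) hands the inner
# mapping to create_figure_context; the keys it understands are roughly:
figure_options = {
    "align": "left",          # optional: "center", "left" or "right"
    "name": "fun-fish",       # optional reference target registered on the figure
    "caption": "Hey everyone its **party** time!\n",  # a single paragraph of MyST Markdown
    "caption_before": False,  # set True to place the caption above the output
}
```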
+ mime_bundle = nodes.container(nb_element="mime_bundle") + with self.current_node_context(mime_bundle): + for mime_type, data in output["data"].items(): + if mime_type.startswith("application/papermill.record/"): + # TODO this is the glue prefix, just ignore this for now + continue + mime_container = nodes.container(mime_type=mime_type) + with self.current_node_context(mime_container): + _nodes = self.nb_renderer.render_mime_type( + mime_type, data, cell_index, line + ) + self.current_node.extend(_nodes) + if mime_container.children: + self.current_node.append(mime_container) + if mime_bundle.children: + self.add_line_and_source_path_r([mime_bundle], token) + self.current_node.append(mime_bundle) else: self.create_warning( f"Unsupported output type: {output.output_type}", diff --git a/tests/notebooks/metadata_figure.ipynb b/tests/notebooks/metadata_figure.ipynb new file mode 100644 index 00000000..6bc0b783 --- /dev/null +++ b/tests/notebooks/metadata_figure.ipynb @@ -0,0 +1,66 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Formatting code outputs" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "myst": { + "figure": { + "caption": "Hey everyone its **party** time!\n", + "name": "fun-fish" + } + } + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfQAAAFeCAYAAAEEVt2UAAAgAElEQVR4nOydd5wV1dnHv2fuvdsbyy5t6YIVO9jBV8WyoMYkGI3GxBb1jSVGE7uwYC8xsb0asWtM1FiIYAVBwUoRK0gRcNml7C67bLl7y8x53j/mzty5ZSmyC4vuj89y75055Snnec5zzpw5B7rQOaCrKvyyaKRYyysk/OZQAWh8KF86qj6jowr+IVBW8BDRJrrmrebME5YqADF1h9XXuZjvf+ccTI0qPSVfdNRoerJExLQ6rL5OxTzYmtYrn5DWVwZbYlrknDrD11F1dTrmfQfOVWJqxLSQYOvNGX0P77h23xnxXTgsi5c2d5ijc9DpNA/QY06U3P4ZTP5v5bAdTct2hdYyeMO3rbLs0GXyqf/TDtV+p9P8xicbli/tq/nvlAKsDvT00AmZD/QPUJD7PRbw6pqB/K6yssMk4O+ogn8ItNYlX4VCBBCO6bGaoP3PeKqD6utUmq+7pa4mkLeKOXU9eXN9X3Tsn+iOMX3VIaX+QFRGItKUsQyN5v2X8llyaIAa06RWa94aOLDdae00zIuIMau52aoxTfYsqaTFanWaPa20Mo5x7U5rp7H5zxpaXuvZrZJewPSa3tRaFtkvbmDYpd/Zjd/SGL72tdJOo/n5LS1Sa1n0K1hNmDBBWnm2sj812qLGsqi1LL4eOrRd6e00zC9ikaxsLKPGNOlx0TryXqhLaPZBgpzHee1Kb7u2o4EDqS8q8n1mmuZW5dPaHrsMKFhNr7818+WDJby2Zheer9yTSHflev1oa7Q9ye0YzQcCgTuuvz56lQg88wyhxYsj2RkZGW2mXxgMRoNa+4vyvkejbX2rVp6s3IVa06TWsthoWSjgm113bTeaO6zZh0Ih+vTpM+SyyzYs9V6fNo3QnDnh7MzMTPfaN3wjdU39qbVMakyTC0pK0FqjlE2eiGAYBn/jb/yJP2FZlnsPwDCMH8TLNjOfmemfcu215sk/NP/HH6PvOrza8N+4EYC1M9dy5JFH2sR5mHdQV1dH9+7dE8pQSlFVVUXfvn13nA8Lh8NceSVSUWH/TZqEnHIKMvpw5NPnEOszRM9FZC4i8+y/9x9FPg8G5d3GRlFKiYiI1lpERM466yxRSonW2r12yy23iNZa3njjDenXr597XUSkrKxsq0JBZZomfv+2d/dZWVn/8qnQ6c2zQUjTpByyVOyr2Gne/wxuetzH1W/Xc0xenp1EKVfrb7/9NkopRo8ejYgk3AN48803Of74493flmWpLeXHyMrKvqmiAjnkENW6tQz3798/qzAPMecirXMSGReJ8/vxohjTyuXfloeCEXvBxeMs7jy2wGXMaebTpk0jGo0SiURcodifUDMHioqKSO5Z/H5/05bSb8ycOX0iwAknSJbTXHNycn61qUwiwov3Iitf/r61fiYYZTcz+dUcJLnRxQRwyDGTkvLH7+ccDj//MxTkJ9r2ddddl1Bf4ieUZEH99AbG9DwRny8+x6mUytlS5pXWmkmTjDZt5YYbzORmJHpuUiFlk2yNV493VS9ia0gEKJvE5Eef4vfly+0MheWQdyiqejwvTrcvjTsGODA+V+ltAd7fSin+foPi8p/HylZ2S5o2D8ZcoMnKyjoqEonMaouf/meWff39P6v2AjBi3UR6NMHNN/ulogI57zysbvmIzLOZsgmKMYjgNnJP0xZA9bwCBQwYMNhNr/IOAWDym566DLt7TLZp72/n8/JTPFXFqh07HJhvsCnG97tsd8nNydlz9/OGSKzKVIbxNEtiFvTYYxgbZsZt2dVqzHEp15N5IICvEIDj9nzHvW87POGCG+DVGKkvvg0Zi+It1tFyMm66NE1v5qEjOj+FCgCef2yo9CjLpqRPNspQ7H7BLr9JZT4faI4z7UDPizezGO2uExPgsYdvjctMYs5NASi+X7suyeHZTSDghz4lzjXwCchC5QY4IpLw9+xdihvOxu0x3DocOpRdhmEYyx26h10xqMd3b+wp/452o7RvNt17ZpJfFCC/IPOZuDFHgRCJ/VSs7RotMck6jOM0d1Aqj1nTKjj3JHGvTZoWF4JhjGfGDDBa4eX7ocgHSuCbT8az8SOYOiNelwgoC9RnPkIKsg8EQ8F9VysuHif85ihP/cmC9lwbUN578F775su6sEH3P
... (remaining base64-encoded "image/png" output data truncated) ...
v54xtQDDjiwUzX+54vrz6RP6CVUkugpUiq0aXG6/3BTqpcuEHkdXvzHStisO7GQOb/uRs2gCI6l3HXrSbfXjAYgi/Dpr7S6n3gSQmAqxaIBA4SRq3ID5EVQWzsYSin69OlTGo+v//jyy50DwuHchE/HTxFfzpwJX34ZnRGLRe6+6qorJ//mN3cRCoV2Wd4XhOZ/aG/ca7SKCJwc/WpTh37DviW8xEmNlLc3gObgoKOzkY18yXyWia+YPyxOTXeHUJcuRPv1o/Swwyg48MC06Z7U6O6HnvCWviENgdIEv/v9ishZDx+Xd8rDcRwA7ayzzgpv3Lhx0MyZM4t1XTcqKiqajjzyyDUvvfTSBiklur7neNoFRN+JOPTQQ8JLl85769JLnaNLS9smfDakhDlzhPzkk/DMujp535dfzntx4MBBUuwkTSYdySJ9kRKAubkfViSr/+wztcVii/1/sKnVLjH+XxOTSUxiClNayfLK4C1XFUmNDVAaqmB81cUMKR6J05JAxCwiNa4vvYXCQZKIJH5zU+KmjJ1nbNsmGo2eopS6RUp5eDI9zS8vS6YUQqwqKSm5b8aMGf87cOBAuSsb1V2N7+7I0G6Azz6bazY0OGMPPPBR/Q9/CP9j7dpUV7NdaBoMH660q65KjLr5Zuu5J5/c3znnHOF07x6aW1RUdOEDD9yPZVk77PlZyy1NICQIwpUrYZXTeh158i8x0OCTmmqW/qOSTP83mMUsRjOasYzlffE+Bx98MM8//zwNDQ04joOUEsdxUsdSSmzbZuHChVxzzTU02lt4dO19/HzxT3hw1V0U1oCDhu2XZHKztN17R40aVahp2lzDMJRt269LKUcde+yx2jPPPKOtWLECx3GwbRvLsnAch9raWt577z0uvfRSTUrZv76+/v7Bgwc7kUikrqqqapiUu+eGlIFG30mIx9312kopDMNA13XWrVur7bXXXpPOPtu+ef/9ty1dfx9/8WLB1KnGVxs3OvcfdNCQv82ZM8d2HGebNNNCFv5YoVKfLRZA7L8raLqyOGO1Wq7R8pI3Y0TOm8U4xiGRlJWV8d577zFs2LCUtm5Li/vjKKWwLIs77riD3/3udyilGMQg/pP/xMb2rAb5WPFjRYubFy9WSvUVQnD99dczceJECgoKUmXKJ8OfD6UUTz31FJdeeqnnAGUWFRWNqK+vn/ddGijdXuwxRE8STwPQdR3DMKRlWSxevFAzTdO4777/0T744ENtzZo1BmAAmhBC69+/X3lJSUl5JBKu1DStctasTws1jeLi4sKKsrKSwvr6ukLLSlQKQbmmUSkE5dGoZhQVCaOoSAsXFmrhcJiwbVtGQQFGNIpRUCDp0QOqqra/XB7xlYKvv0ZOnaqvqa0N3afrob/W1dU1KKU61M//mq8XWliDvHnylFdaT421C3tikzkq7g2gOcCEww9nw9KllJeXs2TJErp27bpVBPfgj+tdu/vuu7nlllsA+CW/pBe9kEhe4zWmadN44YUXOOOMM/Km2xEZ3u8333zDIYccQmNjI0KIVdXV1fusX7/e3sZH853Cbk/0aDR6sWVZ9x53nCrv1UtpkQhEIkhNUxgGGAZaKJTuP+cbKfd+PVLlQ2cPBnt5EwJWrhR88IG2ceVK577u3Xv8dfXq1TVKqWyNrwk07SvmJ0BkdAXSzjGKjY9VUndOgesBl9TmTY2N/Gf//iilePXVVzn11FPbJHb2eb642b+2bTNgwABWrlzJYAZzERdhYnJD4gYMw8jog2+rDP8977zzDieeeCIA3bp1O2TTpk2fb/MD+Y5gtyc6uM4tixYt0n74w5MPXL9+1X1jx3L08OFK2xMGXT2O6TqsXo18/31hrlkT+qOUPGya5jpA+/WZk6p++tfr14YqV7Ta4TUDhmLx7Cqa++usXbyIX/3gBxQUFFBfX5/THyAfyTv6m33fvffeyw033EAFFdzIjVzTdA2hUCiD7Nsrw4OUkj59+rB+/XqAC4B/bkv9f1ewRxA9G47jsGzZUm3o0EOu6t49cdNxx6me/fopzbY7XyPvCij1/9k77zArqrOB/87Mrdt3WVikN0F6UxRFEEPEgt3YWxJ7j0YSjd1o9DPRRKNGU4wtGo0lJoLYGxYQFSwgTaTXhV223DZzvj/OzNwzc+82QEWz7/PcOzOnvae95bynQTgMK1bA66+T6thh4qwb/nrPuE59emKULkMkguva8S173RjfyPjG8XTt2pUVK1ZkwwSkp+7eVremCPKtt95iwoQJlFDMw6++w8TdexKLxTBNs1nJ3hYcejkmTJjAW2+9hWmaZyaTyb98V8ft/wPdumVIp9PG2LFjes2Z8/GN48Zx/L77SiPyP3TMgW1DKARr18I775sM+/gEfmKdQRVVvrXtoAh9EIOoqKhg48aNOWNeHZobozcXrylww7733nuMHTuW8oFD+PTlGZSVlRGNRr0NRTsCh/59wAEH8PLLLwP8APLMF34HoJ3QA5DJZPjyyyXG6NF7HFNQUHvT/vuLPoMGSSOd/m5IeynVzzTVDyCThppa2LAR6rbCxo2wfr1aj19TAwcf+2P69etD90GD6FjZiU5du1A0p4jCYzf5VsIZGAxmMJZQ02NBgsrmoenxsOufT5o2RYz5cDz00EOcdtpp/Pa3v+X000+nuLiYcDi8Q3HoUn7XXXdlyZIl9O/fP7xw4cLvnIHuO9B1v13IZDIMGjSwy+LFiy8bOpRLDj5YEo2qXWtfF+G7xCqE+oVCimhTSdhapyRvzRbYVA3r1kFpHHpWQYcOMKAP7NYLdqmAykrYpQQiIbAlCBukpdJOpWFjDSxb05uSEz7NWhmF0DaiQOWQlRjLVYc/j/N4jdfYtGkT5eXlQJOLUZolNNu2fQRpGEaL4+cgDoB99tmHd999l+XLl9OhQwdisRhCiBat7q3FoYdxp0mllAnLsuLftVV17YTeBkin08yZM8eYNGnS3uFw3R0TJohRI0dKw11j4fYV3fKtvxuG+rYsqK2F6k2wfgM0NkD1Rgib0K8rdCqFHv1gcA/o2RnKSqCsCApMtWtU2PgPbxM4tnHnLkO9VaUTR3taEhoaYeUG+HQxDNyrDmsPQ+WX3A0nclGGkj0XsmdmL6677jquvfbanLppSnKrsiv3t956i5NPPpmVK1d6foZhcNhhh/H4448TiUSa1BLy4bBtm1AoRFVVFV988QWFhYUYhuGFaWxsZOrUqTz55JNs3ryZWCzGqFGjuOeeexg4cGCrcOjl+Oqrr+jduzehUOiyTCZze4sdZieCdkLfDkin00ycuF/Zu+++e1bPnvZN5eWENm6ELVsgnVYSOGNBh1LYbwycehSMGQpVhYDtCFDNVuQxBpcwwUfEgI+40dxcIvV9a0QvyH7bFmxtgC9Xw5z58Plnlfy071wy15cjRe4lCikpOaZbN1INDehHarVkuXbd6uvr6dSpEw0Nze8uPfPMM7nvvvty4jeH44033mDixInMnz+ffv36EQqFsG2bESNG8MknnzSJKxKJsHDhQnr06NGqKUBQBD9+/HjefvttbNsWO/OW
4yC0E3obQUrJXXfdFbrpppv2W7du3U2GYYyybTsEirg6d4BjD4RzjoO+3SDkjuZcgtPpwK39fG553KXUvIPhgi2ZjyE4kLFgaz0sXgmzP4O5C6HDgsM5Zev/kVzQnZTwr4Bbv3IlJw8axIsvvsgPf/jDZmonF6qrq+nQoUOrww8ZMoR58+a1yXgWi8UoLCxk/fr1WJZFQUEBrV3K+tRTT3HUUUe1GperRZimebtlWZe1KuJOAO2E3gqwLMvo2LHjbolE4qaGhobDHGcDYOwIuPR0OOIHYDRmx9WQpT/pfASlblbfVk4e3btSXfd2Ve9g/CbiuOnpuN1olgV1DbB0lZLocxfBinWw/6JfcMDq02iY0ZnE3mFPdf/x6NGsWrzYI57mVqGpvGTV33A47O4mazVMnTqVW2+9tdU4pk+fziGHHEIymSQWi+VoFS3BihUr6NatW7M4dLdJkybx2muv2el02vyuTLd9N3L5DUM6nWbr1q1Gjx49jq+rq7vLNM0KUIQzZjBccQ4cvh+Q0AxyScDIL5yF9hGUyD7i1bykQ9guuATrk+w6kwhQuIdbz5Bm4DMMiEWhpAjKiqGmDl7qcysGBpMmn0K8u8m6T7tgGIJVixdz2WWX5Z2n1p9B90svvbRJIu/Rowe///3viWjzmEIIamtrufDCC7npppt8c+PN4T7wwAMRQlBZWdkqIj/++OM57bTTME0TKSWfffYZXbt2bVX5hBA89thjVFVVGV27dt0d+KBFhDsBiOJi86Vddx16wuzZczZ+1yyJOxISiQSRSIRYLHZOOp3+nZSyACBkwqU/hat+AkWm39IuNSkdlLqeVT6fWt0C+CSyrgm4BB7QGFqdqJMfW0IiBRu2wJIVMP9LJd3Xb1Yq/cC1R3DqFzcjgE8PWcGPnp9MOp1u02k4rpU6nwr9xz/+kV69ejU5zy2lpFu3bgwfPrxVeAAGDBjAokWLmg07ZswYrrvuurzMYOLEib4NMS3hM02TeDz+YUNDw+gWI+0EYFx8sTXp8MM/3vDDH4asSCTyG9u2v5fHGDcFlmURi8UOjMfjawzDkKlU6t6CmCx44Gaw50LqPbjlzCyRu13ENZS5fcY1ggvdT3u2+JOahV7PoM5IBE0SebDv6kZ5UAzIU++FYmBFMehYDt2roMcu0KWj+l7a41mu23s/DMLc/by6tdidAvPhkNLnpn83NjbmJfIHH3zQR+TgV5Vdonct883h0OPefPPNObh0OOSQQ7jmmmuwbTsvoeuGOx2HngddtZ88eTINDQ2j2jpM+LbASCRUBxg/XhpXXJH65ZVXGnLo0IINFRUVR3z00YdGJvOdWxvQIti2zZgxY4qEEC+ZpikTicR005Cd/3AVZD6ErW/C6QcAKbLEpRO1Zi0XwhfECUBW+uYBV/oLLeyfno5ncTg/rw9JP27fu5umyH7rTMBlIkHOYDqqe0UJdK2C3l2gTxfoUQVdKqGocj1XH7Arb4s36d+/v2/aSidIlQ+ZQzzOGnEfTJo0ydvZ5sbTn+4ceNCtKRz694EHHgjAghkw71kY2Ntf/xMmTMjBocPmzZvz4nAhGG/q1KkA9ty5c78TpnejtjZ7Bp8QEIvBCSdkKi+6qPqZ++4ble7YMfJRz549erTVoLIzQiaTobS0dD/TNGtmzZq1FSknHXsg1LyvJPeFR0BGU2ZcItbBI2hNskrdE7/hS48TlNQ6jnMu+CWPvDZYjcuFZtSTWj60MbabhggkqePRn3rfdr8jYSgphKpy6NEZ+nSFvt2hd1f13b0TZKTNxIkTtToRvk6vP3VCyCfNL7/8ct93U+PvpvyDOHT3goICAF55G4Z2hc+eAOtD+OcdUBhThDllyhTuu+++vNqJbmgM4siHc7/99gMwRowY0S9v5ncyMDZsEE1ypM6dMS66yB7xk58s/+rww0NWcXHkz0OHDi36rhG9ZVmEQqFLwuGwVVNT81pxgSx59SGwPoDHb4QSxzQhJDR0+CWzFgSkN2St5pp+ntM9NUmsG8dy0nKTcN3DPRFCcPKJx/LPt4ZnmUceye3i0Y1snr9ukNMhT36FUFI9GoHSIqiqgO6dFZH3665+fbupsPvuu2+z0s6HyiGErl27+tyLi4t9c/BBlbipX3M49LTcOe135moM1oZjx8HWt6DmPRg/Gp577jkOOuggLr74YjKZjIcnEtjc0NL0npu3Pn36jGm+RnYOMNatk83q5m7n3n13aVx6aeqMo476ZOvo0aFGwzDOmDHjBZLJ5DeV1zaDlJKCgoJbTdO0MpnMHYP7SmPzu7DlddhvYJZYsuNsQVlhnN0n3ci090QOgUr80tR9eF1COMY4bSyfj9iD6jcFQ9TmESE4/tij+NMzZYphkAcHmnousxq5bgz0xvz56gQtXQPMiGN9L4TKMqXK99xFEfmuPVS4Dh065KjVzRGjlJJ4PO4jliFDhjRpfNO1BPfXq1cvH658OIJjaSEEGzfl1hUoZv76n9TQ7LoLYMmSJRx++OGcfvrppNNpD18QRzAPwWHG8uXLcyPuhGBs2MD61gZ21cYjjpCxq6+2//zSSwfK3r2jq+Lx+O47k1HCmb+9RAghGxoapk7YAyM9F+Y9DqXuIRMOUUL2WxhRAAxbcuAxNzBvaVZN96zpZDtPzlNmv/V4Phyafu32dREf6GgCqrOefcFlLFrtDxvUyb30yLp7lx+6YbX86NNybty/TYfQaHhlnpLsRUWw29FwxKXQtZNafgtQUlLiq9umlo7q/kIITjzxRM/ftWg3FT8IgwcPbhUOVbfZ9FLJZupKgGnDtaeD/QH89ddQvWkjRx55JFOmTCGRSOTgcNPX392FMwC2bZe2WJidAIzNm8NbtoVGhVAd48wz6XLFFY2zjz9eyMLC0DNDhuxW9m2p9s6Uzt5CCJlOp+8YPRhSH8Fr94Dpnh/sSlONYt1vGR2EcKjHsG06D76UtMgTT4+jP/OFDYYn2wddFV+GKnydU0ibNakp/jS0+O5TbzdPsoe68O7Ko6hPa+m52dDoyzbgDOcc1cl7KTX+pKvU92H7qbX1leUqimtYyyeJ9fcgs3/ooYe8MF9++aWvnZqLV1VV5dty2lzYYHpFpbS6rn5yIGRmw11XwYIFCygoKODUU09t0uinGyHd/BUVFW3KydBOCIZhFCzbngSEUPuZBw6En/88c8SPfrRg84QJISsej11pWZZ3SOLXDcOHDzeEEGsymczMsmLY8i588He1BFVoHd7Ncz4DFYXOvK1Ue7A7lZfy9+d7ev5NpUMzfjk4pP9bSsDOgBTOUEL1zPHj9mR+9kwHX5yggPfhLRjF2N2HszlyHltTfpuAHv70a9XzjCNg9WqwQvD0y8ptzCClzsfjKrtLly51ypQriYPSLkiYixcvBhSht0aam6bJqFGjWo1Dz5eUkv49/XXSXF25cMERYH8IZ/4IHnnkEQzDYNq0aTmzC/mGLxUVFUubLdBOAkYikflwRyXmjucnTZLG1KmJm375S1MOHly4plOnTgdYlvW1TEM4hphL586dawno/NIDUP0qlIT0MP4On7O
izPEQkW7OODxLpWeccxbVDVr4QFnbjEP4VXswEUI66qXqkkKAkJL3P++dTVBjKt40nKaeu3kgvitCQLddOrMscSp2YLUeEmwBj/xHfR6wF3y+BI79ufq+/my1ai4Ugoih5tufffZZX31DrnTNJ2mFEPTp04fly5cD+E6j0dNwn0VFRRxwwAE5/i3hcP2EEEzepw11BVk7hwX3T4XGD9S6gilTptCzZ09v3r0pK/3y5ctn5WRqJwSjtrb+y1YMmdoMQkBhIZxyitX5/PPXzzjzzJBVWhp6q2vXLpU7SrXv0qWLIYRYZ9v278aPVoaWHwzRVGM3L2iN7TjoqnS2M0SyRi3HmiUsi6de6ZxD1L7xb1twuEFc9T7SDRA+A507sJ986KFqRb2ejhPXh0/PhNlBaQfA0AF9+fu0Pn5KF3D30+r1J86q/Zo6ePYV9T7Q4S21DaoNO5bDhx9+2CyRtQTdu3fHtm2SyaS3F11PLxKJsP/++7Pvvvs2Ka1bgkWLFiGlZP9RtL6u9PZwXqLAV/+FF/8KK1esIBQK8eabb3p43Hy/8MILAPb8+fOXtyqD3zKIwsLCEZdfXv/RN4FMSiUtZs4UmZkzw3/s2bPvFXPnzku09RzyxsZGevToMWrDhg1zAOY8BSN7aAE0ju6Ng7WGdr9znl1vcNrekVqoSwPWV2+iU/IP/lv/thUHAbormYIsHuN0RqmJfwnCILPqasIas/A6Lf603Siy6w1aFiVSGHzw6tXsMSCb19hYSGXgyVvV94cL4Ja/w/EHwFH7q3Bdq2DsELhvGpx7LTmHRXg4RO7+8eamxfJZr3X/fGm0BsePf/xjHnrwQTKzs9pSs3Wl+TnB/bMqKDtGvylqO+/kyZOZPn26l59x48Yxc+bMb3y7qm3b7HXx7pFV69dcaoaMk4CNRZHi2z75y2fTmlvCbvTq1XPp1yHRcyCVrey995ahn/88dcnRR89vHDkysrW4uPj4//znP4Y+z9oU2LZNaWnpjRs2bJjTpROkP4RRzrhMHyfrDew9ZeAbTZUWUTXF5UhPgdO5gE6VHVlfsyNw4E2/ee4FA7VOJrLjSWfN6tJVWXzeHL2XgSxOT1C5mogEKQXYkvrooZ49z44pIu9UrmwrUsLvnWsbfuTsQJXAyrXq/SS14IzXX389hzjzGcd0i3hTm0Lcp25wa2pFXGtxPProowzsrRSgVteV8+e2j64KSsCwYcm/4fe/ghkzZlBZWemp8jNnzqSgoODFb5LIpZR0P7HbW7UNW5PFRUW/KYgWDCmIFexnG9bzA8/sb/U9tdegpuIaEydOzHwjM2NJYGv2Uwh1PNLRR8uiSy/d+thLLx1qde0aXhSNRgc1t9beMIyXUqnUVSdMgZXPg2kFxqD4vz1t2JG2Dh0QLLOMD8W9uMBreDeFTIa5i3cADk2N9BiFWY6rm2f/wZ3k37gxT/lEdmzpdVAJhHsihXDy7477JRPG7clXG1Sc3/9dpXHJidk2SCTVM5PRiMDhOEVOPz755JO1cuRu3wy65xtbN0WwTS1vbS2O9evXk8lkePDWNtSV8xA6Qwi6O2lcdAQsflHtrQ+Hw8ydOxeAUCh0Nt8Q2LZNr1O6zS0uLBznukmNawmEEQ6HP+twTNlh+eIbHTt2/GYM4yYqU7V+Z+Fw1IoKOO88+l11VfKzY4+NyKKiyKOdOnUq0eYrEUJ8Bkz68w3w6LVkLdu+BPFJ1aCVW3+6jS8ACoZpBKss4J5UMQw2ro/4E9oWHFpQ102QcsI70twZnwtnnO3WD9rTReBKKDcxWTDc6bhuj3U6gm3z0jvlSOCOh5RXf2eos75aPSfvlVufDUnldve1sHr1apLJpI/QdGhq6i24EEb3C4YNptUaHFJKDjtM9e3RfdpQVy6zxQ+6UVVvuL4V0DhH5WHkyJEIIezq6uplfAMgpaTHyV1mFBcVDguFDcyQxvicP+HYeTqWdvx31dFV3YJpGNdee51dV0frjuPYHnCHD4IcYtfBsmDECLjsstSJ55+/vmavvQwZiUQviUajs6WUg6b/GX56EDkU5X1qDej1d/xjNPfpYxKRrujVlz3XXLmmrfD24wi8SxFCUzazibt5EIIq7domtxNqQbIdGyDe1+nU2bGBm4dBoyYiJazeqOLYTqd/dbYKesg4/CDhs8Xqee7hymn8+PHZbAYIrqmxdjbvfgnfVJzm5unz+SWTSWbNmsXV5+Gj2hbrSn93vnWm6n2TZegxqdbPh0yQUhr9+/cvyCno1wC9Tu1+R2Vl2QGhiEEoIgiFDcJRg1A4e86f0lZUYUtLilYEDd6GZVlGdXVuwzQL28IWXEJ3K35rUwEVuMRy8MEwYEDyjnQ6vftjd8DkEY6/FtYrqPue7edeY7mE6KWtx5eAiHqcUTit7FrepZTEwukdgMMfh3AXD4eXrpawRNK7i5aO8KeTM3wwK3VVzoskJQwZtiuyWMXvWJZNxyX0Lh2zCoCrES5f65TJhhsuhNmzZ7Nu3TqtjTTWGFDPW1LdW/PeGhzDhg0D4LqfOvXT2rrSwnnSXvMLPt0Pw4akU2dLly6tb2kP/PZCxdGlJ1dWFl8Sjhh4v6j2ixi4ZgK3zU3ToPvJXR7U0zEsy7LXrBFtI10DJZXbsszdFVy6iGvFkGHxYvjkE7j+fDjOkToCTZLKrIR1QQCuMUY6XMCnjslsGNdNYGn5kw7XFx6yDh1T24cDv3RBgogP9XC4hj93fA2w5MsV6iJjjZO5ODwLspuWVwZ/QHesXlpcypYtym2PwXiEUFOnoOMcMQAAIABJREFUgqbSuQzLtvEcrjpNufXs2bNZlTqfEa05ad3Uuvcg5MMxe/ZsFi1axDN3KQL06rilutLDaeHdvkTw6csHGBYk5qjv/v37b/q6ln8POXPgqI6lnR6OxExCDoFH3J9O9BqxS8AwBeXFpacuXp01LBm2bVNfH65rcy5KUITeWoLXKtqDdOA7DzzyCAzrD1efnnWTTnqe8Uv7Fpq7L7xLlG42ZPaJEUOSleCQtbhLJIRCDOu7nTg0N5ehyIJhWRzCnUuXIBX+V6b9KxsvgIdAungpCY/xKPVVeIHnzFevg/tmVVLLzpV2ulaSymQ9lr0EqVSKKVOm+KRqWyR7WyR+czgymQx77rknXTrC4Xv5899yXWlh3bbQyo3A09iC3dNV7aMSvpgOUsoKwzDu29HEvuKBaJ/icGZOWccIoYgi7EjUJBw1HWluekQecn4ujWXSEjMsmDh13zvc9AyA5cvTzYyam4FCVOIuwTd3MI1Wib7WaEaFdw8N+fgxbbyk/UCT5vmktNAaL5AFLw0BxAc7HV94LFyXKes2rKdj6XbiEHnCmsVaPOn8QAiJNAyOmbQ5K4H0Mmp14SYvIz1RZJ21tgdV0rXOlFnHMn8eo46dMUgMEvhsaTZs9zK45VKYNm0af/7zn5V7E4Y13QCnS/Z8kr8pLaAp4x1AUVERQsDyF7JdqtV1pbvr0l9j3l7cXNQe9O8IZx4DUsqzCgsLd8Al2Ao2PlfYr1P3XktePdgiUhgiHA6o6wEiD0fUeD
0UEl4fszOS0uKiS9yDY4xYLEZdnb1smxiSSXbsDUoVb4ngA5JQ6Y25wRYtglQKFvwX7wx0N3oOV87j7raoJDee9+6EEfFhmvRwpm7IStfnnnxC5WF7cJAr2aVMZyU4womjJPIT/3yKDoX+cnrvMk+Z48P8fs48vOu/buMGqh29LRbJSjDpfAutg+tEsWylH/fUE+CHe8PZZ5/Nv//9b5/kxa0zyHEP+uthmppey6cBSCkpLy8nmUyy4a2syu6L21JdaeG8HX+6v9um0m0n511k3dzw9/0SomFobGz8YkdI9Q1vjdqrrPOARUY0Qr0RIhYxCEcF4aiZVdu98bmpEbsy0glnJWU6bROJhhh6wcAe4Ej0iorO237/cwG5orY5gndZqjeAIu9Y/dFH1bFG/auy8fTTVzwicr2Fn4NDtmF0N49jC63Boj18otonYUyDY3+4ttU4pABp5MGhF06CIKLhktk0BCQtm0PGfOypid4iG5HFo6vXCBDx/v48Oh/CmRD+YPanhJxM2IE8uQtndAOim++Ma7zV8M74A4wcAEcccQS33HKLXrImJbVKs+nxeHOS3v1Op9OYpklNTQ3LX4EOsYDkbW1dZavHJ8k9DUhrZ2/kE3BzrfpImPUESCnLwuHwdh1CUf9K96sqCs13jVgYQiaPbog6RKyr61lpHo4KxwovCIUMTOepyiIQBliy8SZwCH3FinVLtieDFOFnmW5Nuiq9vuAtn0R3wzrg7IFg3rP+xvAaRVPR3F+Qmbpqma6e6eg8tRsQIuwqvVrWlGS//567KY21Hocwypi1+mjqMwEcmlQXAJEqR5K7nVp61ya98OSNFEUC+Q28e99unkLljtQTWlj1LZFs+PJNKpxt5QmXATv5a0z6JTr4icPWmLLLEOY8Aj86EK688kpGjBjRovRubvot3zy67ielZM6cOUQiEaSUVL8D3Uu8LOWtn2brSvrbUu9XOQzZbUNNLRBaHAEM7QYFMbBte3pOJbQCLv3beQy7pM/ctFlyo8xkkKkM2DbvbI0QDgtPPdct7d4zrNR2M2woYg9lF0wlGixiseiJGSujCH348GGfNzcWaREEECe3dt1acyW8xiFzdCmH0KWEv/9dnfPl7kDTVSrIqlv6T2juevLS4fJuPj2VVWTVMonlEaDUEK3asIkzDl/jubQKR3woe44aTm3BRWxNOemLbDxPasaHeolIpBNO8Ke7fsvhE2xfNenvejm8upCAzICj/mfDOgY+0+ToA1L07qX8Vq7TyiqV1La9xAMg1Xy6p0Fp5fjnjfCP38G8efMwDMO3TLY5y3xrVH333bZtxo0bx+67707HcrA/grKIVgdan2tVXQXK4YV129DtF5qbL47m77arAB6/HWzbrmjtDTEAlm0Zgy/p8cv3Fr8piwoLhh33msBuTGE1JLGTabYK00fkWdXd9BnhFKELjJDADAkMUyCkIJOyKYgVGEjHKP/JJ59uH6EDhMn2HrfGc2pd89e/3XBptRTTtuHmSwPp6+Jbi+IaH6Turr9rzCGIXwjALHI4s/QkHAgsAY3L78SULeAQIJ1LD6UACoYBki6VHVgtf4Kt7T7zScv4sKyHFGCa3HfXrZxzVG1+DSTfu6staFJcP53WfT7+6BMUR2B4H+U2zyFcKcFwbCxmU+0vYMlKLe/4y3HCBKibBeXF6mz00tJS7+jk5ha/BN/zqfFnnHEGpmnyzjvv8LtfwLoXQViBvLS5rvKXQ3fThz96OHR/PT0Bh+yl3gsLC6fSBGSsDJlMhvKjSyb1OqX7zAnXjrAqSsp+40rjRmHww9dM/vWlxcNLoSCiCDcUEYE5dOEZ4ExHipum+hmmInhXfTdMyTG3HVQRAjAMow52wNbRIrKr3nQ1XifmZjoUjbB2s/o8/wQg4ZfmPv4gct1cXPmmRgRkp+N0NS2+GyCQzlhWoO4be//Fa9lnSCtwCIjsBQVxdcBij74vsM8ePRk6tCvjRnflH9MGc+IPP0OksnEBxWBQC/XXVm9i4ft/4OyjNP8gXg23XqVSAJEeuDvtdCYngQySH46ehxDqhF+AOZ/jMbXuVbBstdp/nsr4m8fFkUpr+LSKdW0QBQZsehVmfgETT9vKsGHDEEJw9tlnc/vttxONRvNazoPLXC3L4t133+XUU09l2bJlABy8Lzz3B7/RTe8TLjNvdV1JTasKlCNYd4GkskMvrX95fpbqA2nLuhj4P1CnDhuGYYw6f1jBptrqO3b7ya6nhsPhSFVZFZ17FnjMJ/tUCf91lTrSrLKLgQC1Ei6SVcttC4SwkRJsS2JlFENwpblpCNKuAAKWrl+8uwCYM2dW5LnnxiQ9Y0QKNa428vyCLC4IkhZXveWtQQdefh9mvq1OaG2ykl2CzZeVvI7NZLfDaYhoX/UuJWuqt1C75A4GdMum1RKOmx+GTIXSRPQptFQKamrUtNaKFbBiuTpOurQQdhs0iL326Mb6Je9y+RlbKS2FQqdxgjesNp15Jx8lh0LhHjkZlcB9d9/M2UckVAexIbSnqscnHBva63PgnifhxnOhf8/cTo6D4qgfNCP1dZwSFm+EYy6GTxa6ar6grKyMXXfdlX333ZfKykoKCwupqalh4cKFvPHGG6xcudI7zCEahl+dC1eerhanNFkHbWnrNvaLbYFTroVHnse2bTu821l9RliW/ed4LDpKCMOnBRYUhygqdVRgrb9kn4poyztFkRKicdNZ8qq0HtuSpJM2qaRFstEi2WCRcJ8NFomGDIkGC9uGkoowdcktv1cMUUouu0xYJSX499xJoAEl7IOVlG9M5zIBffCTT0TkA8dv2ovwwWeQeQ8wAh1P0xJ06eJyZB9nlv4OoofVsyJ3uQFQ89Z/+8tfOWXyMiLBnYct4JAGDDgOTjyxecJ0hZceRgjFEKqrYeUKWL0GVq0EaUFxAewzHIYOgb0GQ6+u6pTWqCB7RzogdvkFiEKvbqSD7LHH/8UJEz7xjaBGnwwfLYDHb1ZnAyRScOo1MHxX+NVP/XWjw+BdYVBPf33mK6pPAzNgfS3c+ld1TNWajWovgzvGdeuyvMRg8MDuDO21hctOqaFPVVbqNofDK1egfby6Fvjui8+XXqtxkCuX9LLaAm55qYQnV3bGjJgIFGPsVWiztCE7By0EVHSOZdPWpLnHDASEIwalHSLYNsTiJpGYsrobBmQyknTCItlok2zMOMStEXm9YgBWRhIvClFYHHo9BJBMJo0tW4RRUhKgXoFaFAOK2Bv00jUBroqVUxuBWgwyAAfKilQHD/q7Y2RPVdM4gKuy5l2tpjsFOgPA5voGnn3in0zZ+0t+elAeidYaHDYcN0kRTnNTqfmYgJQQDkNVlfqN1txdqDfgtS+h8XNYv15pBytXwsYNkExA964Psc+e3Rk5ugejBnZily7lvPvadI77wSdKM9Pa4oaL4dBzYcZ7cNDeEHes+3MX5WdEbn0tWgYDe2pFb4ZCvGksG6qK4Y5L4PaLtfSM7DsSVm602aXTV2r6T7QBRzBMvrABwnf7Tptw5AkiJaSF4B8LK7l3bhWmUKsS48Uq3
vXDMkzoBNhgJxM8sMzg0TVxQBJyx9CuRNcQuEMcpaobkLGVOh5Wc+WGqSrWyggME4QhnJ+qV7XPP1tWK2ODoIeX/wMPRO61F62DJNnpsCCbC1SWr5ZaoT698w68Ow9qXs+TflNstQ0qma4JKLZLy+aJ1uAwYPBJcNxxrc/LjgJ39sC9g2DrVli1Sg0Zpt8OpnZIpB0Cc4S6peXhG1SnvPpP8MUyePA6iMf0hLOS15ZwbFNXowcIVOZxb6rJ8jXtTofD/TBgfk2ce+Z1YfbGImKmyxmdh/PsEJE8Pg5EyATLwk6ksesTHPRJKQZQXJ6dO3XXUWR3HKrkwhGT4vIwqaRNvNAkVmASKwwRCguSjbYnuV013fvVZ7xnJi0xQ4KKqlgiBGoKY889jTqUOa1liDo/CTQCwSsg3BptSqrniNosxGJqoYyU+Bbq6xtCvEbUhwh50Aj83FtqWXMPJPCIPNALcmi7JRw2TN63H0Isblaqfx3g2gXca/LicejXT/0MC3+Z0urqpaWrIGND2ISfnQjn3AzX3Q+3XOSUSTpN6NSJgVLWzGD9CI1xojWt7t4UFer9QObW+beOw4BFdXEeX9KJV1ZXEDayhF1SiKea+CQycFzPDEYkhBGLYKcyiIwNIYOJFWnea4xjht0dkm5c4ZXHVeHNkJu8eyAJSCmx7WZZVRZENpwQGCFQ94FXV4dSUmaaN/7kJIZaGQeK2BvJX9lBgm+K7QKdO0NtLYgQSNcQkye+K2m8sRj4bi5xVW0PjcZ8ffxB+hmCXoVtxfG7G05l94Ou4fDDW6i3bxq0TiwEvPYQ9PwBXHMv3Hy+mhoDdTZayFAbXfKpxktXwq7dtPpzeqBbfzoOnYCAnH7l1q17wotPZedbwAGkEbyxvpzpqyqZt6WYiCk9HIVxF4HIsbG4dexOc67O6CfHZnvUBiNCJJ7dR+5G9IY6WjquWi5tkLbEtiSZtLo4wspIbNtdIpytI239M2r5s6MpCOwQQDQapaoqshqoYFtA4pfqOdSi12qeuNkyU1mp1rlvaoAO0UBS+RpK5Pr7GlDj9Fqb+Aw4bufJuU6pjTiELRg5eHeE+OAbl+qtAbdzdS9Rlu0lK9XFDbaEn58Cv30YfvN3uPxUrcgaQS1YCv265frlw5FPauatk6YY7HbgkHb2O8dkJECYYNVXYjVUIC2bT2vh+o1lyhLtUFthQTZvurR2//IyGIdI36g3OT9Vh21LsG1kJgOWzdpwjChquixL1JBCEHUFmpemwmHbEiujfumUjWG4U2oS21JMQNF1lug94kcoewCs9E4/FyL6MaSG5GmKXJCoMbq+zVQjqCYldz5pH0g3ElGZ/NVd8KfLs9GCa7GbS9qXFSee6xA0xnkdTeZmJ1+xmsQhAWlz/22Hs/fhH3DQQbkS5tuCYF0JAe89ASOPhItugzsugzGDlfsHzhy7FIH6Axq0PQm6stZUs3phZP5uEkyryfw679KA4cfC8jUQDinDrWkqZtWQhE1boK7RScNJJBQ1qBpaTu9xlZT0KCIcDzGxJMm53TIYhQIyGYaKFL8W1fy6rmM2D3kkdXOE79MWBJyxvIQ7qzZTKG22pGwure1CNKbSNZx5yt6hNFcWb1HEmkozNbULdZgecpcxWhlJJmV7w1nbVtLdsmwsS0l723Z+GsELIBQ1AbHcI/S1a7c2feOEjSLsDOQQs95awVbLEY0aNEMEBQUwS99mIwNJBHqGtxAmkLTPMi+aRZkfNDwt4giVIVH3tvXvOQLD+HinlOoSqEvB8G5qqm71RvhqDfTaBW67CH7+B/jx9fD361R4r5xuJ5NgCH96LeGDrPak950cgteYSr6u89VG+FQ7pHNTTTN4JRx86wiKOsV889MCOK9rChGLIUIGdkIgMjYDYo2EM2ZWqOZIa+EjZHLCBZgB8LOaSm/4EImLbHpOwX/VoR5hRhWhG4L/s1dzvuiF6YwzbUsZ0zJp21sHIQwlxTNph9gzDqFbjiZjK0YgnTXNkZjBlvrq9/QZ4+zZ7jZqvF3r/OrJEnlToKse7pghSFmtYQDA6NGqE76g3yETYPO+0U8gXZ901vx8qPJQvZem8+JT41vCUTDce3/gzqOZMSM3/W8LfOW2wa66go8Xw3LnCqapd6qlsN07q4sVGxLwyixtWEO2wy9f50+3qSaVWhzPX1eHHEeffGiCyN3gvcphw9swqG/LZS7oEKFDj0KicZNIzPSekZiSmMI0EOEQwjAU5xJGNlzcJKrHcb71d3+4kBdXD5fFG/KlF4oYTCloQETDGIUxjFgYEQ5ByGSAmVB7y8MGliWJRE0yaZt0yiadskgnbdJJm0xa/ZRaryS7K91ddR4gEjWxLGuGR+iDB3RfLlKoDSju1FkINf3kzsu5q+N00Fu6KSnvhtPD5CN2J4xhqEY/5Cyya9TzgNDjatLAWzyhofX55cmWb9wnsuFajSM20FHxJKZt06VyyLeuukst316Zw10oicUYut+NfPSp4PdXKucfX6/KdLuzx+C+p6G+MZcffrYo//Alh5HmYRJuuGCda1G8cHlxoLamfvo4ZObCT49uuuyjju+ZQ7DRuEEkbjAnGcZOpbEbk8iMBZZNUkpicYNIzCAaM3yEGo2ZROIG0bjrZ6h0PcJXfhHXz/H38HtxlF+swCQScg7mCJmqwxuq04VN1Pr2sCI025YOkdsOkVveew6x69JdZon9nEk/+9Aj2/KOPdaaBUAMtRMtjrKoFzq/Iu1XjDpKqkR7jwRaJ9hiNOGW51sI2Gcf1cC/vEd9ewqDwHcKK5o7kJ2GcwLokjdftjz/JsK1GkekC+54TgjBY/cfx2uv8Y2Cq15XV8OTT8Ljf8d3YQSAKByhxm5SsscB13DkeBi6K2yth9seVjvZ/urcsvqTG/AWt4Cj9jc0Uac6DrQ2k/74QWXPDeMxTOmv87w4hLod9y9XqOuPn/oDRLS79gBm3ruYWGGudI7GTB6inLrGNHZDEjuRwk5luKqob0BSGz5tICjRI3FDe88N5yP2QLhYoclLlGCnMtgNCWQq7TGcL+NFjkQXxAqUNM+k3CWv2V86ZZNJucTuqPCaZFc9USJtOHvyxXVe85x22qmxPn0eatymcaUNuKfO6dShg86a84rUXP+nZ8C8z2DTTKiIaOGDkG+YIGl+BVS+OLq325lFM+F1HF1uzEZyBpsHnvwwe09Y9LWP1W3bWWj0Dpx7Ilz5YyiLBsruQtXFEOrgfX7yxSKGlD1MfAwk07D/HnD2Uep02PueVoLmyVudpatOWY+eBEa++nRhWzSZptJrJQ4p4f0lMP5kSGszQMOP7EZBecSxgEtvDGulbJIJC9GYoTEhsVIWQkpe/ZPEkJIQFiEkYSyN4UiPIVkIktJk33NDJKVhT7yj/KC4WVJSFu3UKS0bewvoZxqRLkLQxyZTGRFRWxghQ21mUOPsXWu3cqK9WU3DJdP8oaAr68LO/X/OCrdVi+tJJtRiGMMQniaprPEOE0g5xJ+wSCYsUgk111BYEkJEMon3bp4f91XXVVchQwHO2CoInjiXhyDyEnQ+
0P1icL1zbpw1B/8KNo0ARQtp510LDd5BD3qSXl/Sx+gt4bBBdrve5aFKhZeStGFw6OlXM3ZsE2XdBnCXzH7+Obz8Muy5G1x7AYzsDyLtL59eLleVFl1vRHW27M6x++6+mbOPSRLeXc2h7zMcLjoe/vkiPP2aIvYnbnVOhQXGjoBulc3g0OooZ5ZDLws+vui1iV63bcUhJTz8Kpz2izZUKrBLR1g1A28PgYsjb9d1Ps6/He75B0Sj0T2SyeQHTaVt2WkMEcKSadYnlhu3Lzi1X9ws3quuseGUMIWTbGzbEMrG6bf2w5JPajFDgk3rkpiGmymJbYNlKWmecVT5VFIRuZVRtdapW5y6hoYrPrxt8S1eldq2bZx3npHu3DlnFN48bCU/p21KqrcEerwIyChcfz3sUgmrXsj6+8baWucIEqveSjlKQ6BT5Sgjun9zOMyOUHWhL19qSkmy34/+zg8mf7nNUt1Vx2trYfp0qN0Et/0CjtlPXUeVj4HmLadbxq5K8/CuKhKwtaGB4tpbsIGCMWq7as9d4LaL4dFp8G/nMtG/Xg1FBVBcCAeO9ePSJXCwyqTm5wunaUS+9mipHK3AIU3oPhlWb2i5ji8+Fe64SEswgEPH4/pP+xgOORMikcg9yWTy/HzbcFsDlp02DBGyL5s1ro9phu6OmvEDvXIAG1c3snF1gppNKWxLZtd+2Cg1PaNUeE+qJxU3FgIqqmKM7T8h/NvT/pjxiDqTydjr1rXxwrh6fBXjtUiwU4fIrTUC3/kIwVmhdcEFaufTiBOzeJpq+LzpO25Sf+YJGxTUItgxm8IRH+IVQjpIpJPAiZNq+KBJXp8nm066lgVvvAF3/zFCYstE+hV04dOHYfl/4fjxisjdcbDHgPIRh+vuOdiaGFXP4nicx2cIDBsS70PnDmrK7fgr1cWL/3eJSuenN8JjM6C2LlsfOTjy1ZHuF3j3GHdbytEKHIYNK6fBhSc1XddCwOLpcMeFfrfgbIHbJhLFmN5boojcNM3P6+vrt5nIAUwjbAshuH3PmUtv2/2Ng64e9oxI0XCnuzmlqnsB6ZRNUWnYG6d7FviUY5hzjXJp28tneVWUhkSi7jcn3ZEBzYZuGAZbtrThDpYUflU6SKxhsoa6As0vKCVbAR06wOmnw7yFMMq568/tGAKyy1sDRBlcGOM+9H6mp+FzD6bVHI6C3Xwpu2H+/d8ZnHV4NdUr8nRQDdyOtGgR3HuPYNbM3Tju4J/xymM3sn7er7j55xMp7rCLx/jQ8qqXSepEk8890g13Halyl26OKe06SZXZhtUz1MEftg0nXw3TZ8KqN9XClGdfh1OugRU1zeN2333qufPLZwBtUzlai0PAnZfAs3fm1vnwAZD+APp2zDKWJnFr/fW1z2Ds8RAKhZZZljW4rVd+twRRM87/jXzz4s2pteGUnVpuS7V/PZ2yCUUMb3rNld7ppDtOl97QKhRSe9YbEo0HhUNhf50lEgmGDy9oPOEEO5Y3Bzroxjc3FYki7hj+lgA1J5/WwoKf4A2UZV+SPbRCaOmhGmDZMnjwQSVxVr9IlrkEJG9eG4Dunw9kgBgdtdKX16ZwdHHGvZrftBdf46BhryMkZAw49gZ1p5xbFsNQN6W++CIYabhq6g845siJhDPOIFHHBzz13EscvfvbOe4+m4PvJeAOUHwIFO+JdwunllZjJk28+tdqvYTjvrQaBh6ijFuJOao5DroIXpwJB4+H//4O/zSkXn351HUdtqcc24BjUwL2Pkkd4vjfP0HXYr8WEcQx+wsYMiDbnaWEB2bAT6+CUCi0MJ1OD9geSd4akFJy2Udjjy8Klz/2/gvrEAZsWpvAzkjNIJddNKM0SkF5pwiJRKphzu8WFLp3pnumt1gsRlUV64EeLeZAJ/IwaiquOWjq2nOJInD9AsY4ijH4cqc6Qu/ecNFFcOedYI6GrbOg0N3hpkm64I41N223A0ntW+/vQXr2iN/thPlweEOBrN55/31/4qzDVnlhQzasWgyDByvr+Mdz4PyT4LYz4Q+noKawOu+htpMBEuEZfNzO3FBfm8UiNKmoVZ37opfDk3QCiO/q5El4dgQ3rXgszuatUB7P4uhTAcl34dm31M0koI563mJBaRiEnQeH1l4e4Wjl8OV3W8uxDTgqY7DwaXIgHw6AvU9VwyfrA+V3xFR47jUQQryZyWQmfN1EDrir7R7/7fxTnu49pH7roo9rI6UVETauSWCl1ZJXy/I6IFIKyjpGsNKSDsWdurtEDj5SglCo5EOobZ7Qa8luU211jslV2U2yh1ro4GpCiWDuFCxdig2cKeGvRWNg+v0weWS20fW2zjHQiMC38+6paxKfcUi3ukMTOMCT5ivWrmfh7Ls561C8gC6OmX+H+SvhhmNAZJy0LEXkEsAoBplRjZtn4X166xof0qbW/et+HtNyGVWoI7jTRUEclsWGaijvmovjiOwlqkig1AgQuY5Dz1Mwg8JPTNtcjq8Zx0dfqvUED9wCtgGVE2DLVohEIpenUqnf8g3Dzwc+nLKlHZ04Z8gXWKH+JRURMmmbzeuTSEsVoqDYJBxR8+41dTXXzv7d59V6Gj5SWreufnFz0yGAGnO3BRoC30Epng/CZAnegXnzYMaMgvNramrvCYVCSCn/ZhjGkoPOkn32GQVv/QWEYzPwCFejGV2C6a0e5D++bajOizsu9oV1cYSreOXNWWxeOZ2jf2DTY7SWmIYjLGFYV5AZTTrp6ciMF084vU9qCXXqsCH7pWks+nSULqGDUlKlYmmnxObisLJZ2A4cWUmad0mr1Nx2QhwI2ONY9dq7G4RGqfdIJDI4lUp9zrcEhjCQUg4YeXm/9wsi8TESKO0QVSvgbLUaThH51ocW/3nVDeJ+PxH7CL1v375fSLlwxy7ddMd8AkXcbbhRWkpYsACefjp0eSqV/q0QgpAz0e+cINo3Ho+fM/PDxL3maJjzJIzo6cR1ExH+hheQ3YjiOgTCeGEdd59GKPxhyKxj/37PQz/cQ123AUfIl1/11NSBkMn+e2Tz48uD1vt1q7PHpLzKVH/Sp/uQYSYLAAAgAElEQVRqOISgoGAH4BCB+gqoP/p8986I4zePqo07sQjsdzIIIVaOGDGi50cffbQtl4XvUHBU+T37n9ft5NJ4ycMSFJGrOXW7urZ68ooH17+c98Rd/SMSiex+5ZWp2TssZ5Ksca2I3HXyzcCiRdhPPx29ur6+8WbDaD7iuHHjYm+//fYqoGK33jDvaQhlspJYHzp4Gkuww+TRZHK0m0AanoDYXhyxAdDhJC2+e7SQcnjlzffZv+/zgJYu5N2fn49HS0CEukOnM5vEIU2T1Fe/IiK2A0fQTeZpA/dzJ8Sx1YKSPbPfpmkenU6nn26p/30bkE6nGXv5yIoNNRu6jOk/duVjU5/aEjKbVpN9Ej2VSlWb6pirHQMNND0WbwK+/BKeeabw1zU1dVerucSWK/ntt99OSCk7mKZ54oIv7YcjIzHOOxH+eJnWmK7K5nL7gB7oEqzXAXQpLAJqn8gmKbXvbcYRH66YgOMvPC6ijGbhugC
Ri9yOmvMtA/mMD/GlEcSxeu1auphkrx7aFhz+asjNbxNMcGfAgQGV49w0xXunn376vg888EBmZyRyAGdarxqo/opnefKK5sbCARl7zTVXrW+GKbQd4rSayJcvh1//Wtx+2GH/Mmtr669uq1VTCIFt2/+46KKLwqZpPnHPP8DcXali+jU6OqG5FnWd6IISOCipwUlLS8fnvi044rs6hQB36ks9JS+//jb7jsgGRYvn+/Z75YgrER/oOQVxICWvvPCSd2PsNuPQnGTg6Q1jtrccXwcOYI9T1EUVBQUFYy3LGvvAAw8ET0L8ToOP0C+//Bd1iUQbFs20KfVckBLWrIFf/9r405YtR4lMRl529NHHbBf+O++8085kMscNGjQobJqhd668A4zRcPXfyG63xd/wOR2G/J3MR6Can2fV1VXItuAwCj0fgXBmBwRJ22ZY5xdzOndwzb6eqD7FJHWvcKXnpuOQDjfareuC7cehuXnSV+DdaaaPtXc4Do+zthGHhFNuULfXhMPhexoaGt7bWaX49oCvROFw2Ni6VXztpZQS1q2DW24J/+3uuzMik7HPffrpPJOc2whCCD7//PNMJpPZ58QTTwybpvnOTfeCMQpOuR7S7n1oaOq01ASu24Ok1mlkNqxvLBiI61MVW4tDqsPXpY7QELzwrxuocmc5nOGDqyFo/drr3LrK6g0DcDt0JrsSThtqCARz5n7KHrvtABx6PTl+81cqRtsgNZw7GAdOfXpurcQBcMHv4dH/gBDi5XQ6fT7fU/ARdSwWs9esaSro9oOUajXYb35jPvKHP6REMpn+aWibtsu1Hv7xj39kLMva51e/+pUZjUaffeQ/EB0FfQ+DBWuzDa4TZg6Ba51IJ1TvqUt3/J3TJfAmcYiYhsPtlYJH/nodh40LyDERwBEAbywbfKJulhCaaHPTkEJgbXzCRyTbhCPwdN87O8ewFe0Bq/RVjzsQR740WsIBcOyv4J5HAXjPtu2mTq3/XoCP0KWUbNwod/g0gpTqDrLbbjOf7dr1ZjOVsk6JuLcNfENw00032clk8kjLssxYLHbxsrWCgVPA3AMu/SNY+omzWXrzq+U0PdZz/byfyP68cbn7reOI9lJThU4CjdLmlX9fwykHWtmpLjd96eMTnmrqe5Ibh0hX8HDIbHwp+fP9f2HMbjsAh8zG0eujIgpb31ff3SbCwy/teBy+MK0phwk9psC/XgTghXQ6PfabWOn2bYKvdKlUijFjYvKII3YMrUsJiQT88Y/ixV13HX7Ixx9/vNMYOCzLoqioqFc6nf5PJpMZAmoL5m+nwplHgkhp6rVLlFJTy3HcNdXRm3YLPP2R1LenyZYfg4wPBcPgH489w5QxH1LiHBrhMhh3kYvU390k8/RPN6+eX/HBULSXT02RCN5+fw7jev7bGT5sJ448AVxvCdgmlO+rTrIZ3A/m/Uut8d+ROFqqKwksWAeDpngpXAzk2fLy/QOfRI9EImzcSHVTgVsLLoHfdVfkzZdeGhGtr5eTdyYiBzBNk8bGxmWZTGZoJpMRkUjkyLoGtpxzHZjDofIHcOfTkAxp0iQgmfNJk2DHkzIbVl+S6wqQOqMfDzz4D5bOuZoTx39IaUwLQzZN9PeAVqDj9i37dd0KBoLQbvwAZs7+kL26/1sdtLAjcDgvMk98gdo2WvOG2hX32WJ1LdSDL2t1ugNwNFeOjAmjT1VELoSwge78jxA55OGVvXqVvHH66bXj8wVuCaSEZBLuucd4p6Rklx8sXfpl4ptW0bcHbNvm3HPPDT366KOnNzQ03GrbdgWoe8qOnQy/+Rl0KQNhZ1V6XWBLzSEo2YPuoNxsnGOZyNUgdEMT4NMevG+Rxa3G3Nq3m8cuNyNl2hsiPPDAw5w2eRHGjsThSlftO185pIQ6G3pMUuvHQyY89Uc4dI8dh0MvR8aAU6+Cx6erb9M0H6+rqzshHm9pJ9b3C3IIXQgeuOYaTm/LkEVKdfXvvfcaswoKOu+7cuXK1Hd9ikJdJX2Zcddddx1gWdbvpJS7SSkNgPISmHomnHEUVMQ0ws9H4A74CB78okdo36KNfjkZz4Oj6/WA4MtVa1j8wZ/44R5yx+IQ+aO2hGPmFzDpx+qsOiHgrB/BHVMhFpDmbcZhwKL1cMjZsHi58jIM49OysrLR1dXVqTwl+t5Dvma85JpruKM1dCqlutjv3ntD8/bZZ/Iezz3339T30ahhOUsFx48fXzRnzpyrLcs6K51Ol4GqwHAYLjwFzv4R9N0FhLYt15VOQYkDWYYAmsTSpHpweK9Lt3ydX9cqhBPGwmTaJ+Owt7zBYRNQKsQOxqFv2fUkLK3HMXMh/OgiWLspW0fH/AB+eQ6M6KsYKZaG02WaLr4QbE3Cv16DW+9VxO3mwTTNF4cPH37orFmzUl/3DM/ODDlU2bNnjwNPP3359OboVUq1TPbee8WC4cPHj3z11TcSTYf+/kEqpYTCoEGDem3atOlnmzdvPlkIUSal9C4xGdoPzjoRDtgH+nUG6Rj3vGp1O6mumrruoFFFVl0NvudIvODT9dYYihf+28YhQYaVNH/tA/jbMzDtTWhMeinYgKFrSO41TKBWsW2py6c5CdswjI9t2/6Zbdtvfh8Fz7ZATi107dp12Nlnr5pr5zG8uxV+771iWU1NdHgikQie//o/C6lUiiOPPDL2zjvvHNHQ0HBmKpXaDzDU5hEVprwYJoxRxyXvPxaqOoBIgLTViTP6OBQ0wtGIyxujBhkEeWhQYyY+yfoN45Bhpfm9OQ/++za88TZ8vNBXfTZAKBRaLaV8aMSIEY/Onj17weTJk3u98cYbU4DD0+n0KCFEmW1ne6ZhGJni4uLlNTU1bxYVFT0zduzYadOnT7f/lyV3U5BD6Mccc3TF8OFPbQpubDEMuP9+Y7Vtdx6+fPnyje2V2TxkMhls2+Y3v/lN0S233DJOCHFKQ0PDMUKIkHPMsjc4EgJ67QJDB8HE0TB8GOw7EIywGgbINAgjIDFd0AnUeXrB8hEgeaRvS5AHB246BsiISm/xSpj1Ebz3Obz3IXy6BJIpv8Q1DAMpJeFw+J3CwsInGxoanquvr19mmqbdnPRNJpMIIdzNHEgpSafTRKNtOQHlfxfy1uwNNyBtWzWQacLf/iaqk8mqoV99tXz1d8mKvrNBIpHArb9DDjmkYv78+bstW7bskA4dOuy/efPmUbZth4QQhtQpgyyRdiyHDqVQ1RH69YTBvaCyI/ToAt2roLIQYgVgh1UkIQFLjXGlM861bXVilfezIO08k2lFmO6vpg42VENjApathM+XwcqVsKVWXc7o3aOuq+VungXYtkAIcX84HJ5RVFQ0b9OmTYuTSaWbtxPoNws5hC6lNC67TKRLSjAeflhsWbeuYPjWrXXL28c6Xy+kUikikQiJRMKYP38+++yzTyVw7F57NdzlHirprtRz76ZzDabut+7nQlPNFpyG0iHfrEFrwJ1efeIJsWXduvhxmzZVvxyLxb71AxvaIQ+hJ5NJo3v3+Iba2sjIxsbGdgL/hsG2bcrLy0ftumvNzClTaPlE3m
8Z3E09L70kEh99FL72mGOOuf2hhx7Zafdx/69CXip2pUs7fHMgpaSwsLBHt26Nn510kiyy7TaOo79BkFJpDnPnYj//vPFEaWnlj9etW5doFwo7L7S3zLcMqVSKHj16VIbD6z8780y7U77Zjp0J1q2DRx/l40ikw482bNi4uJ24vxvQ3krfIlRVVcUymQ1zLrjAHrQj6KXN1vRWpplKwTPPGFuWLg39tL6+/ukdfTtJO3z90E7o3wJcf/11xm233fjS+edb+8dibSdOdzZECFizRthLl8IXX8jqXXah8oADtj9/UkIoBC+/LJgzp+CGjz766Pq+ffvZ7ePu7y60E/o3CLZtE41GHz7nnPTJFRXNE7grnSMR2LJF2PPnS2PhQiNTV1fw6po1Dc/H47E3x48fP+8///kv++03oaCiYubWkSO3T6IbBsyfD9Omhad16NDtxwsXLlrfvl7i+wHthP4NwNKlS42BAwfcdNJJ6V/2cO7B0aewTFOpx4sXGyxeLBJffml9XlsbmhYKhZ/ZfffdF7/22mt1oVDITiaTOfPPoVCo80knZdb07t32fLnTZzU18PjjLGtsLD168+bNH7ZL7u8ftBP61whSSkpLiy846KC6uwYNgnQa1qwRfP65TCxZYqxuaAg9Zxix/5SWlny6YsXK9alUypBS2q1dTBKPx0ecdVbjR+Xlbc2X2qvw7LOiduHC8PmNjY2PZDKZ9kUs32NoJ/SvCSZOnFj01ltvfRKPizdNs/Clurq6af/852O1hx56uB0Oh+1UKrXNhOVcvnDwFVfwfGuTcDWImTPJvPGGceegQUN/9eGHHyXapff/BrQT+tcI+VTt7QUpJZFI+Jwrrsjc2xoaNQxYvFjw9NPhV0eOHHP0m2++taV9Sux/D9pb/DsEUkqKigpvnTq1YWpLy1NrauCpp0LLVq/mhGQy+Z65Q2/maIfvGrQT+ncIQiH+efXVHNvc+vT//EdkFiyIX1hTU/snIQTtBN4O0E7o3xkoLOTdqVPZK3ikcTgMM2fCK6+Y9w8Y0P/ijz/+NNFO3O0QhHZC38khnU7TuXN4yYUX0geyknvlSvj3v0OzamvNExKJxNL2cXc7NAftvWMnhgkTxhsrV761+ZRTZAmoI7Qff1xsXLs2ekJ9ff3L7ZK7HdrhOw577713aNQo0XjttcixY43GsrKSS4888oiQvbPvemmHdmiH1sFBBx1UUFkp0vF46OHu3bsXuIdRtkM7bCu0q+7t0A7tsNOBlBIpJay6O2bXfhATxbt3lrUfVZJYH5HJDQlK9lhPzafV1sbZKbPHaRkiZanQ4JtsKSXti8Da4X8V2gV6O7RDO3yjoIS1hViwXwWy/lgy1QZDl94jjKxt2fr8xDKSK76QkZ6dRLQHmGVqD1bDl9i1nyOTG9V+DVuCLZG2jcw0LscoHhs7+NPVwmjfdNkO/3vQ3uvboR3a4WsH204jNj8Na2/bi/n73CWM2CgpTEPIEBSMHqhu3FIgrTT2B4NCRLqWiYIBiKJRSBGG+sXI1EbIbFWnlzvnkkopkZmGtcQ6D49Oen9LPmEupaTmjmhEhKN9ZGJryqwcmAr1nrA2Nv7ujES0j+rb4XsB7QK9HdqhHb5WkMsvibBg35tkqPwSCoaHhFkGVi2kVoBM2TKzcZDAXOBFEBLSNbYMd7GFdIyIqWqo+xzZuAKshCPMUU87XSftxsnxSbO36KP8IBhF1hgRDr9BYYUhrbWkFz5Gav6jtmxIPVd9feSUsqsSdYbZLtjb4bsL7b23HdqhHb42kKtuqKTxi0UU7vlzUXZESJQeAtF+IFMImQBhIOy6kxBZViSMCMR620AddgMyU49MrkY2LodMgyPE1Z3ZUkrsdM2FscM2zGtOmAt1U2gvpFSXnjoKgZAYIhY+wig1t9bcXjBeWu0L09rhuwvtAr0d2qEdvhaQVtqg4YMumMXdiA+F6AAQMcish8xGkBkAAxmaJD8ZuZu009nIHQ5NYKeXyvQWZHozJNch01uQtqVM7KBG6HbqQ7PzIY8bLcyZSytjiEjHkWCq+0Bdcz1KORCGAMN+csvv/5+9M4+Tojj//7u6e8492ItlgeVGUFRuREVFURDjEeMVlXgQj6hJ1Jho1CRqjMYY81MTE6Pxil9jjPGIGo/EW1GieOEFgoCAy7ns7uw1Z3fV74+emZ2ZndldjgXEevMaZrq7uqurZ7Y//Tz11FP9B/feFdFoehct6BqNpncQoHy7VWOWocw+IExQEZTThFIxVNLCVsIqRYkbVOO/0/cjc9jPo9jhhcQbIO6KOU4coWTa1a6klDLecqN3n/u6Navji/7sJRE7COWk3fUq9Y5KTSVYpaLRs5S0e/WyaDS9hRZ0jUbTKygMhIpZGEEwfCgEStmgEqAkIpUPTUnAOpaVl30jJaZSJiDe+JyKN0ii68FuAyXdSPeUde1EQtbgU19MHqDweThxEm9fsxeGnEjG/qnPosNiN4QRP0yhI+Q0X030D1ej0fQSDsjIRlTCEEq6Y2SFiRAWApF0m6vklGACJUoekO/uOVI5MQQSqo/7D/H6TSq2DmW3g5SZYixRfOSZ+KdGYXi7Pg1hGErJXyAsIyuYLiNKHuWeLr6aB1GJro+n0eykaEHXaDS9gmF4JUVTVygVW4psASQIP8oogZQIp0UV3MHmxjNyxVVeMDBH/S5OouX/VLwR7PZk+eScoEoY4PsMlejyHqakTeudFSdhGke5Fn7SzZ8WdTpEXcq1CvmEYfp0PkbNVxIt6BqNptdQVWe1CKf5AeKrwWkF4UOYlSCCoES2C10plAiMUvX/ett554BilJQ46jbijVHsMCqV91glI9wT4ajqYkYy5di03TdwlPAY96BUHuu8ox9dADiBf5Rd9MXa3r8qGk3voAVdo9H0GgKkKj/htyr80VpiS0HFUVYFyioHzPTwMVLCrBTKCI5X8dD/7NeHBa1DPl+NE/sLTkQinSxBFsKyCqW6VI5N61+rJyPiH2CYQfJY5ZkvGVVRo3KvG4ROnqn5CqMFXaPR9BrCMBHlx8eF0/JN2uZHRfRTd73Z1x3ClnZ5J9/BFXnh20sZxWsSL9TuJQaccrWKN7YhE6hUlLuUEmEPV8LTyT2ulEPbfWUXC9N4B2EG03nhs14ZUe5KIXB+V3rOm5uE6dmu10ej2Zbox1GNRtPrKOnAZ9P2x6p+E/8olPJAZAkqtg7lJBDJfOxuXnYFjkwvE29+XCnzbYT/RiWlu11KVDy81qqYNNB7yLOAq/Htf+23uzCdFzCsWqREOcmyyX3IqUdJCTH5mXfM0eOCRz8UF4ZvB18pjWbL0YKu0Wi2C0pK+Gzf3ZUoehOjokLZEUiEUE48j9hKcJLvyhX4tNgnBVo5dhRbHGpUTVng1L1yJoZ5oxJmRa6Auw8HGZO4ZIp7wokjvePKLmv5rKtMcxrNVwEt6BqNZruhZAK19FteFVvzGPiPwrGzLOgOwc2wrJ3OlrUryBLlSInCUDkPBFnHzGulK5Tj2Coqv1l+RfxZoYeea3YBtKBrNJrtjlI28p3d98Eo+rdSZ
nUn6zxlWSsFOYKc+55P7LO2OTKZiCajrC3jMmofUX555GVhdjOOXaP5iqAFXaPR7DCkjCHfHHIcVt8HlBLBfKLc0aeeabkXFvB8+7qpYpPHiSdWyLAxtfzn0U3C0LdAza6D9jNpNJodhmH4MKatfVzUzC0i3nwO0g5nDy1LRr2Tek9tIytqPXP4W+fhaSTnTFdtyvEfbfbdbUTFVTEt5ppdDv2L1mg0Ow1O6F2cBUeegFF6j8Iozeozd7Ld8koqRCpALm2dp7ZluO3j9nxU/LSSH8ZXGLqvXLMLowVdo9HsdChpG/H/DjsIzIcUVk0quE2lI97zu9yVVCjbtoknnlVSXlF6QfsipRwMPb5c8zVAC7pGo9mpSCaC8QohKhqi9sQNbUseHvr+GcVGtKFzcFsqYt2WqFjwZ2ZA/iE454s2PQRN83VEC7pGo9lpkFIihLAUlCqlhn7SHn0o6DFGBQwDU0bw1P0Lz5rnMTdtRDb2J/z+TFpfmEJirSIRS7xVOqt0+rDnhnU7P7pGsyuiBV2j0ew0KKUMpVQxULu4Pfory2McFxQCS7i3qlQ8nIPCWesQvLeN6LsRooujJDYkSDiJe8e1jDvL8Om+cs3XDy3oGo1mp0ApZQBepdSA8PPtMzYlnDvF4QHDg0BkCDpKIZXCASJK0b4qTuk1TdhvhknUJ4gn4udPjEy8w/BoUdd8vdCCrtFodjipfnOgwql3xjTe2viAb7RvgFltEp3gwa40QIi0oKcsdVspwlLS6Disj9sE7muh//WNcSPuTBsbmvCuYWlR13x90IKu0Wh2OFJKQwhRqhw1uOH6hpvMSnOWNcjCCBoIU6AMaB9n4RSLjplWcR8EEkBYShoch/W2zaaETWuj3XjAY/ER+/10YMg0dYCc5uuBtaNPQKPRfL1Jutr9KKqa720+BoNZZrWJ4TfSqa+EhOIPbFfY9zRJ9BGk7BELCAhBuWm6rnilcCqpeOMY+Zj1TMMRgA6S03wt0P4ojUazw5BSopSyFJRubIlPjGxK/NQzxINRbLhKnfQhuk52EA4UfWRTNs/G96WDEgBu0FzQMKgwTfpaFuWmiae/Z8arAxLXO1Lq+5zma4F2uWs0mh2Ccn3nhlKqFMWARZHoc0WGMbjitQSmJRBmxu1JkXW3Su6LkgrHgtbdLaKVBglH0iIl9bbtut8dR7I89u3Lpg583DQMuT3bp9Fsb7SgazSaHULK1a6UqlnaEv2FGTDODAgDCwh+aGO2KBBJ8RYkQ9zdd5VQqJjCaXNwWpKvNgfbUWycaLHqQC8NXknIdmiO2/GaNWqP8yf1X2HpIDnNLowWdI1Gs91JJpDxKkXV2qbYYS0BdV+JaRgeIUh60TE3SAJLnA5Rl6DiCtkmsTfZ2Ots4nVxEusT2I02TquDjEuknXxFHGzp0DTSZNXBvkV7h6xJU+7YLWpqUdfsouigOI1Gs13JcLUHZVxVNZvyxiLTNCyRHG+uXP+6XW3S0tfA/5mNtVYi2yXOeof4yjjxZXHiq+MkNiZwWh2cmIOyFVK5c6grkp9RlLxrs+e70TFxy/79hrEl39vBzddoeg1toWs0mu1KMhucH6j54pP2X6jR3jODQrjWRU5GuHQCGVtivhzFf18b0S9iJNYkXDd73EE6rnDn/pNkr5dCErfiZxzcdvD/6Uxyml0RLegajWa7IaUEsASiquXx1gMi88IPmccXWXI/f/pmlCnmCaVol5KQ49DkODRJCSsSjPxhE0VvxZCyeyHPXCeFbHMCztTpDdMXGQEt6ppdC+1y12g024Vkv7mBIihjsiL8Uvv13jFey9OoUB8kiE1ITnGqFBKIKUWblDQ5Do2OQ6vjEFWKxBCDd5+qxBZAu6T2vjYG39GKtc5OD29LkRL0jBXFhPnXqhtW7QHoqHfNLoW20DUazXYhaZ37ger6y+t/bASNC727ezHKDYQlwBRExntIBCGqFK1JIQ85Dm1SElMKG3CSgp/1rsA2QEpF2ZtRBv2jlZqn2xBhiQ2oHKs9IRJ/OdQ59HvC0LdAza6D/jVrNJpeJ8PVXtb2XNvktqfb/u2f5LesGgvhd4PhlFSoqCKqJGtmeGhUrpi3S0k82Zfu0NGvXug99VkqcAQkTPBssKl4P8LAJ1vp91IbnuYEMeGccmjisH8Ypna9a3YNtKBrNJpeRynlutrjcsCGH2x42DfGN94zxINRYoAJSJBRibPRIb44TvSDKM2mZNGfSolUCjedKxnijWuZp630jHW5ZdLbM8srRRzCpZ9Ep/2/WcMXGoYWdc1XH/0r1mg0vUpymJoFquyLDdFz4j8sHW9Wm4gi4d6BJKiowtmQFPP3o8SXxvG+HmPcqI1MHbSB/neHwUnOsqaSs7NBxwxsZGSPS0XKp5bdmdzSwXap/UxBMDTC89BP/7OiePtdDY2m99AWukaj6TWSrnYDRFnrytjYtRXypZKAaXiFwLfYxtwoUVGFXW8T/zROdGGU+JI4ziYHFVPp7HDp3u+Aou5UP19cVEx4gImUIClgqZPHis+19AXEG+2/3b/n4NO0la75qqMFXaPR9BpJ6zioYqrm89bow4E+1mSfEJhJK1raCvM/EZy3okTfS4p5vSvmKQtbIAoOScOSNEzz8sWZxdTv5yNaZaDsbkScbMG3BdKpi33vgX2G32vofO+arzBa0DUaTa8hpbRAVHzRGPmuLDauLzKS6V2F2y8el5JWKWkKO5SesQnvi1Fk0jJXqCwx736suUruI1FC0ryXh/qJPhom+Wgc46VliIdIqYFU7qQu0kn2twuIJqS9W53c45fThiwzdZCc5iuKFnSNRtMrJDPCFbdH5fA6lfhfsWX4fYaR6jYnrhRtjkMomTgmLCVxqeh/dQvVt7e7IerQraB3lUgmv+hL9wyEWypWaRAa6aWxUrw7vax02tgHx+v50zVfSbSgazSabU7SXe5FUbX2nLXXe4Z7z+SsEoxKEwUkkkljQo5Dc87QNOm6wfF/nGD02SG8S+0C8ty9mBcW9vz72QH710c2H/kz02vu2Auo0WwBWtA1Gs02JR3VLikN3RPaP/JO5F/+yX7L09+DKhWEJ3ho9ylCSeu8XUoSSmEX6ON2BPR5Mcpul4bwrHI2yyrvlM+9u2UhZbwyPv3Y9ce+YehZ2TRfMbSgazSabYqUEoEIKkdVrTt73ZO+PXzjvSO8iCKBDEvsVTbtX8RYfV6Q5kGCeGYGOKVwhMifNEYpHEPgXZFgxG+aqXomjIp2L9I9tdRTnx3TWa36qinHrD5mo+HVoq756qAFXaPRbDNSrnYlVdmGizacbfiM6317+TCrTFRCkfgyQeyjGIfSqnsAACAASURBVNEPoiRW2jhSUndLH0LHBrKHnymFFIUTykgFDu46/4oEQ+5upv/jbRhNDlJtvQs+EUg8fmzrscdrK13zVUILukaj2WZIKQ2BKG77b9vwlr+3vOKf5C/zDPKAAfZ6m9iHMWILY8SXx3FCDiSSOwpFy+E+Vt7ah1i1kSXYnfK2d/UO2AKCn8epfbyFmudbKfo8DjGJsxl96UooYp7Y909s
P/F24dG3Sc1XA/1L1Wg024S0da4oW74yfE/Jy7Gj/D4Do8jAqXeILY4RfTdKfFkcp9EVc6XcoWnQMUxNmIp1ZwdZeUUxiRKBo5LJYyict73L8eZK4ShI+AXKVhR/HKXfgjYq3w9TuiRKcE0cM2QDMv1gIFHYwo46fjnhhPbjPjMNHSSn2fnRgq7RaLYJyWFqpWtXRGbH+psPBDyG5YmC+VyY+DsxYh9EiS+O4zQ4yLhEqI7bT0rMU5/TGJJNR3j5/IpSWsdYbtIYkZGXPemWz5vHnZz87eQJuEuWkTLpERDgGJCwhDtLW31sxRnPRff89iV7Rj0e7X7X7NxoQddoNFtNMiOcPxGWVV9G48/5Ssy9vEIggbCUtDQ4+M6px3gpiop3JI5JkWul5+vnVkicYlh3QoDlF5TQOsJC2XlSv/ZkAhfyCHu+bYCoi97+zLRR3zeFFnTNzo0WdI1Gs1UkxdwCSld/EfmuGmTd5E/eWqLJTHAtUhKVkkRcUX5VM31ub0PRWcjTx+ymrzu1jCFp291D3bFBVh5XTOsIDyIqXTe96LqvPWXRp5fpHFUvgZgBA14KHfnAmeOeNXW+d81OjBZ0jUazVUgpDSGEP7w+PnxdUP7X7zcGmEIQU4pWx6FNSsKpcebJ/mzHAP9rUQae34RVJ9O2eqbAb8lYc4lyM8B5FG0jLDZMC7B+/yAbpwRoqzRRAqQtkWS46gtE12e68eMRp+4bT7dO+slPxm+0dGpYzU6KFnSNRrNVKKUspVTFujPWXSFPLLrYODxIAkW7lLQ5DhGlSKSC2PIFrtmKir+2U3t1K6LddcRvrpD3xJpPLYFCehXxUkF7lUloTx/1w/1s3DNA4ygf7VUWMZ9ACpAyOUROINWq8OPXrvF/e/qpI/QELpqdEi3oGo1mi1FKGSiC7S+2D2+6rekD7x5eQ9mKtkkeGo/xEyWZBQ5XvGWGsOedDc2Bvve2M+imVoxNKSnetulde1YGt16hUCr1ECBlhMgZp4fP/Jsen67ZGdGCrtFotohUilclVdmXh395l6fWcywesOtsEssS2OsShA/30/D7CpwS17WdlQkuX1a4tKsbHBOK34ox/Oomit+J46h8sr3lmeK2ZD/HchrNAeaUk5aetMLwa1HX7FxoQddoNFuEUspAElzznTUH2KvtZ8wq03CaHOzVNna9jYookMn+8MEmG/9YQdshXnfaUjqs9NwgtU7DzuiYsKX/31oZfHszgaWJToliNie969bsl/AmXj8tctp0Q/ela3YytKBrNJotQkllKEd5l++2/H9mH3O8shVOo4MMSVSsQ8xTuAFvivBhPupuKyM+wMSROa73fNHo5EafuzOrOigqXwwz7M4myt8Mp4ewbWsXfKf1QhEPxC89vfn035kenXBGs/OgBV2j0WwRUirjw1D4Quu5yC3eixsgAjIu3XSuSR1XdIwrzx2WJgxoPdTHqt/1ITLYdAPQRLalnjfqnDx98ApsC0SbpO9LbQx6LETfV9oRMdnJVb9lwXTZ6xzhxJ0iZ8qZoTM/0v3pmp0FLegajWaL+Oizliqnv/mhtBggBXh+2ojvj62QJwNcPmHvWAcCRXR3k1WXlbDheB8q0ZEwpqvMb4XGjrvL4AiFbYK10aFscZj+/21h4EvNFK2O4cjUsbPj6ntqydvCXjT7hdl71hxcgxZ1zc6AFnSNRrPZSCmNhZvC1xslxmWOVEbamk6A/+omgn9qBUdkudxTpIQ933JaML2S0IEeVlxSSuNULyqZmjVvZDw5edszl8kzVC4jN7xtuMtGu4NvU4LiFREqPglTsjpKyfoowVUx/JtiCKlASvelXEGXQhEzEtedvunMX/iKPL16vTWanqAFXaPRbBZSKpZ82T4yWineU4LSvCKrIPCXVkp/EUJEIDMrXMHjItPvnRzilqR9pMkX3yth7VFFxPqI7kW+0AQumW78fGXIce0r0pZ8rss/6ig5dEHz9Ge/M+kNnUVOs6PRgq7RaDaLNRuibDDtB4yg+E6H2OWKoMJJZmMTqxz6face70d2l8K+ucPI8ChaR1msPKmENUcX0TbAcqPmnQ5Xel5rPV90fVfvdOURgHhLfMVPFzp7nz53r7BHR75rdiBa0DUaTY+RtmL57zeOapsdWCKGWh1jy6GTCGYJonT7svvc0071r1owGiTk9K1vTh923n5toUBJhCkJjfay6pgS1k8L0ribj2iZgXLctK+OEGmxzyvedOGuz93Htfald1307pfGjDi/qiqos8hpdhha0DUaTY+RCcni8sUPGe3Gyc5xAaJ3VSG9osP1nUfQO7nAFUgH+t7aSs3v2xCtbg/6thgj3vW+EoEk4YPGcX7WjwtSPz5A41AfocFeIn1MEqYAR7lj5ZVCyuSUqqnzz02EA6AUcYHc7cO2w5/61vgXtZWu2VFoQddoND1CRiWfBj/d11Tma0IJr2tdQ/wHJYR/VY5j5bFyC/RRpzPFJbeVPRFl6FUhrDonLcPbIr3r5u2nkv347uOFEO4aWwjsIMRKDCKlBomAQSwgiPsMbA+gkkPZYomVh1ZW7X3IP2e0mV49Pl2z/dGCrtFoesSaC9dYoT+GnjGkMStzfVrYjwkQursS6RfZM5jR2R2fKejp/uykW96zyqb2Ty3U3N+KjOefea0307tuzYNBzIj98eR3T/5h34l9t/O3o9FoQddoND1AOpLF/RcfJerFk0KJLnzKCnuMRcN9VUT3sLLmG+92Ypas9+S0pgL8nyeovbOZ/o+2otryOdK3r2h3VaeDgwqqQ85rOe9V09JWumb7ogVds0U4jsPs2bO95eXl8qGHHrIdx0EIgcejx+Puinx5zJdWy9Mt7wgpxmcmhgEKJIwBLGg5o4j6X/XBCYq8AXQFx5DT2Yp3lMIxQDRL+r7QzuC/NlG6MIKMFZ4/vaeivTUPBrnrElZiWcVeFVPmLJwT2r7fkubrjhZ0zRYzdOhQ/7p16/ZNJBKXGoZxUGkpccexl9q293WvN/hKc3PLR6eeeuqm+++/3zZNU9q2jWVZO/q0NZuJdKSxuHTxSSIsHkRhQPbQs5SwZ5KZLEYAqljQcH4R639cguPLsdYzAs26EvZOKWBTQ+OEQsUU5f8LU/tUiH4vtmI12ChHbTNrfnP3iwVjt18YuvD7ui9dsz3Rgq7ZJiQSCb7//e8b99xzzyjT5EfDhnHC/vs7FUOGQEODkCtWKFauFNFQyLuivt5+ORAofs0wxMLDD59V97e//d2WUkoAr9e7o5uiycBxJLEvE9ZST2KNdVVTtXlPK4VuGykrvbO1LrJK4YX604Os+XEx0RojnSCmxxng6MJ9n8oCpxS2KZCOonh5lAEvNFPzSgtlSyJYDbYbANdNjvetcd1LQ9pOP+fwH6764cta1DXbCy3oml4hHo9jGAZlZWUV4XD4vJoa85x9900M3WMP8HjcDJoeDwgB9fXIVasEK1dirFvHZy0t5nyfr+iVlpbW988447QVd999b1QIgZRSW/jbGelIPlzR9hOj1roJCcQU5nmbMP/ZTldJYnKFPNM9n71OER8
oWPnTEtafFMCxCo/5zjsOnK4Tv+SLqpephDA+gWNAcHWM0s8iVCxrp+zzMH2WRAhujOJtTiDCEmE7KJEcxkZnqz+1BhRKSHeqVx+0VrG0ftqgKbf885st2/I70WgKoQVds12IxWJ4PB5mzjzU//rrb5wcCNjnT57MPpMmKUpKwHFccQdQSQ+umTRsQiHB6tWKlSuFbGjwrqiri78VCBTPKyoqenfKlMmfPP74EzYgHcfB5/PtkPbtqix6vrE6Psn3nvCL2tTdQiqF44B1QwjfTc0ou/D+2a73zgKfa+WCJFprUHdOMXWnFBOtNFC2So9zzxoGR8+yu/VY+DulhXW3qYw6HdHx2X2gSX5AIVTqc3KDIVCh+F8/mrDHXD02XbM90IKu2WHYts2xxx5tvPDCSzMsy7lo993l7P32U1a/fmDbHQKfS67gNzfDunVCrlyJUVfHsg0bjE/8/uJXWltbF9x0028WXnTRJXHTNKXjONrC3wykVCxc0forc6B1JQpDJS98ephZUiiNl6IUn70JUS/zutwhf+Cce6zu3dpYkqapXr48qYS6Y4qJlwiIy46EL/TQas8ZE9+t5Z/vWBnr3XZ0fRN1wDY+aTpi0bFTXhSFftAazTZC/8I0Ow2O43D//X81Lrro4t1jsfBFgwfL7xx4IMHhw1WXAp9LpuArBe3tsH49rF4t5Nq13rply2KLgsHi12pq+r0xcuTIBU8//YwthJCO4+g+/CTSUXw5r6W6cYK53LCM4tSdQtFFP3dEUXRTCyW/a0Yp0eVkLN31Qxfu13aTviAkkQEm9Qf6WXVkKetnFJHwCYjKwnncMz5vkfWesS4rDFCpLp4+QbUllk54eeOUv//kYO161/QqWtA1Oy1SSqSUDBpUOyAUajq9oiL+/SlTGDB2rDIMo+v7aFekBD81OVYkAps2IVeuFKxcyfp164wVhuF9ubU18ubMmYctqKysbHvooX/YiUTCEEJIpdQuL/xSKj65+MtfyWsrr8QjDCE6vMnd9Ws7ABslVZc2UvTvKNgA2UFyWxJo1pOHAJBIj6J9oMmmCX7WHlDChslBWgb7SPgM132uMvrUlcKRsrNoQ2cXfEY7VVLU0z8/ITp+WKnPQqCUAtPAWNz0u4+OmPRT0zS2Wa73aDQKgGVZmKZJIpFIb/N6vUgpSSQSJONNCQQC26pqzU6KFnTNV4aUy3fAgAGljY2Ns4PB+BXjxrHXPvsoq6jIvYduqch31NHxWQj3FY9DQ4MbuFdXZ4RWrXJWChF82e8vntfS0vLuySef3Hj77bdHDcPE4/EQj8fx+/1b2dodh4xLPu3/aY3RZCwWSpTJOUXYt1eBKVCigGs6rxiCI0C1Syr/XytVf2lP522HzZ9dLXedQBAlSh11vMd7vM/7LGc57bQTJYqN3Wnu9bwkBVh4PJiBAGZxMcHdd6fP/vtTMm1/AgOHgukhYYBtKmzcG6dI9j0IpTAchRVz8IfiBDbFCdTHqFzaQuUXzfRZ3IqnORI1rOj0S1dftsD0dR/1nhJjIQSHHnqod8GCBRV+v3+fRCIxXik1LBKJjDVNs9o0zWLAsm3bAiwg1VlvA7bH47GllPFEItECLC0vL18ai8U+79u37+vt7e2r33jjjZbhw4dLwzCQUu7yD6q7OlrQNV9ZUgK/2267eevr6/dNJNp+PHq0PGz//VWwujpbnLdtve57SvClhKYm5BdfCFatIrxxo1nX1ma8HIupeZWVFe9OmDB+7bPP/iecukHv7Ml3ZFyyqHTRLWbMvDilhUoo1Cw/8XurccpEj4aY5S0jIfhGjIG/biH4Thwpu3KvZ1voi1nM4zzOfOYTIpQl1CJpDffp04fy8nL69u3L6NGjGT16NMXFxZSVlaVfAKFQiObmZkKhEG1tbSxZsoQlS5awceNGmpqaaG5uTh8zXQeCYorZm705mIMZwlAgFePeg4Qz/sRHtYfWTjn5sZPjlr8jliM1ImTEiBHFDQ0NY2Ox2Byl1FG2bQ8QQhhKKSO3nZZlUV5eTklJCcFgMP0qKSkhHo/T1tZGOBxOv0KhEK2trVnHSH3dQggDCHu93vdjsdiDZWVl/znnnHNW33jjjdLQc7x/pdCCrtllSGWr+/vfHzSuvPLK4evWrf3RkCHqpP32k1XDh2d5QnuN3IcI03Tra2pyXfqrVgm5dq2qi0T8r3o8xfMaGhoWfO975678059uDxuGIR3HwbbtHRatbydslo1YNlLWyfeEFKWdSyhUf4PwPX2JH+jr6K/O7Ium6znGO1K7uhZ85cPtDLi9mcCncaTssNxf4RXu4A6+4Iss8fb7/YwfP56TTjqJE088kZqaGgzDIDfoLFO4MrcppfIuq5wvTymFlJKNGzfyyCOP8PDDD/PBBx+kXd3ginw/+nEkRzKRid0+mESN6M++s+I7vx4yeAiVlZUDwuHwmfF4/PtATaZwezwexowZw1FHHcVhhx3Gvvvui8fjQQjRqZ25bU6dez7SgY1Ssn79el5++WWefvpp5s2bx4YNG7IfYISwhRCvAzfuvffery5cuDCuA/t2bvS3o9kqUje35I0g63HeNE08Ho8E90YTj8dTm9LllFLpGxUglVIUsgpmzz6cQCBgGYbhtW3ba1mWV0rpVUp6lVLetrZ27/z5/7MikYhB0v3o9Xq9JSUlfiAYCoV+VFbmHPCNb8CIEdv0Mmw2mYF7hgEtLciVKzFWrRJy40Zr7caNar7jWK95PNaCmTMP++yRRx5rS/WF9qaFX0aZ8SEf3tNO++kCN2d7yr2dPvf0siJ+hJ/mWyqwB5qFo8Pz9LXnGyfe3hTimVtvZt7td+IoJ13fkCFDOPfccznnnHOoqqpKr88Vr3xilk+AMgW8OwEstL2xsZG7776bO+64g5UrV6bXGxhMZzqzmY0PXydBB8K3cEvdWtaOSu9jGIwbN47zzjuPOXPmEAwGC57HtmpHvvKp4yxatIjbbruNhx9+mFAoK3tt1DTNv5SWlt60adOmOtPUCXN2NrSg7xqMB2pJCpjX6w16PB6/x2P5TdMKWpYZMAzD7zhOsK5unRfwGgaWaeK1LMPr81l+r9eyvF7T8vksIxJp9TpO3Gua+E2TzHevYeA3DLxCYHi9Ao8HLIv0e8Znmfp7NwxXuCwLTBMjVS65jGEoaRhuOcty3w0DwzQ7BA965kJPlcl8z12XcpXvzGQKvhDQ0gJr1wq5ahXGmjXUNTR43/f5SufV19e/ddNNv114ySU/Dm9tel2BMJ7hmZG7s/uSGDH3PMgeQ565nOXyBsKnBGn4VRmJaoGUrvWd5XLPE2CmpOQ/f/oT//7tb4mHw4Ab5HX88cdz7bXXsttuu7nH78aizhS2zRXn3O1bUseKFSu46qqreOSRR9LBaR48zGIWM5iRJeqrWMVTtU9x+eWX893vfhefz5eue0e3I7cMwJIlS7jmmmt49NFHcZz0g1bcMIx7J0yY8NN33323RVvuOwf6W9gFiMViPPnkk8a55547OBqN7msY6vCystiMIUPE4BEjYMgQhd/vju0u5HLWf49fLTIj9Y
WAcLhjaF5dnblx9Wr5iVKe1yzLemvmzEPfeuyxf7VJKckToW+k3wXG4vltjznXbjzKfC6aFUqWz0LPfe8orIju52HDNWW07etBOaQTw6TEvKW+nt+ffjrL3n47vduRRx7JbbfdxtChQ93DdPOjLOQ6z/ycK1ybKzxbUsfq1av50Y9+xL/+9a/0foMYxFzmUkQRtrA57PbDGHvGWAzTwDTNTq70naEdueUB6uvrufLKK7nvvvvS0fNCiPVKqTmxWOxlndhpx6Jv47swjuOglDIuuOAC3nhj3oBly5aPV8qe2b+/56iqqtjgUaOENWyYkkVFGCmxBy3uuwK5gh+JpCP1jS++UBtXr2ZpLMZ8pZgnBPNRIvriE2/t33fGns9IU3hFTGBe3oBxV2t6vFo+Kz1dX0FLHmSZoOGMIGsvKGJtaAW/PuF4Nn35JQBFRUXcc889nHjiiZ2s1EIUEp/cfbqyRLdHHUopHnvsMc4666x0QFoZZZzN2fStrOY7b86hpH8JlsfCsqx0HEChWIAd1Y5CdXz66accffTR6S4HIUTUNM0rzjvvvD/cdtttUlvt2x99xb+GpNKwKqWYN+9149vfPrnKcewxsVjkcI8nMnvQIGPU8OHSP2KEMkpK8rusNV9tcrsf4nFoaoKVKwUNoTIchrD3lAOYPHUa48OTqPopqIasvuAuE8dk1YUiRIi5zGUJSwCoqanhySefZMqUKcnz6NwXnLsutT63fE/KZG7b3nUAvPfeexx77LGsWbMGAM+okZx0273cvPcILJ8fr9eLx+PBNM1OAX47SzsKlWlqauL000/nmWeeSR2rTSl1ls/n+2csFkOz/dC3Zk0nEokEhmHw5z/fznXXXV9m24lRLS3Ns0pKjCP69rVHjRypqoYPV7K83I1fS3retNDvYPIZapnfSeqzSEb7S9nxHo+DbQsSNsSiitY2aGuFaAQkXmJxH+vW+wk1m5Q1FfOL+M/Zh33c4+W43zMxMLiRG7mP+wAoLS3lwQcf5Mgjj8w4r85C0dGm/OtT21L759un0PuOqgPg+eef55RTTqGpqQmAU089leuuu45gMEggEMDr9Xay1ne2dhRa39DQwJFHHsmCBQtSda0OBAKHhEKhFXp8+/ZB34I1PSYej2OaJlJKPvxwoTFz5izvwIEDhn/++dJZQthHDBpkjh0+3KkeNkwZffu6AW5qGyR7+SqyOeKaKq+UO0lNIuG+bNt9j0YgEnX7yVuaIZEcLCAkODZIGxIOlJVA33L3VVYMxX4oK4M+lVBdBn1L3JfP6x47FoW2MDSFvAQOfAWjdkK6XzR1ciL5nsqqlopQlw2Sossa8T0ehgR5+9vrqOM4jqMV19189tlnc+eddxbsK85Hoe09dTXvjHUopfjhD3/In//8Z5RSFBcX89hjjzF27FiCwWAnUe/uPHdUOwod48033+TII4+kpSWd6fY3QoifKaW2WZY8TX6+ZrdZTW9j27aRnAjFmDp1au3HH3802zDMw6uq4hNHjlSDR4zAqKlR+HwdFuL2FPtCQpt6pZbBFVfb7hDWSNQV00RSDFtboa3N7Z82lBtdZiiQjiuygQCUV0BVWVJQ+0CwGMpLoboCqiuhvBhK/eD1gvLiKqMNIjmeSySFPn0rzHOe+RtK1l93+oFBQjwB4Si0tENjMzS01dJv9jyMmuq8LldFF1OaKpBxKL61mdI/tCJaFAKDZ3iGH/NjwO0nf++99xg1alRBC3Jbkyk4mzZtYvHixSxevJhFixbR3t4OuOPZR40axZ577sno0aMZMGBA3j7s3jq/5cuXM2nSpLTw3XTTTZxxxhlZlnpX55Nqo1LuePm1a9fS1NSU9rD5fD769etHZWVl+rvsrbbl62Y45ZRTePjhhwEwDGNhIBCY1tbWFtZ9672HvrKa7YZt2yilME2TyZMn1C5duvzgcDh8+JAhnn37948PHzFCGbW1EAy6Yp8ptFK6r5QFG4tBNOq+x2KuqKZe7e2u0MZibtmAB8qCUNoHSoIwsAb6VUBFKZT4oU8plJRDRQlUFrsvfyDpmib5EKBwBdZJimtSMPNZ3Znk6Gr2+pTmiIyCyffUg47KXL8Vdbii7D6cROOuZd7YDBuaYF09rA0Jxr54K4N/NAf7lKCb1jR50MzJTArOYKYUEoE04f6zLueRh28HYPz48bz22muUlJRkndvmunO7EoHcYK758+dzzTXX8OKLLxbcpysmT57MVVddxVFHHZU+164Cxra0HUop2traOOSQQ3jvvfcAOOuss7j55pvTfeq5VrpSitbWVu69917++Mc/snz58m4t8MxznTVrFhdffDGzZs3Ka/1vaTvyHUMpxd133825554LgGEYmyzLmhoOh1foWQ97By3omu1OynUvhODwww8ve/vttyeHw+GJUsppPp9v90gkMpKcJDUi/Z8rctUVMH432G0UTN4dRgyG3QZD31IQBggbN7NJcr/c+1JmUFimIBYSx1yy9skQ38zjZS5nnH6PK9mWdaSs84QN0Rg0t8GmZli/Ceo2wrpNsKERrA17cP7/HkbMKMa+oy/0Nwta57mzlCEEd195JY/edhsAM2fO5Omnn85MHNRtn20hcejyOilFS0sLp59+Ok899VSP9+sJ+++/Pw8//DADBw7MOq9t2Y54PM5xxx2XDio777zzuPXWWzFNk1TyFtu2+c1vfsN1112XmaBpq6itreVvf/sbBx10UPpct6Qd+cpk7v/aa68xe/ZsYrEYQohGwzCmxGIxLeq9gBZ0zTYnFUVv2zbHH3+899lnn93H5/MdnEgkDrFtewxQg5sVrlNKONOAoQPggEkwbQrsPQKGDYTq0qR42cmCovCPt6eivLnkimauqOY9DwVKJC3lDIu7q3PvjTryCnrIFfKUoG9shFArtEThwFVnc/zqy3C8Ns45JcR+VY7jFdlpXpXCESI9t/jjt97KX66+GpRi+vTpvPjii+kHt54GW+Vz3Xbncj7jjDN44IEHClzRbcNhhx3GE088QTAY7JV2SCk54ogjeOGFFwC45ppr+NnPfgbA1VdfzY033piZ1GWbUl5ezlNPPcW0adMKPrB01Y580fC5x3j11VeZMWNGav/GYcOG7fHZZ59t1MFy2xYt6JrNJhaLYVkWSikeeOAB45JLLvF7PJ59GxoaZgKzlVIjpZTFmfsIXMvZY8HE3eGgqTBzPxg9FGr6gCHcIK9CruUsj2KGCzpTuLoSy9z9e1KGHKuYjGrJXJdRr8pZn/s5n5e9U9leqEMp1+WesCEWh9YwNDS7Ir62HtY3uJ+bWtxtkRjEbIvjVv6MQ9fPwcGGgCD23SLaf16OU4zbfy7AkZJlH3zA9w8+GIDhw4fz7rvvpidCAfKKQk+EoFBZgLVr1zJlyhTWrVuX59vsGiEEPp+PsWPHMmHCBAYPHkx5eTmWZRGLxWhsbGT58uV88MEHfPbZZyQSCfx+P2+//TZ777131nG2ph2Zn0OhEFOnTuXzzz/HMAz+7//+j5///OdZqWW3FtM08fv9DBs2jKqqKrxeL4lEgubmZvbff3/+8
Ic/ZLVtc9rRleAD3H///cydOzd1HvNPPvnk6ffee6+tk9FspqToBAAAIABJREFUO8SwYcOGDxjQf/Wrr75mp8ZAajQpknOSG3PnzjVeeeWVAXV1dTO8Xu+3HceZKKWszuy7E4BpQVUfOGgKzJ4OB02EgZXgNdz+56zCZGtzejnHrQzktVzz7V/wmFtYR67wd9HtndXfndX/3c255tIbdSjlvmzHFfVw1LXSU6K+sQnqm6CxxV3fHnb72mMJiDsmM788nyPrLujo97cgfpCPpl+W0TYyzrkHHMDa5cvxer3Mnz+fiRMndtTfhSs3c3vWOXfRX62UYv369UyaNGmzxNzn8zF37lxmz56dzsPQXf94ap1hGLS3t/PII49w5ZVXMnbs2K1uRyapMh9++CFTpkzBtu1OZbaEPn36cNppp3HIIYcQCASyRzHkOce+ffsyefLkTue4LeIHlFJ897vf5f777wfANM3frFu37orq6uotbp8mGzFwIGtOO40Bn30m5EcficaGBu/ThuF/sLa2dsGCBe+0+f1+nfHna0AikcCyLJ5++mljzpw51aZpzmhqavq2YRgHK6WKVdYUjtCnGCbsAUfPgGMPhv59wUvSyoYs0UmTdA2jkpvzWMpZVmYX1npq/96uI1+gWpbKpsqQp1xqVWp97m65TxO9WIdS7jopXVGPJyAcSUa6t7ivhmbXQg+1Qms7tEVc93w04ZaP2zCgeTJnLv0t5ZEBuGFwgod4iGu5FnCDuu66667kNe3+vrE51m0KKSVz5szhH//4R7fHB3dylxtuuIE+ffrktTK7CrrLdx5FRUUcdNBBefuMN6cd+faVUvKDH/yAO+64o5OA9hTDMDjqqKM477zz0sfoLqgvtV1KyZgxYxg2bFje7d1Z4d11j2zatIm9996bDRs2IISwBw8e3H/FihWb9EQv2wYxeHDZS3PmhGb4fB0uRyHc1+rVsHAhdl2df8HGjdEHa2trn77gggvqLr30MqmU2unnddbkJzX/8plnnmk99dRTu7e3t39bSnk6UJvbr11eAvuOh29/A46eDn2CIBId+pLrJs5n0YqsD6RFt5DQFlzu5ToU0NgGQT/4rZx9cyzhzDoVna39zGN3Wg8d/d7bsY6UlZ6Kdo8lXMFui7gC3tzmvkJtrtC3haE94lrzkVjSWk8N23MMpqydwzHLL2F64lAaaaS4uJj333+fkSNHdit2+SKle7JeKTczWb9+/bq1Yi3L4tprr2XcuHEF3cWpOlLkE+Wsr0gpHMdh+vTplJaWZu2zJe3Ld/wVK1YwefJkQqEQRQG47mJ45D+wcLE7dLIrmR87diw33HBDt/UVOj9wh/MdfPDB6eXu2pGvjkL1Sim5+eabueyyywBkMBi8u7m5+XzLsvQY9W2AKCoK3HnmmZGzq6owunqgVsqdRSsUgk8+EXz8Mes3bBD/NE3rwVmzDv/oySefjALaZb8TkgqmqaqqGhMOh0+LxWLnAhWZZUwDRg2FOcfAibNgt0Ggohki0QMnTZa7N6kwuUamyrO+0747qg4JVJ9FU6ycf/79EfbfexV7DyNr6FfucTqJaM4hc+vPLJfpLhc55XujjlTBtKg7HcIeibnC3R5OCny4Q9Dbo0lRj7pu+Gg8abEnYO1GeO5/7jEPOuggXn311bz9ranP6XMuIALpdhSwDJVSfPrpp4wfPz6v+ziFz+fjzjvvpKqqqscPFz3tFlBKMXHiRAYMGLDF7cjdllvHrFmzeOmllzCE4qnb4YhJyWMLED5YsAgefhb+9TysWt9hjKXo378/3/rWtzjwwAMpLy9PP/x0dQ6ZAn7ggQdSVFS0Ve0o9CCwYcMGRo4cSXt7O6ZpLttjjz2mfvLJJ41othoxatRuP9hvv2W3DRmi2FwtVsqd3tG24fPP4eOPRfiLL4yXbdt8sLq63/MrVqxo1P3y25fUTW7y5Mmln3766Qm2bZ8vpZyYaXkbAibsDt/5FpxwmNvHTSr7mMh219aHy3j0ecURBzQzpJqOjbmubvJbj+ljZtxwsuogW3wyt233OkQp1PwIhYlA4ZgG/3zoUYZUfsj+e9Lhhs+lC3d5ykLOdYWnzgvyn2ev1qE6Xo7siH6PJ9yAuUjctdwjSSEPxzqs9JSox5L96/+dD/MWuse+5ZZbuPDCCzEMo0eWaCGrrjvRqK+vp3///l0K+vXXX8+4ceN67FLfHBzH4YADDkgH/W1pO1LL+cTv9ttv5wc/+AFCwA9Pg1svoiM3QOZ3njq0Fz5bDf94Dv7xNCxdnX3OpmkyceJEvvnNbzJx4sS8rvjUu2VZHHTQQfj9/h61o7suhdzrIqVk1qxZvPzyywCUlJQcuHHjxjf8fn/3F1/TJdYXX6xcOHgwJLtMNgsh3D45w4DRo2H0aBVUyjlKCOcow/iSiy7y8Omngupqa2FLi/VUZWXFQ2+88ebSoUOHSnf/zftD0nQmFWxSW1tbs2nTpnNN0zxHKVWbWaZfBZxxHJz5Tdh9EJAg604vksvpv9cMi7SqZhrnfX8qCaV47IlnCaq3OWJ/wMmxhFNCmSmYInWOGW7t3DpyVExlbEuf0/aoAxCBUShhJusSmFJx8knHEVff4i9338c39lvFwMo81nPyXDoeDDLqzSib2qRyziXrryD3PHujjuRxUi/TcF8eC/xeCNpuf3k84Yp2LN7xSlnosaSV/mw6j79g8ODBBd27qTKp5UJWbVdBZqn9KisrOeKII9LjtnMZMWIE48ePLygyhSzyfOTbFgwGuxTznrajUH0AQ4cOTe+34kvcrAyZv+fUYvJvgwTsXgPXfBeumetudyxY8Bn85Z/w+H8d3nnnHd555510XYMHD+aEE07gkEMOyXoI69OnDylx7Wk7cj0Y3d3bx40bx0svvYQQQtbW1k4UQszH9ZFptgIBjJowgXe+9S1Ku3jg3SpSN1vDcDN4LV8OH31krV+2zH61oqLi/ilT9nn5scces71er0ylO9TkJx6PEwqFmDFjRunixYsvBr7nOM6A1HYBjB0Fl54LRx8AJR7cP5OcJ3qReVPItHIzlsFC1FyIMvqk61eGwbPP/hd/4g0O3Yes5C2pt82qI9fSyDhWbp94r9ahgMozwD8io47sJ4hFS5ax+J0HOO5Q1RH8l+d4WfXnmMy5noRcOj2UdCqwbepQCmICTr4UhgyEq8+HPpbripdJq91xoC0Ox18CB0+G7x3ningiKfi2DedcBwuXujf0+fPnM3Xq1GQd2ZZo4XMpnOWsq23Lli1jypQpNDc3d9rn0EMP5cc//nG6q6kr13ZPLOjMZaUU++23HxUVWT1WW9yOfHUALFq0iLFjxyKlZPpEePEusNSWf+cAmLCxFf7xH7jpblhTn7150KBBnHjiifzyl7/Mcrf3tB2FyuR7ELjpppu44oorAKQQ4reRSOQKPXxt6zFuuOH69U1Nvk29mbQndZOS0k3ruffeMGeOXXPVVZx84YWNz+2zz39iF19c5Eyb5omUlpr/syzzwpqamgHnnnuOJaXs9ke0q+M4DrW1tV7TNL8TCAQWV1dXq08+
+aTZcZxfopwB++4Nz/4Fou+C8y588CDMORhKLdI3/7SRKjKsPDqsv9Q6kVFGePqB6fZBpl6GUhw1eyYHHHU1dz1Zy/rmjP22pI5UGZG9Ll0mc//erMMIgn84ApXhDRAIBQJ3upExo0Yw6/jLuetfRSRyjwlu2ZTgZp5z6rxE9rmL3HPKeDjJ3EcKPw++Pp7/vC2SGdm2rg6lYH07jDgCnnwF/vA36HcQzFvkdqF5LPB6YMUmGH0s/O9juOE+GHMitCfcyV9qKmFgXzdPfYrUDGK5lnhXr8zyueu72n/kyJG89tprnYRHKZU5KUgn8U6VydyWW2/muWSWlVKyzz77UFFRsc3aka8MQGNjY7pufxDMrfzOhQAk9CuCi46HL58F+R40vwX33eBmWayr+5Kbb76ZkpISLMviwAMP5NVXX00/GHXVjkLXM981SV3LDGydYGbbYFx44cUtGzYk1kejyO2tm6mbi2HAwIHwjW/gv+QSue/Pf+78/vzz168pLb0rcdRRpjNokPFlMGj9ORgM7rvXXnsFHcfptaxJOwOJRAKlFJWVlbWGYdxpmmZ7XV1dzHGcBxzH2X30UPjrDdC6ABILYP69MHtictgYHdc1y8BVHZ+h4+E+XSbnu1cKVHAc7lRaHdaJSt45fMLgnPO+x8r2b/Hie+7um11H5iqVZx3boB09rcM/BoGZbmdKmDuO796oSvx+5pz9I+57sgzHoKM/GjKeNrLPIe05yCiScolnnovKKJd2LigwA3sy55TjmX38tXwe+S5/eSJIU26082bUsWQDjP6Gm1AGoG8Z3Hkl7LdHx+/njcUw+SQ3+h1g+ED46zWw5xA3H35pERSXwMDajgeR+vr69PWDbDHtuNaqRw/omYKqsn5/HfuOHTuWDRs2MG3atKx9P/74YxobG9P75Apz5rFzXcWF3n0+HzNnzqRv376dzmtr2lHIa7By5cr0ct+qDiHPYmt+V7i/rRITzpgJSx5zjYHGN+GGS6BfpeTNN99kxowZeDwe+vbty/XXX59+0Mh85ba30HXJbPO6devSy7vtttuaWCymA622AYZpmkZ5ecWitjaxU1zQtJWioLgY9tlHGWedpWovvdQ+76c/Df/vuOM+aZ8711ITJljtlZXel4qKgqcPHTqkVinVZZDMzo5SCtu2CQQCewUCgf8KIZyGhoYvpZTn+n0ETzkCVrwAznuw+BH3j7A45xvLFL18y+Qsp8Qx648/vd1A+PfIWSeyrGKkYt8pExg+/lwefckEo/NNpOs6Ms5RFD7vrWtHD+pQIILjATtpn4usQm4AUtrOp8jj4aQzzuWBf5vpjPOZdXScYMc5ZV6X1M1ZZRfNstAz1xMcB8pBKMUeI4Zy7gWX0xK8gLueCBJOdDxQdFuHgk/XwtRvu1HsAP2r4E+XQ0kRfLLc3eeVj+Hwc9woeIC9RsDvLnb3/2KtO9rF5wG/Bw6Z0HFNX3nlFfda5FjCuZ8LueNzRSKzLzZf360QgmAwyLx58/j888/Za6+9AIhEItx8881Zw2rzCXeh/vPMZa/Xy3777ceMGTPIDdraVu3IXS+l5PXXX3fbCEyf4Ho3O3bcRr+rVDtSZRSU++DyU2HNs2C/D+88CjOmQijUwC9+8QsqKyvx+XycffbZrFmzJqsdqTYXiivIvN5vvfVWevuKFSveRfefbxPc34Dg4jlzuGXkyB19Oj0n82/IsqChAT76SMhPP1WrW1v9f1dKPHLqqacsuuOOO+M7c7+8UopgMDgmEoncA+ybWl9WAj85C358OviSN9aU5Z3Vl1zgPfWh0/bkh9RnkVGm45wA7yDoew4ChVICIVTypiFcazWjM1cpxeervmTx2/dwzIEqfcBu69ge7ehJHaIP1FyEG92eWi/SVnmn4CmlUMAbCz6gKPYEE0ZknEt3dWWcb+bKdBtS21KbjTJUvx+C8KQud0dZIXj19fmo5uc4ZFLy6yhQh1KwMQIjD3eHpQEMqIL/9yPXQyaEO/ph0AjY70S3Lx1gzHD45fc6Zr8L+GDWNPAkj1kfgeGHu1Hww4cPZ8GCBVnTdbrnUngYWFfvmftmUqgcuGJ477338pvf/IaqqiquvvrqvMcohJQSr9fL0KFDGTFiRNZsZ9urHS0tLUydOpUlS5YQ9MMn/4ahZdm/8a3+XdFZ1Av+dpMfmhLws1vhr/9yAyNT7T/xxBP5wx/+QHV1dcE2Zi5//vnnjB8/nkgkgs/nW3TppZdOve6669p69AVpukQkL/js2bN5Zt992Sms9K1FKVfkYzFYsgQWLhRtdXXiecex7h8+fNjrixYtDuX2mW1PHMdhyJAh1evWrbvJcZzvkLTzyorh+kvgrGPB65Dxl9rxx5xF5vqMv8LMsoX+cDsdM/dYpd+Akv1AyY6bWNoEUGnBg+RNDfjvS28wtPh5Rg8qcN7bux09qQNQwSlQfkxGi3DbKETyISb54JJ5LBQxJfnrHX/k3BMawM6oI+eO2emGmvE5fToZDy4ddQDBKVD+TZSS6btrx/fhHnhTcytP/P12zjoujHA61wcQBaadDu8vcpeDfvjzFW5Ue6p83IaLf+dmjAN3RrvbLst+SJISJu0FI/onT9ML3/4JPPqC+zu48847Ofvss7Mufe4NPXN9IZdtISEsZNUWqkMpd4rSTZs2EQqFiMVi6Wl8LcvC5/NRWlpKVVUVffr0SR9nc+rYlu1QSvHAAw8wd+5clFIcewg8disYiW38u8rYJ+vvrQd1ALRK+PXdcMtf3UBJcL0ZV1xxBZdddhmBQKDg9br88su56aabAAgEAte1trb+4uuSKc62bRBgGmbW9XGkY6CQW3sdRCwW4/LLLxs8b95tq44+WpLzu9xlSP0oDQPq6lxrfulS492WFvFIVVXVE599tnhZSUkpvfnDchwH0zSPA+4imdjFY8H3T4Vrf5DtQi/0xJy1PdckzdkOnZ/Mc8vkrUMK6H8ZwgyiMvOrZhXqfENSlsVf/vBbzjq2GUvsBO3oSR1KIKrOAN+wtAcisw73hpZhSWVa7obBI489x0F7vEm/kjyXKF1H4W0Zz2ydvQ0S6PtdlG+IW29m/enHD/eIceDeP93Cmd8METCz61ACLrsdbr7HXW8acM33YNTgjmtqGnDLg/DGh+6yzwv/72I3AC7XsvN74cjppCP9P1kD+5zkWm01NTWsWrUqPWVqIUs9Xx92+poU2NbdcXaFOsAdTlZXV4dlwnuPwt617oXfZr+rbrZtTh0KWNcKF1wH/365w7MzcuRIHnzwQaZMmZLVtjVr1rDHHnvQ1taGECJ8+eWX973mmmvCu3KE+8xLZhZ/uPqDvQJ+/88Mw5jt8/jcO71SRuqPSwhBwk7IuB1flHASfxo3dOy9z/36hfjmGp0CoLS0NFhe3vrl3LmqorsddnoU0IZr8wagK59Dpsg3N8PSpYKPPzbr1q0z/hMIBB+cM+fU12+77Y8Sti4DntfrLXYc589JaxyAcaPg7zfDHv0BmfF0TP4n7k4u1GRbM5+qRXJdlgarjj+8HtdhDYB+F6JUPNsiJNWHrjKOky0
7gb9ekLvztCuCkrLoDKplgWkqwQIJ9y2OO9lf/3UFVJ5a/IRkzIkywqZpN9CHH49fmcrgSyEus/kYH0G6mtg+TqYv0RZuIsWw+eLYOESqG+CxiZ1Rrf+/jVwhBD/v717j43iuOMA/p3Z3fP6gLN9Plln80jstOYweQEJiqhrh0BIIhqlQijKgz+SiqIUNSJO1VZR1UoR6j+RqioKURT+KKIRjUhKhCBI1JUiASaJmoaHX+FhzsgHtuN7+Hy27867szP9Y2/D1YTg8Izh95FWd76zz+PH7W9n5je/4XD/X9K6rqdM0+wRQhzJ5/MdkUjkv7lcbmj16tVjb7/9trRtmzPGpOM48Aqu2LYNKSUMw+Ccc/niiy/qAMwPPvhAD4fD5qJFi2b29vbOPHr06P9l7YbDYbFkyZLU4cOHxxhjYuXKlSIUCuW3bNkilFLctm2paRpoGRn5vqYU0Dds+CXft2/HVy0tsl6Iiz+n+E166hRw6BDvSySMd2tr67bs3bsnU1tbR1eT5LqwLAuMMei6Dtu2wRjjAwMDWLduHW9ra+ONjY13fvHFF3ebplk3MjKyUErZ4PP5qoQQQQAzpVvr8pJXl8VBhxfm9DkHKsvcC4Jw0L0ICFUD4SpgbhAIVwD+GcCsGcAMP2DqgMndcsOSXRgp+OYWk3qFlwiGXqCcnMD37V3g7+FyX/8dZ4mL1j0Xgi8KB5OALYEJ6Q4hj2aB0XEgOwacTwHnUsDQ10B8EEik3Q1Bzpxze8zePLIsmv+/zAih17vNaJo2ZFnWoM/n6zRN86RhGD319fXtn3/++dDGjRvl5s2bZSAQgJRSGoYBIQSUUqAVM2Q6m9KEi1IKpaXsX6++ipWTa7r3fHJ11wAABvlJREFU9ACHDmEoFtO2ahp/K5fLDRWGp65Xmwm5JizL4pqmQdM0mc/nwRjjvb29aGtrC2zcuHGeZVl1hmGEysvLa8fHx+ts267SdT2Yy+WCAMK4zGa93iAU54XpAe5OEXwzlM8ufXDujhCUmkCJDygx3PK9jF1400oASrplO2WhByoLH3uPK1XIe2fu63EOcK3o/uSDuc9zBjCv3YX7Fz3mPc4vbnvxrXd4P/Pk5zm//H1Nc49EAjh5ksn2dnZKCO1jIcR2y7I6DcOAlJILIWgZK7ltTWlMRwjBKyur2kdH4yvHxhTa2niqp0f9LRCY9dbwcLrP25FMSoeGici04fP5vhlILwyjeh+nC0e7bduIx+MA3AvbwtxlU2kpf23xYrlq2TLwmTPdQEr5SFdPKTdw2zYQjQIdHTx79qx+cHzc2TF37pz90Whv4sIOiO6fy8u/4ZzL73hpQm55UzoFWZaFmpqae0dHR7PJZLLHNE0K3OS2IKVEIBD4kW3n/zB/vvN0U5PyV1W5CZ4UwK+OUoBhAKkU0NXF0NXFzw0P+z4C2I6XX/71sc2b/2xpmkb5NoRMEZ2SCCmwbRuxWB8ikQXlSqnfhsNi/aOPqqo77qAe+JXy5ry9mByNMhw7psT5875P02m5vbS0tHX//v39S5culRS8Cbk6dIoity3HcWBZFurr6/2JRPy5QMD6fWOjrGtoANe0okxsMiXFwXtsDOjoYKKrC4l02rfbNGdtB1j7wMBAlvZbIOT6oNMVuW0UMtrx4IMPmF1d3Y/MmuW8ft994v6lS5Xu8934AF6csV2cNOZldo+PA36/O6f8Q1K8y52UwLlzDMeOqWwsZh5Lp+0dc+fO27Nw4YL+3bv3SqUUTc8RcoNQQCe3NKUUWlpe4du2bXvAccb+2NCgVjU3K9+1TmSbvJWrdyuluydBNguk05DJJOfxOLLZrJGOxax0Pq/OlpT4O3M5qzcSifQMDw9Hh4YGXlu+XP5i2TLwmz3U713kMAaMjgJffcVkZyfvTyS0/bpe+l4gEPjPgQMH8rW1tbBtm5Z9EXITUUAntxSvGE0gEKgXIr/prruc9cuXwxcOA0JMLTgW95y9ZVPua7uBOZNhcnQUfHAQSCZVJpXS+oeHWYJzX1QpdGSz2b6KiorOd99952wsdj6/atUq3HPPPVJKCSGEV8wESimYpullzodmzNB2PPmkWLVgQWHt9Q3mbYrkOEB/P3D8OPJdXfzIxAQ+rKu76x9bt25NNDc3ywtZ5oSQHxJ6V5JpzQuMfr8/ZFnWhjlzVEtTkwrNn+8GcE/xumYhgIkJhpERYGREIZWCTCS4zGR4IpPRzg0OTgxqmtZZXl5xxrKssw8/3NSTy+X7Wlv/LR3H4UopqZTiUkrpVQ27UtXV1fPy+YF9Tz+Nu6urr/KXMUXeBYumub3ukyeBEyf0od5e9nF5ednOxx577JPt2/8uClXQbkyjCCFXjQI6mXaUUlizZo1vz54968rL8Zv775eRefMYHxlRMpkEHxpSYmyMRW17Rl8yme8LBoMnk8nUCSFEf3Nz09k33ngjtXjxEjiOI0tKSiClhOM4N3q4+KGaGux65hnUBALX75t4Q+acA7EY0NHB5Jkz/Eg6zXf6/aW7U6nhqFJKSilpuJyQaY4COpl2Wlpa0NraemckEkns2rVrTErJbduGYRiysJnQzW7it0omE6isDK2pr8e2tWsRMIxrP4ev60A+D3R3Ax0dLDswoO/O5+X7Tz31s4MffvhRhpLUCLl1UUAn5DqybRsLFzbwWCz2u3vvzb/++ONuudgrDeTF8/uaBnz9NXD0KBOnT6ue8fHS9wC2+803/3pq/foNwnGociMhtxMK6IRcB0op1NTU+OLxgXdWrMALy5aBf9+th4vXdU9MAKdPMxw/zlLxuPHJ+LjcUVMz+2A0Gk1RkhohBKCATsg1F4lEQoODPTufeEI8EolMbXlc8Y6FySTQ3s6s7m5ER0e190tK/P9sbPxJz969H1tSSkgpUVJScmN+GELItEEBnZBroDCHH6mowK5nn0VDVdWlC9V4y8NyOaCvj+HLL1VqcND8NJOZ2FZf/+PWTZs2ZV966Vfuvq5UUY0QMkUU0Am5Cp999hkaG3/6cHW12Pncc6jy+y88V1yUJR4HuruZ7OjgpzIZ/aOyssD7+/bt6160aLGkeW5CyLVAAZ2QKyCEQGVl5QuzZ2feWrtWzfT27LYsIBplsr1d5aNRfpAx872ysrL958/3pwoFZG520wkhhJDbm1IKR44cAef4U2Mj7FdegVqxAs7s2bxX1/lfqqurH3r++Wd1IQS3LOtmN5cQQgghkwkhEAqFfq7r+oFgMPhCMBicc+LECUgpMTExcbObRwgh+B+yarAh5aYb0gAAAABJRU5ErkJggg==\n", + "text/plain": "<IPython.core.display.Image object>" + }, + "metadata": {}, + "execution_count": 1 + } + ], + "source": [ + "from IPython.display import Image\n", + "Image(\"fun-fish.png\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": 
[ + "Link: [swim to the fish](fun-fish)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.1" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/tests/test_render_outputs.py b/tests/test_render_outputs.py index b2dd8c02..f9b83df6 100644 --- a/tests/test_render_outputs.py +++ b/tests/test_render_outputs.py @@ -1,6 +1,4 @@ -from unittest.mock import patch - -from importlib_metadata import EntryPoint +"""Tests for rendering code cell outputs.""" import pytest from myst_nb.render import EntryPointError, load_renderer @@ -15,6 +13,7 @@ def test_load_renderer_not_found(): # TODO sometimes fails in full tests # def test_load_renderer_not_subclass(monkeypatch): # """Test that an error is raised when the renderer is not a subclass.""" +# from importlib_metadata import EntryPoint # monkeypatch.setattr(EntryPoint, "load", lambda self: object) # with pytest.raises(EntryPointError, match="Entry Point .* not a subclass"): # load_renderer("default") @@ -101,7 +100,18 @@ def test_metadata_image(sphinx_run, clean_doctree, file_regression): ) -# TODO add test for figures +@pytest.mark.sphinx_params( + "metadata_figure.ipynb", + conf={"nb_execution_mode": "off", "nb_cell_render_key": "myst"}, +) +def test_metadata_figure(sphinx_run, clean_doctree, file_regression): + """Test configuring figure attributes to be rendered from cell metadata.""" + sphinx_run.build() + assert sphinx_run.warnings() == "" + doctree = clean_doctree(sphinx_run.get_resolved_doctree("metadata_figure")) + file_regression.check( + doctree.pformat().replace(".jpeg", ".jpg"), extension=".xml", encoding="utf8" + ) @pytest.mark.sphinx_params("unknown_mimetype.ipynb", conf={"nb_execution_mode": "off"}) diff --git a/tests/test_render_outputs/test_metadata_figure.xml b/tests/test_render_outputs/test_metadata_figure.xml new file mode 100644 index 00000000..f78f18d4 --- /dev/null +++ b/tests/test_render_outputs/test_metadata_figure.xml @@ -0,0 +1,23 @@ +<document source="metadata_figure"> + <section classes="tex2jax_ignore mathjax_ignore" ids="formatting-code-outputs" names="formatting\ code\ outputs"> + <title> + Formatting code outputs + <container cell_index="1" cell_metadata="{'myst': {'figure': {'caption': 'Hey everyone its **party** time!\n', 'name': 'fun-fish'}}}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block language="ipython3" linenos="False" xml:space="preserve"> + from IPython.display import Image + Image("fun-fish.png") + <container classes="cell_output" nb_element="cell_code_output"> + <figure ids="fun-fish" names="fun-fish"> + <container mime_type="image/png"> + <image candidates="{'*': '_build/jupyter_execute/3eacaf6adad1a4305807616181bbee897bb29177e79e2092ddd0264b848ddb4e.png'}" uri="_build/jupyter_execute/3eacaf6adad1a4305807616181bbee897bb29177e79e2092ddd0264b848ddb4e.png"> + <caption> + Hey everyone its + <strong> + party + time! 
+ <paragraph> + Link: + <reference internal="True" refid="fun-fish"> + <inline classes="std std-ref"> + swim to the fish From 65734614cee9760993a5dd6a3ca7fea44beb5cb0 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 6 Jan 2022 23:57:42 +0100 Subject: [PATCH 36/75] Do not create code cell container, if removing input & output --- docs/use/formatting_outputs.md | 10 ++++++++-- myst_nb/docutils_.py | 34 ++++++++++++++++++++------------ myst_nb/parse.py | 1 - myst_nb/render.py | 11 +++++++---- myst_nb/sphinx_.py | 36 ++++++++++++++++++++-------------- 5 files changed, 57 insertions(+), 35 deletions(-) diff --git a/docs/use/formatting_outputs.md b/docs/use/formatting_outputs.md index 853fc258..676a3133 100644 --- a/docs/use/formatting_outputs.md +++ b/docs/use/formatting_outputs.md @@ -101,7 +101,7 @@ This also makes cell outputs more deterministic. Normally, slight differences in timing may result in different orders of `stderr` and `stdout` in the cell output, while this setting will sort them properly. (use/format/images)= -## Images +## Images and Figures With the default renderer, for any image types output by the code, we can apply formatting *via* cell metadata. The top-level metadata key can be set using `nb_cell_render_key` in your `conf.py`, and is set to `render` by default. @@ -116,7 +116,13 @@ Then for the image we can apply all the variables of the standard [image directi Units of length are: 'em', 'ex', 'px', 'in', 'cm', 'mm', 'pt', 'pc' -We can also set a `caption`, which must be a single paragraph and is rendered as MyST Markdown, and `name`, by which to reference the figure: +You can also wrap the output in a `figure`, that can include: + +- **align**: "left", "center", or "right" +- **caption**: a string, which must be a single paragraph and is rendered as MyST Markdown +- **caption_before**: a boolean, if true, the caption is rendered before the figure (default is false) +- **name**: by which to reference the figure +- **classes**: space separated strings ````md ```{code-cell} ipython3 diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 23d165d9..7b58a83e 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -251,7 +251,24 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: """Render a notebook code cell.""" cell_index = token.meta["index"] tags = token.meta["metadata"].get("tags", []) - # create a container for all the output + + # TODO do we need this -/_ duplication of tag names, or can we deprecate one? + remove_input = ( + self.get_cell_render_config(cell_index, "remove_code_source") + or ("remove_input" in tags) + or ("remove-input" in tags) + ) + remove_output = ( + self.get_cell_render_config(cell_index, "remove_code_outputs") + or ("remove_output" in tags) + or ("remove-output" in tags) + ) + + # if we are remove both the input and output, we can skip the cell + if remove_input and remove_output: + return + + # create a container for all the input/output classes = ["cell"] for tag in tags: classes.append(f"tag_{tag.replace(' ', '_')}") @@ -267,30 +284,21 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: with self.current_node_context(cell_container, append=True): # TODO do we need this -/_ duplication of tag names, or can deprecate one? 
- # TODO it would be nice if remove_input/remove_output were also config # render the code source code - if ( - (not self.get_cell_render_config(cell_index, "remove_code_source")) - and ("remove_input" not in tags) - and ("remove-input" not in tags) - ): + if not remove_input: cell_input = nodes.container( nb_element="cell_code_source", classes=["cell_input"] ) self.add_line_and_source_path(cell_input, token) with self.current_node_context(cell_input, append=True): self.render_nb_cell_code_source(token) + # render the execution output, if any has_outputs = self.config["notebook"]["cells"][cell_index].get( "outputs", [] ) - if ( - has_outputs - and (not self.get_cell_render_config(cell_index, "remove_code_outputs")) - and ("remove_output" not in tags) - and ("remove-output" not in tags) - ): + if (not remove_output) and has_outputs: cell_output = nodes.container( nb_element="cell_code_output", classes=["cell_output"] ) diff --git a/myst_nb/parse.py b/myst_nb/parse.py index 7f2d8bdd..95dba193 100644 --- a/myst_nb/parse.py +++ b/myst_nb/parse.py @@ -54,7 +54,6 @@ def notebook_to_tokens( continue # skip cells tagged for removal - # TODO make configurable tags = nb_cell.metadata.get("tags", []) if ("remove_cell" in tags) or ("remove-cell" in tags): continue diff --git a/myst_nb/render.py b/myst_nb/render.py index 0e138791..1497593c 100644 --- a/myst_nb/render.py +++ b/myst_nb/render.py @@ -368,8 +368,7 @@ def render_image( cell_index, "image", "render_image_options" ) for key, spec in [ - ("classes", options_spec.class_option), # only for back-compatibility - ("class", options_spec.class_option), + ("classes", options_spec.class_option), ("alt", options_spec.unchanged), ("height", options_spec.length_or_unitless), ("width", options_spec.length_or_percentage_or_unitless), @@ -540,11 +539,15 @@ def create_figure_context( # create figure node figure_node = nodes.figure() - if figure_options.get("align") in ("center", "left", "right"): - figure_node["align"] = figure_options["align"] figure_node.line = line figure_node.source = self.document["source"] + # add attributes to figure node + if figure_options.get("classes"): + figure_node["classes"] += str(figure_options["classes"]).split() + if figure_options.get("align") in ("center", "left", "right"): + figure_node["align"] = figure_options["align"] + # add target name if figure_options.get("name"): name = nodes.fully_normalize_name(str(figure_options.get("name"))) diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index 3e1a2be7..039db496 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -409,7 +409,24 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: """Render a notebook code cell.""" cell_index = token.meta["index"] tags = token.meta["metadata"].get("tags", []) - # create a container for all the output + + # TODO do we need this -/_ duplication of tag names, or can we deprecate one? 
+ remove_input = ( + self.get_cell_render_config(cell_index, "remove_code_source") + or ("remove_input" in tags) + or ("remove-input" in tags) + ) + remove_output = ( + self.get_cell_render_config(cell_index, "remove_code_outputs") + or ("remove_output" in tags) + or ("remove-output" in tags) + ) + + # if we are remove both the input and output, we can skip the cell + if remove_input and remove_output: + return + + # create a container for all the input/output classes = ["cell"] for tag in tags: classes.append(f"tag_{tag.replace(' ', '_')}") @@ -424,31 +441,20 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: self.add_line_and_source_path(cell_container, token) with self.current_node_context(cell_container, append=True): - # TODO do we need this -/_ duplication of tag names, or can deprecate one? - # TODO it would be nice if remove_input/remove_output were also config - # render the code source code - if ( - (not self.get_cell_render_config(cell_index, "remove_code_source")) - and ("remove_input" not in tags) - and ("remove-input" not in tags) - ): + if not remove_input: cell_input = nodes.container( nb_element="cell_code_source", classes=["cell_input"] ) self.add_line_and_source_path(cell_input, token) with self.current_node_context(cell_input, append=True): self.render_nb_cell_code_source(token) + # render the execution output, if any has_outputs = self.config["notebook"]["cells"][cell_index].get( "outputs", [] ) - if ( - has_outputs - and (not self.get_cell_render_config(cell_index, "remove_code_outputs")) - and ("remove_output" not in tags) - and ("remove-output" not in tags) - ): + if (not remove_output) and has_outputs: cell_output = nodes.container( nb_element="cell_code_output", classes=["cell_output"] ) From 39a2b10e0ba353634bc0f8226f732f24ba8b29e4 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 7 Jan 2022 02:08:41 +0100 Subject: [PATCH 37/75] Fix more tests --- myst_nb/sphinx_.py | 8 +- tests/nb_fixtures/basic.txt | 105 ++++++++++++------------ tests/nb_fixtures/reporter_warnings.txt | 10 +-- tests/test_docutils.py | 67 +++++++++++++++ tests/test_nb_render.py | 55 ------------- 5 files changed, 129 insertions(+), 116 deletions(-) create mode 100644 tests/test_docutils.py delete mode 100644 tests/test_nb_render.py diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index 039db496..87c03a99 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -68,7 +68,7 @@ def sphinx_setup(app: Sphinx): app.connect("builder-inited", create_mystnb_config) # add parser and default associated file suffixes - app.add_source_parser(MystNbParser) + app.add_source_parser(Parser) app.add_source_suffix(".md", "myst-nb", override=True) app.add_source_suffix(".ipynb", "myst-nb") # add additional file suffixes for parsing @@ -83,8 +83,8 @@ def sphinx_setup(app: Sphinx): # (just to keep it "tidy", but won't affect run) # add directive to ensure all notebook cells are converted - app.add_directive("code-cell", UnexpectedCellDirective) - app.add_directive("raw-cell", UnexpectedCellDirective) + app.add_directive("code-cell", UnexpectedCellDirective, override=True) + app.add_directive("raw-cell", UnexpectedCellDirective, override=True) # add directive for downloading an executed notebook app.add_role("nb-download", NbDownloadRole()) @@ -219,7 +219,7 @@ def update_togglebutton_classes(app: Sphinx, config): config.togglebutton_selector += f", {selector}" -class MystNbParser(MystParser): +class Parser(MystParser): """Sphinx parser for Jupyter Notebook formats, 
containing MyST Markdown.""" supported = ("myst-nb",) diff --git a/tests/nb_fixtures/basic.txt b/tests/nb_fixtures/basic.txt index dc35b4fc..549b8f66 100644 --- a/tests/nb_fixtures/basic.txt +++ b/tests/nb_fixtures/basic.txt @@ -6,10 +6,9 @@ cells: source: | # A Title . -<document source="notset"> - <section ids="a-title" names="a\ title"> - <title> - A Title +<document ids="a-title" names="a\ title" nb_kernelspec="True" nb_language_info="True" source="<string>" title="A Title"> + <title> + A Title . Code Cell (no output): @@ -23,10 +22,10 @@ cells: print(a) outputs: [] . -<document source="notset"> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> - <literal_block xml:space="preserve"> +<document nb_kernelspec="True" nb_language_info="True" source="<string>"> + <container cell_index="0" cell_metadata="{}" classes="cell" exec_count="True" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block classes="code" xml:space="preserve"> a=1 print(a) . @@ -35,7 +34,8 @@ Code Cell (with lexer): . metadata: language_info: - pygments_lexer: mylexer + name: python + pygments_lexer: ipython3 cells: - cell_type: code metadata: {} @@ -43,19 +43,16 @@ cells: source: a=1 outputs: [] . -<document source="notset"> - <field_list> - <field> - <field_name> - language_info - <field_body> - <paragraph> - <literal> - {"pygments_lexer": "mylexer"} - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> - <literal_block language="mylexer" xml:space="preserve"> - a=1 +<document nb_kernelspec="True" nb_language_info="{'name': 'python', 'pygments_lexer': 'ipython3'}" source="<string>"> + <container cell_index="0" cell_metadata="{}" classes="cell" exec_count="True" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block classes="code ipython3" xml:space="preserve"> + <inline classes="n"> + a + <inline classes="o"> + = + <inline classes="mi"> + 1 . Code Cell (simple output): @@ -70,16 +67,17 @@ cells: outputs: - name: stdout output_type: stream - text: 1 + text: "1" . -<document source="notset"> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> - <literal_block xml:space="preserve"> +<document nb_kernelspec="True" nb_language_info="True" source="<string>"> + <container cell_index="0" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block classes="code" xml:space="preserve"> a=1 print(a) - <CellOutputNode classes="cell_output"> - <CellOutputBundleNode output_count="1"> + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="code myst-ansi output stream" xml:space="preserve"> + 1 . Mixed Cells: @@ -101,17 +99,16 @@ cells: source: | b . -<document source="notset"> - <section ids="a-title" names="a\ title"> - <title> - A Title - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> - <literal_block xml:space="preserve"> - a=1 - print(a) - <paragraph> - b +<document ids="a-title" names="a\ title" nb_kernelspec="True" nb_language_info="True" source="<string>" title="A Title"> + <title> + A Title + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="True" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> + <literal_block classes="code" xml:space="preserve"> + a=1 + print(a) + <paragraph> + b . 
Reference definitions defined in different cells: @@ -130,15 +127,13 @@ cells: source: | [b]: after . -<document source="notset"> +<document nb_kernelspec="True" nb_language_info="True" source="<string>"> <paragraph> - <pending_xref refdoc="mock_docname" refdomain="True" refexplicit="True" reftarget="before" reftype="myst" refwarn="True"> - <inline classes="xref myst"> - a + <reference refuri="before"> + a - <pending_xref refdoc="mock_docname" refdomain="True" refexplicit="True" reftarget="after" reftype="myst" refwarn="True"> - <inline classes="xref myst"> - b + <reference refuri="after"> + b . Footnote definitions defined in different cells: @@ -157,16 +152,22 @@ cells: source: | [^b]: after . -<document source="notset"> +<document nb_kernelspec="True" nb_language_info="True" source="<string>"> <paragraph> - <footnote_reference auto="1" ids="id1" refname="a"> + <footnote_reference auto="1" ids="id1" refid="a"> + 1 - <footnote_reference auto="1" ids="id2" refname="b"> + <footnote_reference auto="1" ids="id2" refid="b"> + 2 <transition classes="footnotes"> - <footnote auto="1" ids="a" names="a"> + <footnote auto="1" backrefs="id1" ids="a" names="a"> + <label> + 1 <paragraph> before - <footnote auto="1" ids="b" names="b"> + <footnote auto="1" backrefs="id2" ids="b" names="b"> + <label> + 2 <paragraph> after . diff --git a/tests/nb_fixtures/reporter_warnings.txt b/tests/nb_fixtures/reporter_warnings.txt index 3324011f..8656650f 100644 --- a/tests/nb_fixtures/reporter_warnings.txt +++ b/tests/nb_fixtures/reporter_warnings.txt @@ -10,7 +10,7 @@ cells: source: | {unknown}`a` . -source/path:20002: (ERROR/3) Unknown interpreted text role "unknown". +<string>:20002: (ERROR/3) Unknown interpreted text role "unknown". . @@ -24,7 +24,7 @@ cells: ```{xyz} ``` . -source/path:10003: (ERROR/3) Unknown directive type "xyz". +<string>:10003: (ERROR/3) Unknown directive type "xyz". . Directive parsing error: @@ -36,7 +36,7 @@ cells: ```{class} ``` . -source/path:10002: (ERROR/3) Directive 'class': 1 argument(s) required, 0 supplied +<string>:10002: (ERROR/3) Directive 'class': 1 argument(s) required, 0 supplied . Directive run error: @@ -49,7 +49,7 @@ cells: x ``` . -source/path:10002: (ERROR/3) Invalid context: the "date" directive can only be used within a substitution definition. +<string>:10002: (ERROR/3) Invalid context: the "date" directive can only be used within a substitution definition. . Duplicate reference definition: @@ -66,5 +66,5 @@ cells: [a]: c . -source/path:20004: (WARNING/2) Duplicate reference definition: A [myst.ref] +<string>:20004: (WARNING/2) Duplicate reference definition: A . 
\ No newline at end of file diff --git a/tests/test_docutils.py b/tests/test_docutils.py new file mode 100644 index 00000000..ffd73e48 --- /dev/null +++ b/tests/test_docutils.py @@ -0,0 +1,67 @@ +"""Run parsing tests against the docutils parser.""" +from io import StringIO +import json +from pathlib import Path + +from docutils.core import publish_doctree +from markdown_it.utils import read_fixture_file +import pytest +import yaml + +from myst_nb.docutils_ import Parser + +FIXTURE_PATH = Path(__file__).parent.joinpath("nb_fixtures") + + +@pytest.mark.parametrize( + "line,title,input,expected", + read_fixture_file(FIXTURE_PATH.joinpath("basic.txt")), + ids=[f"{i[0]}-{i[1]}" for i in read_fixture_file(FIXTURE_PATH / "basic.txt")], +) +def test_basic(line, title, input, expected): + """Test basic parsing.""" + dct = yaml.safe_load(input) + dct.update({"nbformat": 4, "nbformat_minor": 4}) + dct.setdefault("metadata", {}) + report_stream = StringIO() + doctree = publish_doctree( + json.dumps(dct), + parser=Parser(), + settings_overrides={ + "nb_execution_mode": "off", + "myst_all_links_external": True, + "warning_stream": report_stream, + }, + ) + assert report_stream.getvalue().rstrip() == "" + + try: + assert doctree.pformat().rstrip() == expected.rstrip() + except AssertionError: + print(doctree.pformat().rstrip()) + raise + + +@pytest.mark.parametrize( + "line,title,input,expected", + read_fixture_file(FIXTURE_PATH.joinpath("reporter_warnings.txt")), + ids=[ + f"{i[0]}-{i[1]}" + for i in read_fixture_file(FIXTURE_PATH / "reporter_warnings.txt") + ], +) +def test_reporting(line, title, input, expected): + """Test that warnings and errors are reported as expected.""" + dct = yaml.safe_load(input) + dct.update({"metadata": {}, "nbformat": 4, "nbformat_minor": 4}) + report_stream = StringIO() + publish_doctree( + json.dumps(dct), + parser=Parser(), + settings_overrides={ + "nb_execution_mode": "off", + "warning_stream": report_stream, + }, + ) + + assert report_stream.getvalue().rstrip() == expected.rstrip() diff --git a/tests/test_nb_render.py b/tests/test_nb_render.py deleted file mode 100644 index 3bd7529f..00000000 --- a/tests/test_nb_render.py +++ /dev/null @@ -1,55 +0,0 @@ -from pathlib import Path - -from markdown_it.utils import read_fixture_file -from myst_parser.docutils_renderer import make_document -from myst_parser.main import MdParserConfig -from myst_parser.sphinx_renderer import mock_sphinx_env -import nbformat -import pytest -import yaml - -FIXTURE_PATH = Path(__file__).parent.joinpath("nb_fixtures") - - -@pytest.mark.parametrize( - "line,title,input,expected", read_fixture_file(FIXTURE_PATH.joinpath("basic.txt")) -) -def test_render(line, title, input, expected): - from myst_nb.parse import nb_to_tokens, tokens_to_docutils - - dct = yaml.safe_load(input) - dct.setdefault("metadata", {}) - ntbk = nbformat.from_dict(dct) - md, env, tokens = nb_to_tokens(ntbk, MdParserConfig(), "default") - document = make_document() - with mock_sphinx_env(document=document): - tokens_to_docutils(md, env, tokens, document) - output = document.pformat().rstrip() - if output != expected.rstrip(): - print(output) - assert output == expected.rstrip() - - -@pytest.mark.parametrize( - "line,title,input,expected", - read_fixture_file(FIXTURE_PATH.joinpath("reporter_warnings.txt")), -) -def test_reporting(line, title, input, expected): - from myst_nb.parse import nb_to_tokens, tokens_to_docutils - - dct = yaml.safe_load(input) - dct.setdefault("metadata", {}) - ntbk = nbformat.from_dict(dct) - md, env, 
tokens = nb_to_tokens(ntbk, MdParserConfig(), "default") - document = make_document("source/path") - messages = [] - - def observer(msg_node): - if msg_node["level"] > 1: - messages.append(msg_node.astext()) - - document.reporter.attach_observer(observer) - with mock_sphinx_env(document=document): - tokens_to_docutils(md, env, tokens, document) - - assert "\n".join(messages).rstrip() == expected.rstrip() From 24604f65115ddb6b421aaf4df7b89d2cb11c10bb Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 7 Jan 2022 02:42:28 +0100 Subject: [PATCH 38/75] Render raw cells --- myst_nb/docutils_.py | 10 +++++++++- myst_nb/parse.py | 1 + myst_nb/render.py | 19 +++++++++++++++++-- myst_nb/sphinx_.py | 10 +++++++++- tests/nb_fixtures/basic.txt | 17 +++++++++++++++++ 5 files changed, 53 insertions(+), 4 deletions(-) diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 7b58a83e..9789e856 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -245,7 +245,12 @@ def render_nb_cell_markdown(self, token: SyntaxTreeNode) -> None: def render_nb_cell_raw(self, token: SyntaxTreeNode) -> None: """Render a notebook raw cell.""" - # TODO + line = token_line(token, 0) + _nodes = self.nb_renderer.render_raw_cell( + token.content, token.meta["metadata"], token.meta["index"], line + ) + self.add_line_and_source_path_r(_nodes, token) + self.current_node.extend(_nodes) def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: """Render a notebook code cell.""" @@ -351,6 +356,9 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: self.current_node.extend(_nodes) elif output.output_type in ("display_data", "execute_result"): + # TODO these output have their own 'metadata' key, + # we should parse these to render_mime_type + # TODO unwrapped Markdown (so you can output headers) # maybe in a transform, we grab the containers and move them # "below" the code cell container? diff --git a/myst_nb/parse.py b/myst_nb/parse.py index 95dba193..cf2be298 100644 --- a/myst_nb/parse.py +++ b/myst_nb/parse.py @@ -91,6 +91,7 @@ def notebook_to_tokens( ) elif nb_cell["cell_type"] == "raw": # https://nbformat.readthedocs.io/en/5.1.3/format_description.html#raw-nbconvert-cells + metadata = nb_node_to_dict(nb_cell["metadata"]) tokens = [ Token( "nb_cell_raw", diff --git a/myst_nb/render.py b/myst_nb/render.py index 1497593c..6c9ecb86 100644 --- a/myst_nb/render.py +++ b/myst_nb/render.py @@ -187,6 +187,21 @@ def render_error( node["classes"] += ["output", "traceback"] return [node] + def render_raw_cell( + self, content: str, metadata: dict, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a raw cell. 
+ + https://nbformat.readthedocs.io/en/5.1.3/format_description.html#raw-nbconvert-cells + + :param content: the raw cell content + :param metadata: the cell metadata + :param cell_index: the index of the cell + :param source_line: the line number of the cell in the source document + """ + mime_type = metadata.get("format", "text/plain") + return self.render_mime_type(mime_type, content, cell_index, source_line) + def render_mime_type( self, mime_type: str, data: Union[str, bytes], cell_index: int, source_line: int ) -> List[nodes.Element]: @@ -329,8 +344,8 @@ def render_text_latex( def render_image( self, - mime_type: Union[str, bytes], - data: bytes, + mime_type: str, + data: Union[str, bytes], cell_index: int, source_line: int, ) -> List[nodes.Element]: diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index 87c03a99..3a89c77e 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -403,7 +403,12 @@ def render_nb_cell_markdown(self, token: SyntaxTreeNode) -> None: def render_nb_cell_raw(self, token: SyntaxTreeNode) -> None: """Render a notebook raw cell.""" - # TODO + line = token_line(token, 0) + _nodes = self.nb_renderer.render_raw_cell( + token.content, token.meta["metadata"], token.meta["index"], line + ) + self.add_line_and_source_path_r(_nodes, token) + self.current_node.extend(_nodes) def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: """Render a notebook code cell.""" @@ -505,6 +510,9 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: self.current_node.extend(_nodes) elif output.output_type in ("display_data", "execute_result"): + # TODO these output have their own 'metadata' key, + # we should parse these to render_mime_type + # TODO unwrapped Markdown (so you can output headers) # maybe in a transform, we grab the containers and move them # "below" the code cell container? diff --git a/tests/nb_fixtures/basic.txt b/tests/nb_fixtures/basic.txt index 549b8f66..17d06aa4 100644 --- a/tests/nb_fixtures/basic.txt +++ b/tests/nb_fixtures/basic.txt @@ -80,6 +80,23 @@ cells: 1 . +Raw Cell +. +cells: + - cell_type: raw + metadata: {"format": "text/html"} + source: | + <div> + <h1>A Title</h1> + </div> +. +<document nb_kernelspec="True" nb_language_info="True" source="<string>"> + <raw classes="output text_html" format="html" xml:space="preserve"> + <div> + <h1>A Title</h1> + </div> +. + Mixed Cells: . 
cells: From 3a191934cd23dfb632510cd7baf3a233bc7ef2bc Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 7 Jan 2022 11:24:00 +0100 Subject: [PATCH 39/75] initial glue implementation --- docs/use/formatting_outputs.md | 8 +- myst_nb/configuration.py | 6 +- myst_nb/docutils_.py | 69 ++++--- myst_nb/execute.py | 7 +- myst_nb/nb_glue/__init__.py | 37 ++++ myst_nb/nb_glue/domain.py | 338 ++------------------------------- myst_nb/nb_glue/elements.py | 146 ++++++++++++++ myst_nb/nb_glue/transform.py | 43 ----- myst_nb/nb_glue/utils.py | 126 ------------ myst_nb/preprocess.py | 90 +++++++++ myst_nb/render.py | 133 +++++-------- myst_nb/sphinx_.py | 33 ++-- tests/nb_fixtures/basic.txt | 16 +- tests/test_docutils.py | 2 + tests/test_glue.py | 60 +++--- 15 files changed, 445 insertions(+), 669 deletions(-) create mode 100644 myst_nb/nb_glue/elements.py delete mode 100644 myst_nb/nb_glue/transform.py delete mode 100644 myst_nb/nb_glue/utils.py create mode 100644 myst_nb/preprocess.py diff --git a/docs/use/formatting_outputs.md b/docs/use/formatting_outputs.md index 676a3133..6308edc5 100644 --- a/docs/use/formatting_outputs.md +++ b/docs/use/formatting_outputs.md @@ -116,10 +116,10 @@ Then for the image we can apply all the variables of the standard [image directi Units of length are: 'em', 'ex', 'px', 'in', 'cm', 'mm', 'pt', 'pc' -You can also wrap the output in a `figure`, that can include: +You can also wrap the output in a [`figure`](https://docutils.sourceforge.io/docs/ref/rst/directives.html#figure), that can include: - **align**: "left", "center", or "right" -- **caption**: a string, which must be a single paragraph and is rendered as MyST Markdown +- **caption**: a string, which must contain a single paragraph and is rendered as MyST Markdown (subsequent paragraphs are added as a legend) - **caption_before**: a boolean, if true, the caption is rendered before the figure (default is false) - **name**: by which to reference the figure - **classes**: space separated strings @@ -136,6 +136,8 @@ render: figure: caption: | Hey everyone its **party** time! + + (and I'm a legend) name: fun-fish-ref --- from IPython.display import Image @@ -154,6 +156,8 @@ render: figure: caption: | Hey everyone its **party** time! + + (and I'm a legend) name: fun-fish-ref --- from IPython.display import Image diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index 1a0b6ef8..8b6fc4ca 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -201,14 +201,12 @@ class NbParserConfig: # notebook execution options execution_mode: Literal["off", "force", "auto", "cache"] = attr.ib( - # TODO different default for docutils (off) and sphinx (cache)? 
- # TODO deprecate auto and set cache as default instead default="auto", validator=in_( [ "off", - "force", "auto", + "force", "cache", ] ), @@ -273,7 +271,7 @@ class NbParserConfig: default="build", validator=instance_of(str), metadata={ - "help": "Output folder for external outputs", + "help": "Folder for external outputs (like images), skipped if empty", "sphinx_exclude": True, # in sphinx we always output to the build folder }, ) diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 9789e856..c723ef71 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -5,7 +5,7 @@ from docutils import nodes from docutils.core import default_description, publish_cmdline -from docutils.parsers.rst.directives import register_directive +from docutils.parsers.rst.directives import _directives from markdown_it.token import Token from markdown_it.tree import SyntaxTreeNode from myst_parser.docutils_ import DOCUTILS_EXCLUDED_ARGS as DOCUTILS_EXCLUDED_ARGS_MYST @@ -17,21 +17,18 @@ from nbformat import NotebookNode from myst_nb.configuration import NbParserConfig -from myst_nb.execute import update_notebook +from myst_nb.execute import execute_notebook from myst_nb.loggers import DEFAULT_LOG_TYPE, DocutilsDocLogger +from myst_nb.nb_glue.elements import PasteDirective, PasteFigureDirective from myst_nb.parse import nb_node_to_dict, notebook_to_tokens +from myst_nb.preprocess import preprocess_notebook from myst_nb.read import ( NbReader, UnexpectedCellDirective, read_myst_markdown_notebook, standard_nb_read, ) -from myst_nb.render import ( - NbElementRenderer, - coalesce_streams, - create_figure_context, - load_renderer, -) +from myst_nb.render import NbElementRenderer, create_figure_context, load_renderer DOCUTILS_EXCLUDED_ARGS = { f.name for f in NbParserConfig.get_fields() if f.metadata.get("docutils_exclude") @@ -55,6 +52,23 @@ class Parser(MystParser): config_section = "myst-nb parser" def parse(self, inputstring: str, document: nodes.document) -> None: + # register/unregister special directives and roles + new_directives = ( + ("code-cell", UnexpectedCellDirective), + ("raw-cell", UnexpectedCellDirective), + ("glue:", PasteDirective), + ("glue:any", PasteDirective), + ("glue:figure", PasteFigureDirective), + ) + for name, directive in new_directives: + _directives[name] = directive + try: + return self._parse(inputstring, document) + finally: + for name, _ in new_directives: + _directives.pop(name, None) + + def _parse(self, inputstring: str, document: nodes.document) -> None: """Parse source text. 
:param inputstring: The source string to parse @@ -62,10 +76,6 @@ def parse(self, inputstring: str, document: nodes.document) -> None: """ document_source = document["source"] - # register special directives - register_directive("code-cell", UnexpectedCellDirective) - register_directive("raw-cell", UnexpectedCellDirective) - # get a logger for this document logger = DocutilsDocLogger(document) @@ -118,7 +128,7 @@ def parse(self, inputstring: str, document: nodes.document) -> None: ) # potentially execute notebook and/or populate outputs from cache - notebook, exec_data = update_notebook( + notebook, exec_data = execute_notebook( notebook, document_source, nb_config, logger ) if exec_data: @@ -138,22 +148,32 @@ def parse(self, inputstring: str, document: nodes.document) -> None: mdit_parser.renderer, logger ) mdit_parser.options["nb_renderer"] = nb_renderer + # we currently do this early, so that the nb_renderer has access to things + mdit_parser.renderer.setup_render(mdit_parser.options, mdit_env) + + # pre-process notebook and store resources for render + resources = preprocess_notebook( + notebook, logger, mdit_parser.renderer.get_cell_render_config + ) + mdit_parser.renderer.config["nb_resources"] = resources + # we temporarily store nb_renderer on the document, + # so that roles/directives can access it + document.attributes["nb_renderer"] = nb_renderer # parse to tokens mdit_tokens = notebook_to_tokens(notebook, mdit_parser, mdit_env, logger) # convert to docutils AST, which is added to the document mdit_parser.renderer.render(mdit_tokens, mdit_parser.options, mdit_env) - # write updated notebook to output folder - # TODO currently this has to be done after the render has been called/setup - # TODO maybe docutils should be optional on whether to do this? - # utf-8 is the de-facto standard encoding for notebooks. + # write final (updated) notebook to output folder (utf8 is standard encoding) content = nbformat.writes(notebook).encode("utf-8") - path = ["rendered.ipynb"] + path = ["processed.ipynb"] nb_renderer.write_file(path, content, overwrite=True) # TODO also write CSS to output folder if necessary or always? # TODO we also need to load JS URLs if ipywidgets are present and HTML + document.attributes.pop("nb_renderer") + class DocutilsNbRenderer(DocutilsRenderer): """A docutils-only renderer for Jupyter Notebooks.""" @@ -203,8 +223,9 @@ def render_nb_metadata(self, token: SyntaxTreeNode) -> None: metadata = dict(token.meta) # save these special keys on the document, rather than as docinfo - self.document["nb_kernelspec"] = metadata.pop("kernelspec", None) - self.document["nb_language_info"] = metadata.pop("language_info", None) + for key in ("kernelspec", "language_info", "source_map"): + if key in metadata: + self.document[f"nb_{key}"] = metadata.pop(key) # TODO should we provide hook for NbElementRenderer? @@ -288,8 +309,6 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: self.add_line_and_source_path(cell_container, token) with self.current_node_context(cell_container, append=True): - # TODO do we need this -/_ duplication of tag names, or can deprecate one? 
- # render the code source code if not remove_input: cell_input = nodes.container( @@ -332,12 +351,8 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: outputs: List[NotebookNode] = self.config["notebook"]["cells"][cell_index].get( "outputs", [] ) - if self.get_cell_render_config(cell_index, "merge_streams"): - outputs = coalesce_streams(outputs) - - mime_priority = self.get_cell_render_config(cell_index, "mime_priority") - # render the outputs + mime_priority = self.get_cell_render_config(cell_index, "mime_priority") for output in outputs: if output.output_type == "stream": if output.name == "stdout": diff --git a/myst_nb/execute.py b/myst_nb/execute.py index f6f5337c..eaf991b2 100644 --- a/myst_nb/execute.py +++ b/myst_nb/execute.py @@ -34,15 +34,16 @@ class ExecutionResult(TypedDict): """traceback if the notebook failed""" -def update_notebook( +def execute_notebook( notebook: NotebookNode, source: str, nb_config: NbParserConfig, logger: Logger, ) -> Tuple[NotebookNode, Optional[ExecutionResult]]: - """Update a notebook using the given configuration. + """Update a notebook's outputs using the given configuration. - This function may execute the notebook if necessary, to update its outputs. + This function may execute the notebook if necessary, to update its outputs, + or populate from a cache. :param notebook: The notebook to update. :param source: Path to or description of the input source being processed. diff --git a/myst_nb/nb_glue/__init__.py b/myst_nb/nb_glue/__init__.py index f6ed5efd..159115b2 100644 --- a/myst_nb/nb_glue/__init__.py +++ b/myst_nb/nb_glue/__init__.py @@ -1,5 +1,12 @@ +"""Functionality for storing special data in notebook code cells, +which can then be inserted into the document body. +""" +from logging import Logger +from typing import Any, Dict, List + import IPython from IPython.display import display as ipy_display +from nbformat import NotebookNode GLUE_PREFIX = "application/papermill.record/" @@ -26,3 +33,33 @@ def glue(name: str, variable, display: bool = True) -> None: ipy_display( {mime_prefix + k: v for k, v in mimebundle.items()}, raw=True, metadata=metadata ) + + +def extract_glue_data( + notebook: NotebookNode, + resources: Dict[str, Any], + source_map: List[int], + logger: Logger, +) -> None: + """Extract all the glue data from the notebook, into the resources dictionary.""" + data = resources.setdefault("glue", {}) + for index, cell in enumerate(notebook.cells): + if cell.cell_type != "code": + continue + outputs = [] + for output in cell.get("outputs", []): + meta = output.get("metadata", {}) + if "scrapbook" not in meta: + outputs.append(output) + continue + key = meta["scrapbook"]["name"] + mime_prefix = len(meta["scrapbook"].get("mime_prefix", "")) + if key in data: + logger.warning( + f"glue key {key!r} duplicate", + subtype="glue", + line=source_map[index], + ) + output["data"] = {k[mime_prefix:]: v for k, v in output["data"].items()} + data[key] = output + cell.outputs = outputs diff --git a/myst_nb/nb_glue/domain.py b/myst_nb/nb_glue/domain.py index 2ab7a976..91175e15 100644 --- a/myst_nb/nb_glue/domain.py +++ b/myst_nb/nb_glue/domain.py @@ -1,253 +1,10 @@ -import copy -import json -from pathlib import Path -from typing import Dict, List, cast +from typing import List -from docutils import nodes -from docutils.parsers.rst import directives from sphinx.domains import Domain -from sphinx.domains.math import MathDomain - -# from myst_nb.nodes import CellOutputBundleNode, CellOutputNode from sphinx.ext.autodoc.directive 
import DummyOptionSpec -from sphinx.util import logging from sphinx.util.docutils import SphinxDirective, SphinxRole -from myst_nb.nb_glue import GLUE_PREFIX -from myst_nb.nb_glue.utils import find_all_keys - -SPHINX_LOGGER = logging.getLogger(__name__) - - -class PasteNode(nodes.container): - """Represent a MimeBundle in the Sphinx AST, to be transformed later.""" - - def __init__(self, key, **attributes): - attributes["key"] = key - super().__init__("", **attributes) - - @property - def key(self): - return self.attributes["key"] - - def copy(self): - obj = self.__class__( - self.key, **{k: v for k, v in self.attributes.items() if k != "key"} - ) - obj.document = self.document - obj.source = self.source - obj.line = self.line - return obj - - def create_node(self, output: dict, document, env): - """Create the output node, given the cell output.""" - # the whole output chunk is deposited and rendered later - # TODO move these nodes to separate module, to avoid cyclic imports - output_node = CellOutputBundleNode([output], env.config["nb_render_plugin"]) - out_node = CellOutputNode(classes=["cell_output"]) - out_node.source, out_node.line = self.source, self.line - out_node += output_node - return out_node - - -class PasteInlineNode(PasteNode): - def create_node(self, output: dict, document, env): - """Create the output node, given the cell output.""" - # the whole output chunk is deposited and rendered later - bundle_node = CellOutputBundleNode([output], "inline") - inline_node = nodes.inline("", "", bundle_node, classes=["pasted-inline"]) - inline_node.source, inline_node.line = self.source, self.line - return inline_node - - -class PasteTextNode(PasteNode): - """A subclass of ``PasteNode`` that only supports plain text.""" - - @property - def formatting(self): - return self.attributes["formatting"] - - def create_node(self, output: dict, document, env): - """Create the output node, given the cell output.""" - mimebundle = output["data"] - if "text/plain" in mimebundle: - text = mimebundle["text/plain"].strip("'") - # If formatting is specified, see if we have a number of some kind - if self.formatting: - try: - newtext = float(text) - text = f"{newtext:>{self.formatting}}" - except ValueError: - pass - node = nodes.inline(text, text, classes=["pasted-text"]) - node.source, node.line = self.source, self.line - return node - return None - - -class PasteMathNode(PasteNode): - """A subclass of ``PasteNode`` that only supports plain text. 
- - Code mainly copied from sphinx.directives.patches.MathDirective - """ - - def create_node(self, output: dict, document, env): - """Create the output node, given the cell output.""" - mimebundle = output["data"] - if "text/latex" in mimebundle: - text = mimebundle["text/latex"].strip("$") - node = nodes.math_block( - text, - text, - classes=["pasted-math"], - docname=env.docname, - number=self["math_number"], - nowrap=self["math_nowrap"], - label=self["math_label"], - ) - node.line, node.source = self.line, self.source - if "math_class" in self and self["math_class"]: - node["classes"].append(self["math_class"]) - return node - return None - - -# Role and directive for pasting -class Paste(SphinxDirective): - required_arguments = 1 - final_argument_whitespace = True - has_content = False - - option_spec = {"id": directives.unchanged} - - def run(self): - node = PasteNode(self.arguments[0]) - self.set_source_info(node) - return [node] - - -class PasteMath(Paste): - - option_spec = Paste.option_spec.copy() - option_spec["class"] = directives.class_option - option_spec["label"] = directives.unchanged - option_spec["nowrap"] = directives.flag - has_content = False - - def run(self): - paste_node = PasteMathNode(self.arguments[0]) - self.set_source_info(paste_node) - paste_node["math_class"] = self.options.pop("class", None) - paste_node["math_label"] = self.options.pop("label", None) - paste_node["math_nowrap"] = "nowrap" in self.options - target = self.add_target(paste_node) - if target: - return [target, paste_node] - return [paste_node] - - def add_target(self, node): - if not node["math_label"]: - return None - # register label to domain - domain = cast(MathDomain, self.env.get_domain("math")) - domain.note_equation(self.env.docname, node["math_label"], location=node) - node["math_number"] = domain.get_equation_number_for(node["math_label"]) - - # add target node - node_id = nodes.make_id("equation-%s" % node["math_label"]) - target = nodes.target("", "", ids=[node_id]) - self.state.document.note_explicit_target(target) - return target - - -class PasteFigure(Paste): - def align(argument): - return directives.choice(argument, ("left", "center", "right")) - - def figwidth_value(argument): - return directives.length_or_percentage_or_unitless(argument, "px") - - option_spec = Paste.option_spec.copy() - option_spec["figwidth"] = figwidth_value - option_spec["figclass"] = directives.class_option - option_spec["align"] = align - option_spec["name"] = directives.unchanged - has_content = True - - def run(self): - figwidth = self.options.pop("figwidth", None) - figclasses = self.options.pop("figclass", None) - align = self.options.pop("align", None) - # On the Paste node we should add an attribute to specify that only image - # type mimedata is allowed, then this would be used by - # PasteNodesToDocutils -> CellOutputsToNodes to alter the render priority - # and/or log warnings if that type of mimedata is not available - (paste_node,) = Paste.run(self) - if isinstance(paste_node, nodes.system_message): - return [paste_node] - figure_node = nodes.figure("", paste_node) - figure_node.line = paste_node.line - figure_node.source = paste_node.source - if figwidth is not None: - figure_node["width"] = figwidth - if figclasses: - figure_node["classes"] += figclasses - if align: - figure_node["align"] = align - self.add_name(figure_node) - # note: this is copied directly from sphinx.Figure - if self.content: - node = nodes.Element() # anonymous container for parsing - 
self.state.nested_parse(self.content, self.content_offset, node) - first_node = node[0] - if isinstance(first_node, nodes.paragraph): - caption = nodes.caption(first_node.rawsource, "", *first_node.children) - caption.source = first_node.source - caption.line = first_node.line - figure_node += caption - elif not (isinstance(first_node, nodes.comment) and len(first_node) == 0): - error = self.state_machine.reporter.error( - "Figure caption must be a paragraph or empty comment.", - nodes.literal_block(self.block_text, self.block_text), - line=self.lineno, - ) - return [figure_node, error] - if len(node) > 1: - figure_node += nodes.legend("", *node[1:]) - return [figure_node] - - -def paste_any_role(name, rawtext, text, lineno, inliner, options=None, content=()): - """This role will simply add the cell output""" - path = inliner.document.current_source - # Remove line number if we have a notebook because it is unreliable - if path.endswith(".ipynb"): - lineno = None - path = str(Path(path).with_suffix("")) - return [PasteInlineNode(text, location=(path, lineno))], [] - - -def paste_text_role(name, rawtext, text, lineno, inliner, options=None, content=()): - """This role will be parsed as text, with some formatting fanciness. - - The text can have a final ``:``, - whereby everything to the right will be treated as a formatting string, e.g. - ``key:.2f`` - """ - # First check if we have both key:format in the key - parts = text.rsplit(":", 1) - if len(parts) == 2: - key, formatting = parts - else: - key = parts[0] - formatting = None - - path = inliner.document.current_source - # Remove line number if we have a notebook because it is unreliable - if path.endswith(".ipynb"): - lineno = None - path = str(Path(path).with_suffix("")) - return [PasteTextNode(key, formatting=formatting, location=(path, lineno))], [] +from myst_nb.nb_glue.elements import PasteDirective, PasteFigureDirective class DummyDirective(SphinxDirective): @@ -266,98 +23,25 @@ class DummyDirective2(DummyDirective): class DummyRole(SphinxRole): def run(self): - return [nodes.inline(text=self.text)], [] + return [], [] class NbGlueDomain(Domain): - """A sphinx domain for handling glue data""" + """A sphinx domain for defining glue roles and directives.""" name = "glue" label = "NotebookGlue" + # data version, bump this when the format of self.data changes - data_version = 0.1 - # data value for a fresh environment - # - cache is the mapping of all keys to outputs - # - docmap is the mapping of docnames to the set of keys it contains - initial_data = {"cache": {}, "docmap": {}} + data_version = 0.2 - # TODO placeholders for glue roles/directives which need re-working - # directives = {"": Paste, "any": Paste, "figure": PasteFigure, "math": PasteMath} - # roles = {"": paste_any_role, "any": paste_any_role, "text": paste_text_role} directives = { - "": DummyDirective, - "any": DummyDirective, - "figure": DummyDirective2, + "": PasteDirective, + "any": PasteDirective, + "figure": PasteFigureDirective, "math": DummyDirective, } roles = {"": DummyRole(), "any": DummyRole(), "text": DummyRole()} - @property - def cache(self) -> dict: - return self.env.domaindata[self.name]["cache"] - - @property - def docmap(self) -> dict: - return self.env.domaindata[self.name]["docmap"] - - def __contains__(self, key): - return key in self.cache - - def get(self, key, view=True, replace=True): - """Grab the output for this key and replace `glue` specific prefix info.""" - output = self.cache.get(key) - if view: - output = copy.deepcopy(output) - if 
replace: - output["data"] = { - key.replace(GLUE_PREFIX, ""): val for key, val in output["data"].items() - } - return output - - @classmethod - def from_env(cls, env) -> "NbGlueDomain": - return env.domains[cls.name] - - def write_cache(self, path=None): - """If None, write to doctreedir""" - if path is None: - path = Path(self.env.doctreedir).joinpath("glue_cache.json") - if isinstance(path, str): - path = Path(path) - with path.open("w", encoding="utf8") as handle: - json.dump( - { - d: {k: self.cache[k] for k in vs if k in self.cache} - for d, vs in self.docmap.items() - if vs - }, - handle, - indent=2, - ) - - def add_notebook(self, ntbk, docname): - """Find all glue keys from the notebook and add to the cache.""" - new_keys = find_all_keys( - ntbk, - existing_keys={v: k for k, vs in self.docmap.items() for v in vs}, - path=str(docname), - logger=SPHINX_LOGGER, - ) - self.docmap[str(docname)] = set(new_keys) - self.cache.update(new_keys) - - def clear_doc(self, docname: str) -> None: - """Remove traces of a document in the domain-specific inventories.""" - for key in self.docmap.get(docname, []): - self.cache.pop(key, None) - self.docmap.pop(docname, None) - - def merge_domaindata(self, docnames: List[str], otherdata: Dict) -> None: - """Merge in data regarding *docnames* from a different domaindata - inventory (coming from a subprocess in parallel builds). - """ - # TODO need to deal with key clashes - # raise NotImplementedError( - # "merge_domaindata must be implemented in %s " - # "to be able to do parallel builds!" % self.__class__ - # ) + def merge_domaindata(self, docnames: List[str], otherdata: dict) -> None: + pass diff --git a/myst_nb/nb_glue/elements.py b/myst_nb/nb_glue/elements.py new file mode 100644 index 00000000..56ef3f1c --- /dev/null +++ b/myst_nb/nb_glue/elements.py @@ -0,0 +1,146 @@ +"""Directives and roles which can be used by both docutils and sphinx.""" +from typing import Any, Dict, List + +from docutils import nodes +from docutils.parsers.rst import Directive, directives + +from myst_nb.loggers import DocutilsDocLogger, SphinxDocLogger +from myst_nb.render import NbElementRenderer + + +class PasteDirective(Directive): + """A directive for pasting code outputs from notebooks.""" + + required_arguments = 1 # the key + final_argument_whitespace = True + has_content = False + + @property + def is_sphinx(self) -> bool: + """Return True if we are in sphinx, otherwise docutils.""" + return hasattr(self.state.document.settings, "env") + + def warning(self, message: str) -> nodes.system_message: + if self.is_sphinx: + logger = SphinxDocLogger(self.state.document) + else: + logger = DocutilsDocLogger(self.state.document) + logger.warning(message, subtype="glue") + return nodes.system_message( + message, + type="WARNING", + level=2, + line=self.lineno, + source=self.state.document["source"], + ) + + def set_source_info(self, node: nodes.Node) -> None: + """Set source and line number to the node.""" + node.source, node.line = self.state_machine.get_source_and_line(self.lineno) + + def run(self) -> List[nodes.Node]: + """Run the directive.""" + key = self.arguments[0] + if "nb_renderer" not in self.state.document: + return self.warning("No 'nb_renderer' found on the document.") + nb_renderer: NbElementRenderer = self.state.document["nb_renderer"] + resources = nb_renderer.get_resources() + if "glue" not in resources: + return self.warning("No glue data found in the notebook resources.") + if key not in resources["glue"]: + return self.warning(f"No key {key!r} found in glue 
data.") + if not resources["glue"][key].get("data"): + return self.warning(f"{key!r} does not contain any data.") + if self.is_sphinx: + return self.render_output_sphinx(nb_renderer, resources["glue"][key]) + else: + return self.render_output_docutils(nb_renderer, resources["glue"][key]) + + def render_output_docutils( + self, nb_renderer: NbElementRenderer, output: Dict[str, Any] + ) -> List[nodes.Node]: + mime_priority = nb_renderer.renderer.get_nb_config("mime_priority") + try: + mime_type = next(x for x in mime_priority if x in output["data"]) + except StopIteration: + return self.warning("No output mime type found from render_priority") + else: + cell_index = 0 # TODO make this optional, and actually just pass metadata? + return nb_renderer.render_mime_type( + mime_type, output["data"][mime_type], cell_index, self.lineno + ) + + def render_output_sphinx( + self, nb_renderer: NbElementRenderer, output: Dict[str, Any] + ) -> List[nodes.Node]: + mime_bundle = nodes.container(nb_element="mime_bundle") + self.set_source_info(mime_bundle) + for mime_type, data in output["data"].items(): + mime_container = nodes.container(mime_type=mime_type) + self.set_source_info(mime_container) + cell_index = 0 # TODO make this optional, and actually just pass metadata? + nds = nb_renderer.render_mime_type(mime_type, data, cell_index, self.lineno) + if nds: + mime_container.extend(nds) + mime_bundle.append(mime_container) + return [mime_bundle] + + +class PasteFigureDirective(PasteDirective): + def align(argument): + return directives.choice(argument, ("left", "center", "right")) + + def figwidth_value(argument): + return directives.length_or_percentage_or_unitless(argument, "px") + + option_spec = (PasteDirective.option_spec or {}).copy() + option_spec["figwidth"] = figwidth_value + option_spec["figclass"] = directives.class_option + option_spec["align"] = align + option_spec["name"] = directives.unchanged + has_content = True + + def run(self): + paste_nodes = super().run() + if not paste_nodes or isinstance(paste_nodes[0], nodes.system_message): + return paste_nodes + + # note: most of this is copied directly from sphinx.Figure + + # create figure node + figure_node = nodes.figure("", *paste_nodes) + self.set_source_info(figure_node) + + # add attributes + figwidth = self.options.pop("figwidth", None) + figclasses = self.options.pop("figclass", None) + align = self.options.pop("align", None) + if figwidth is not None: + figure_node["width"] = figwidth + if figclasses: + figure_node["classes"] += figclasses + if align: + figure_node["align"] = align + + # add target + self.add_name(figure_node) + + # create the caption and legend + if self.content: + node = nodes.Element() # anonymous container for parsing + self.state.nested_parse(self.content, self.content_offset, node) + first_node = node[0] + if isinstance(first_node, nodes.paragraph): + caption = nodes.caption(first_node.rawsource, "", *first_node.children) + caption.source = first_node.source + caption.line = first_node.line + figure_node += caption + elif not (isinstance(first_node, nodes.comment) and len(first_node) == 0): + error = self.warning( + "Figure caption must be a paragraph or empty comment." 
+ ) + return [figure_node, error] + if len(node) > 1: + figure_node += nodes.legend("", *node[1:]) + + return [figure_node] diff --git a/myst_nb/nb_glue/transform.py b/myst_nb/nb_glue/transform.py deleted file mode 100644 index 5670132c..00000000 --- a/myst_nb/nb_glue/transform.py +++ /dev/null @@ -1,43 +0,0 @@ -from sphinx.transforms import SphinxTransform -from sphinx.util import logging - -from myst_nb.nb_glue.domain import NbGlueDomain, PasteNode - -SPHINX_LOGGER = logging.getLogger(__name__) - - -class PasteNodesToDocutils(SphinxTransform): - """Use the builder context to transform a CellOutputNode into Sphinx nodes.""" - - default_priority = 3 # must be applied before CellOutputsToNodes - - def apply(self): - glue_domain = NbGlueDomain.from_env(self.app.env) # type: NbGlueDomain - for paste_node in self.document.traverse(PasteNode): - - if paste_node.key not in glue_domain: - SPHINX_LOGGER.warning( - ( - f"Couldn't find key `{paste_node.key}` " - "in keys defined across all pages." - ), - location=(paste_node.source, paste_node.line), - ) - continue - - # Grab the output for this key - output = glue_domain.get(paste_node.key) - - out_node = paste_node.create_node( - output=output, document=self.document, env=self.app.env - ) - if out_node is None: - SPHINX_LOGGER.warning( - ( - "Couldn't find compatible output format for key " - f"`{paste_node.key}`" - ), - location=(paste_node.source, paste_node.line), - ) - else: - paste_node.replace_self(out_node) diff --git a/myst_nb/nb_glue/utils.py b/myst_nb/nb_glue/utils.py deleted file mode 100644 index e6ddd83e..00000000 --- a/myst_nb/nb_glue/utils.py +++ /dev/null @@ -1,126 +0,0 @@ -import json -from pathlib import Path - -import nbformat as nbf - -from myst_nb.nb_glue import GLUE_PREFIX - - -def read_glue_cache(path): - """Read a glue cache generated by a Sphinx build. - - Parameters - ---------- - path : str - Path to a doctree directory, or directly to a glue cache .json file. - - Returns - ------- - data : dictionary - A dictionary containing the JSON data in your glue cache. - """ - path = Path(path) - if path.is_dir(): - # Assume our folder is doctrees and append the glue data name to it. - path = path.joinpath("glue_cache.json") - if not path.exists(): - raise FileNotFoundError(f"A glue cache was not found at: {path}") - - data = json.load(path.open(encoding="utf8")) - return data - - -def find_glued_key(path_ntbk, key): - """Find an output mimebundle in a notebook based on a key. - - Parameters - ---------- - path_ntbk : path - The path to a Jupyter Notebook that has variables "glued" in it. - key : string - The unique string to use as a look-up in `path_ntbk`. - - Returns - ------- - mimebundle - The output mimebundle associated with the given key. - """ - # Read in the notebook - if isinstance(path_ntbk, Path): - path_ntbk = str(path_ntbk) - ntbk = nbf.read(path_ntbk, nbf.NO_CONVERT) - outputs = [] - for cell in ntbk.cells: - if cell.cell_type != "code": - continue - - # If we have outputs, look for scrapbook metadata and reference the key - for output in cell["outputs"]: - meta = output.get("metadata", {}) - if "scrapbook" in meta: - this_key = meta["scrapbook"]["name"].replace(GLUE_PREFIX, "") - if key == this_key: - bundle = output["data"] - bundle = {this_key: val for key, val in bundle.items()} - outputs.append(bundle) - if len(outputs) == 0: - raise KeyError(f"Did not find key {key} in notebook {path_ntbk}") - if len(outputs) > 1: - raise KeyError( - f"Multiple variables found for key: {key}. Returning first value." 
- ) - return outputs[0] - - -def find_all_keys(ntbk, existing_keys=None, path=None, logger=None, strip_stored=True): - """Find all `glue` keys in a notebook and return a dictionary with key: outputs. - - :param existing_keys: a map of key to docname - :param strip_stored: if the content of a mimetype is already stored on disc - (referenced in output.metadata.filenames) then replace it by None - """ - if isinstance(ntbk, (str, Path)): - ntbk = nbf.read(str(ntbk), nbf.NO_CONVERT) - - if existing_keys is None: - existing_keys = {} - new_keys = {} - - for i, cell in enumerate(ntbk.cells): - if cell.cell_type != "code": - continue - - for output in cell["outputs"]: - meta = output.get("metadata", {}) - if "scrapbook" in meta: - this_key = meta["scrapbook"]["name"] - if this_key in existing_keys: - msg = ( - f"Skipping glue key `{this_key}`, in cell {i}, " - f"that already exists in: '{existing_keys[this_key]}'" - ) - if logger is None: - print(msg) - else: - logger.warning(msg, location=(path, None)) - continue - if this_key in new_keys: - msg = ( - f"Glue key `{this_key}`, in cell {i}, overwrites one " - "previously defined in the notebook." - ) - if logger is None: - print(msg) - else: - logger.warning(msg, location=(path, None)) - - if strip_stored: - output = output.copy() - filenames = output["metadata"].get("filenames", {}) - output["data"] = { - k: None if k.replace(GLUE_PREFIX, "") in filenames else v - for k, v in output.get("data", {}).items() - } - - new_keys[this_key] = output - return new_keys diff --git a/myst_nb/preprocess.py b/myst_nb/preprocess.py new file mode 100644 index 00000000..d9f3323d --- /dev/null +++ b/myst_nb/preprocess.py @@ -0,0 +1,90 @@ +"""notebook "pre-processing" (after execution, but before parsing)""" +from logging import Logger +import re +from typing import Any, Dict, List + +from nbformat import NotebookNode + +from myst_nb.nb_glue import extract_glue_data + + +def preprocess_notebook( + notebook: NotebookNode, logger: Logger, get_cell_render_config +) -> Dict[str, Any]: + """Modify notebook and resources in-place.""" + # TODO parsing get_cell_render_config is a stop-gap here + # TODO make this pluggable + # (similar to nbconvert preprocessors, but parse config, source map and logger) + + resources: Dict[str, Any] = {} + + # create source map + source_map = notebook.metadata.get("source_map", None) + # use 1-based indexing rather than 0, or pseudo base of the cell index + source_map = [ + (source_map[i] if source_map else ((i + 1) * 10000)) + 1 + for i, _ in enumerate(notebook.cells) + ] + + # coalesce_streams + for index, cell in enumerate(notebook.cells): + if cell.cell_type == "code": + if get_cell_render_config(index, "merge_streams"): + cell["outputs"] = coalesce_streams(cell.get("outputs", [])) + + # extract all scrapbook (aka glue) outputs from notebook + extract_glue_data(notebook, resources, source_map, logger) + + return resources + + +_RGX_CARRIAGERETURN = re.compile(r".*\r(?=[^\n])") +_RGX_BACKSPACE = re.compile(r"[^\n]\b") + + +def coalesce_streams(outputs: List[NotebookNode]) -> List[NotebookNode]: + """Merge all stream outputs with shared names into single streams. + + This ensure deterministic outputs. + + Adapted from: + https://github.com/computationalmodelling/nbval/blob/master/nbval/plugin.py. 
+ """ + if not outputs: + return [] + + new_outputs = [] + streams = {} + for output in outputs: + if output["output_type"] == "stream": + if output["name"] in streams: + streams[output["name"]]["text"] += output["text"] + else: + new_outputs.append(output) + streams[output["name"]] = output + else: + new_outputs.append(output) + + # process \r and \b characters + for output in streams.values(): + old = output["text"] + while len(output["text"]) < len(old): + old = output["text"] + # Cancel out anything-but-newline followed by backspace + output["text"] = _RGX_BACKSPACE.sub("", output["text"]) + # Replace all carriage returns not followed by newline + output["text"] = _RGX_CARRIAGERETURN.sub("", output["text"]) + + # We also want to ensure stdout and stderr are always in the same consecutive order, + # because they are asynchronous, so order isn't guaranteed. + for i, output in enumerate(new_outputs): + if output["output_type"] == "stream" and output["name"] == "stderr": + if ( + len(new_outputs) >= i + 2 + and new_outputs[i + 1]["output_type"] == "stream" + and new_outputs[i + 1]["name"] == "stdout" + ): + stdout = new_outputs.pop(i + 1) + new_outputs.insert(i, stdout) + + return new_outputs diff --git a/myst_nb/render.py b/myst_nb/render.py index 6c9ecb86..a24f5014 100644 --- a/myst_nb/render.py +++ b/myst_nb/render.py @@ -26,11 +26,7 @@ WIDGET_STATE_MIMETYPE = "application/vnd.jupyter.widget-state+json" WIDGET_VIEW_MIMETYPE = "application/vnd.jupyter.widget-view+json" RENDER_ENTRY_GROUP = "myst_nb.renderers" - -# useful regexes _ANSI_RE = re.compile("\x1b\\[(.*?)([@-~])") -_RGX_CARRIAGERETURN = re.compile(r".*\r(?=[^\n])") -_RGX_BACKSPACE = re.compile(r"[^\n]\b") class NbElementRenderer: @@ -53,7 +49,11 @@ def renderer(self) -> "DocutilsNbRenderer": @property def logger(self) -> logging.Logger: - """The logger for this renderer.""" + """The logger for this renderer. + + In extension to a standard logger, + this logger also for `line` and `subtype` kwargs to the `log` methods. + """ # TODO the only problem with logging here, is that we cannot generate # nodes.system_message to append to the document. 
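As an illustration of the `coalesce_streams` pre-processing step introduced above, a small sketch (not part of the patch) of its merging behaviour, built with nbformat's `new_output` helper:

```python
from nbformat.v4 import new_output

from myst_nb.preprocess import coalesce_streams

# two consecutive stdout stream outputs from one code cell
outputs = [
    new_output("stream", name="stdout", text="first\n"),
    new_output("stream", name="stdout", text="second\n"),
]

merged = coalesce_streams(outputs)
assert len(merged) == 1                       # streams with the same name are joined
assert merged[0]["text"] == "first\nsecond\n"  # text is concatenated in order
```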
return self._logger @@ -63,6 +63,14 @@ def source(self): """The source of the notebook.""" return self.renderer.document["source"] + def get_cell_metadata(self, cell_index: int) -> NotebookNode: + # TODO handle key/index error + return self.renderer.config["notebook"]["cells"][cell_index]["metadata"] + + def get_resources(self) -> Dict[str, Any]: + """Get the resources from the notebook preprocessing.""" + return self.renderer.config["nb_resources"] + def write_file( self, path: List[str], content: bytes, overwrite=False, exists_ok=False ) -> str: @@ -75,9 +83,11 @@ def write_file( :returns: URI to use for referencing the file """ - output_folder = Path(self.renderer.get_nb_config("output_folder")) - filepath = output_folder.joinpath(*path) - if filepath.exists(): + output_folder = self.renderer.get_nb_config("output_folder") + filepath = Path(output_folder).joinpath(*path) + if not output_folder: + pass # do not output anything if output_folder is not set (docutils only) + elif filepath.exists(): if overwrite: filepath.write_bytes(content) elif not exists_ok: @@ -99,9 +109,23 @@ def write_file( else: return str(filepath) - def get_cell_metadata(self, cell_index: int) -> NotebookNode: - # TODO handle key/index error - return self.renderer.config["notebook"]["cells"][cell_index]["metadata"] + def render_raw_cell( + self, content: str, metadata: dict, cell_index: int, source_line: int + ) -> List[nodes.Element]: + """Render a raw cell. + + https://nbformat.readthedocs.io/en/5.1.3/format_description.html#raw-nbconvert-cells + + :param content: the raw cell content + :param metadata: the cell metadata + :param cell_index: the index of the cell + :param source_line: the line number of the cell in the source document + """ + mime_type = metadata.get("format") + if not mime_type: + # skip without warning, since e.g. jupytext saves raw cells with no format + return [] + return self.render_mime_type(mime_type, content, cell_index, source_line) def render_stdout( self, output: NotebookNode, cell_index: int, source_line: int @@ -187,21 +211,6 @@ def render_error( node["classes"] += ["output", "traceback"] return [node] - def render_raw_cell( - self, content: str, metadata: dict, cell_index: int, source_line: int - ) -> List[nodes.Element]: - """Render a raw cell. - - https://nbformat.readthedocs.io/en/5.1.3/format_description.html#raw-nbconvert-cells - - :param content: the raw cell content - :param metadata: the cell metadata - :param cell_index: the index of the cell - :param source_line: the line number of the cell in the source document - """ - mime_type = metadata.get("format", "text/plain") - return self.render_mime_type(mime_type, content, cell_index, source_line) - def render_mime_type( self, mime_type: str, data: Union[str, bytes], cell_index: int, source_line: int ) -> List[nodes.Element]: @@ -492,54 +501,6 @@ def strip_latex_delimiters(source): return source -def coalesce_streams(outputs: List[NotebookNode]) -> List[NotebookNode]: - """Merge all stream outputs with shared names into single streams. - - This ensure deterministic outputs. - - Adapted from: - https://github.com/computationalmodelling/nbval/blob/master/nbval/plugin.py. 
- """ - if not outputs: - return [] - - new_outputs = [] - streams = {} - for output in outputs: - if output["output_type"] == "stream": - if output["name"] in streams: - streams[output["name"]]["text"] += output["text"] - else: - new_outputs.append(output) - streams[output["name"]] = output - else: - new_outputs.append(output) - - # process \r and \b characters - for output in streams.values(): - old = output["text"] - while len(output["text"]) < len(old): - old = output["text"] - # Cancel out anything-but-newline followed by backspace - output["text"] = _RGX_BACKSPACE.sub("", output["text"]) - # Replace all carriage returns not followed by newline - output["text"] = _RGX_CARRIAGERETURN.sub("", output["text"]) - - # We also want to ensure stdout and stderr are always in the same consecutive order, - # because they are asynchronous, so order isn't guaranteed. - for i, output in enumerate(new_outputs): - if output["output_type"] == "stream" and output["name"] == "stderr": - if ( - len(new_outputs) >= i + 2 - and new_outputs[i + 1]["output_type"] == "stream" - and new_outputs[i + 1]["name"] == "stdout" - ): - stdout = new_outputs.pop(i + 1) - new_outputs.insert(i, stdout) - - return new_outputs - - @contextmanager def create_figure_context( self: "DocutilsNbRenderer", figure_options: Optional[Dict[str, Any]], line: int @@ -552,6 +513,8 @@ def create_figure_context( yield return + # note: most of this is copied directly from sphinx.Figure + # create figure node figure_node = nodes.figure() figure_node.line = line @@ -572,16 +535,18 @@ def create_figure_context( # create caption node caption = None if figure_options.get("caption", ""): - caption = nodes.caption(str(figure_options["caption"])) - caption.line = line - caption.source = self.document["source"] - with self.current_node_context(caption): + node = nodes.Element() # anonymous container for parsing + with self.current_node_context(node): self.nested_render_text(str(figure_options["caption"]), line) - if caption.children and isinstance(caption.children[0], nodes.paragraph): - caption.children = caption.children[0].children - else: + first_node = node.children[0] + legend_nodes = node.children[1:] + if isinstance(first_node, nodes.paragraph): + caption = nodes.caption(first_node.rawsource, "", *first_node.children) + caption.source = self.document["source"] + caption.line = line + elif not (isinstance(first_node, nodes.comment) and len(first_node) == 0): self.create_warning( - "Figure caption is not a single paragraph", + "Figure caption must be a paragraph or empty comment.", line=line, wtype=DEFAULT_LOG_TYPE, subtype="fig_caption", @@ -593,10 +558,14 @@ def create_figure_context( if caption and figure_options.get("caption_before", False): figure_node.append(caption) + if legend_nodes: + figure_node += nodes.legend("", *legend_nodes) yield if caption and not figure_options.get("caption_before", False): figure_node.append(caption) + if legend_nodes: + figure_node += nodes.legend("", *legend_nodes) self.current_node = old_current_node diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index 3a89c77e..eb8fc795 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -26,15 +26,15 @@ from myst_nb import __version__ from myst_nb.configuration import NbParserConfig -from myst_nb.execute import ExecutionResult, update_notebook +from myst_nb.execute import ExecutionResult, execute_notebook from myst_nb.loggers import DEFAULT_LOG_TYPE, SphinxDocLogger from myst_nb.nb_glue.domain import NbGlueDomain from myst_nb.parse import nb_node_to_dict, 
notebook_to_tokens +from myst_nb.preprocess import preprocess_notebook from myst_nb.read import UnexpectedCellDirective, create_nb_reader from myst_nb.render import ( WIDGET_STATE_MIMETYPE, NbElementRenderer, - coalesce_streams, create_figure_context, load_renderer, sanitize_script_content, @@ -168,6 +168,7 @@ def create_mystnb_config(app): app.env.mystnb_config = app.env.mystnb_config.copy( output_folder=str(output_folder), execution_cache_path=str(exec_cache_path) ) + SPHINX_LOGGER.info(f"Using jupyter-cache at: {exec_cache_path}") def add_exclude_patterns(app: Sphinx, config): @@ -268,7 +269,7 @@ def parse(self, inputstring: str, document: nodes.document) -> None: ) # potentially execute notebook and/or populate outputs from cache - notebook, exec_data = update_notebook( + notebook, exec_data = execute_notebook( notebook, document_path, nb_config, logger ) if exec_data: @@ -289,7 +290,7 @@ def parse(self, inputstring: str, document: nodes.document) -> None: mdit_parser = create_md_parser(nb_reader.md_config, SphinxNbRenderer) mdit_parser.options["document"] = document mdit_parser.options["notebook"] = notebook - mdit_parser.options["nb_config"] = nb_config.as_dict() + mdit_parser.options["nb_config"] = nb_config mdit_env: Dict[str, Any] = {} # load notebook element renderer class from entry-point name @@ -299,20 +300,31 @@ def parse(self, inputstring: str, document: nodes.document) -> None: mdit_parser.renderer, logger ) mdit_parser.options["nb_renderer"] = nb_renderer + # we currently do this early, so that the nb_renderer has access to things + mdit_parser.renderer.setup_render(mdit_parser.options, mdit_env) + + # pre-process notebook and store resources for render + resources = preprocess_notebook( + notebook, logger, mdit_parser.renderer.get_cell_render_config + ) + mdit_parser.renderer.config["nb_resources"] = resources + # we temporarily store nb_renderer on the document, + # so that roles/directives can access it + document.attributes["nb_renderer"] = nb_renderer # parse to tokens mdit_tokens = notebook_to_tokens(notebook, mdit_parser, mdit_env, logger) # convert to docutils AST, which is added to the document mdit_parser.renderer.render(mdit_tokens, mdit_parser.options, mdit_env) - # write final (updated) notebook to output folder - # TODO currently this has to be done after the render has been called/setup - # utf-8 is the de-facto standard encoding for notebooks. + # write final (updated) notebook to output folder (utf8 is standard encoding) content = nbformat.writes(notebook).encode("utf-8") path = self.env.docname.split("/") path[-1] += ".ipynb" nb_renderer.write_file(path, content, overwrite=True) + document.attributes.pop("nb_renderer") + class SphinxNbRenderer(SphinxRenderer): """A sphinx renderer for Jupyter Notebooks.""" @@ -369,6 +381,7 @@ def render_nb_metadata(self, token: SyntaxTreeNode) -> None: ) # TODO should we provide hook for NbElementRenderer? 
+ # Also add method to NbElementRenderer, to store scripts to load # store ipywidgets state in metadata, # which will be later added to HTML page context @@ -488,9 +501,6 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: outputs: List[NotebookNode] = self.config["notebook"]["cells"][cell_index].get( "outputs", [] ) - if self.get_cell_render_config(cell_index, "merge_streams"): - outputs = coalesce_streams(outputs) - # render the outputs for output in outputs: if output.output_type == "stream": @@ -532,9 +542,6 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: mime_bundle = nodes.container(nb_element="mime_bundle") with self.current_node_context(mime_bundle): for mime_type, data in output["data"].items(): - if mime_type.startswith("application/papermill.record/"): - # TODO this is the glue prefix, just ignore this for now - continue mime_container = nodes.container(mime_type=mime_type) with self.current_node_context(mime_container): _nodes = self.nb_renderer.render_mime_type( diff --git a/tests/nb_fixtures/basic.txt b/tests/nb_fixtures/basic.txt index 17d06aa4..2793a434 100644 --- a/tests/nb_fixtures/basic.txt +++ b/tests/nb_fixtures/basic.txt @@ -6,7 +6,7 @@ cells: source: | # A Title . -<document ids="a-title" names="a\ title" nb_kernelspec="True" nb_language_info="True" source="<string>" title="A Title"> +<document ids="a-title" names="a\ title" source="<string>" title="A Title"> <title> A Title . @@ -22,7 +22,7 @@ cells: print(a) outputs: [] . -<document nb_kernelspec="True" nb_language_info="True" source="<string>"> +<document source="<string>"> <container cell_index="0" cell_metadata="{}" classes="cell" exec_count="True" nb_element="cell_code"> <container classes="cell_input" nb_element="cell_code_source"> <literal_block classes="code" xml:space="preserve"> @@ -43,7 +43,7 @@ cells: source: a=1 outputs: [] . -<document nb_kernelspec="True" nb_language_info="{'name': 'python', 'pygments_lexer': 'ipython3'}" source="<string>"> +<document nb_language_info="{'name': 'python', 'pygments_lexer': 'ipython3'}" source="<string>"> <container cell_index="0" cell_metadata="{}" classes="cell" exec_count="True" nb_element="cell_code"> <container classes="cell_input" nb_element="cell_code_source"> <literal_block classes="code ipython3" xml:space="preserve"> @@ -69,7 +69,7 @@ cells: output_type: stream text: "1" . -<document nb_kernelspec="True" nb_language_info="True" source="<string>"> +<document source="<string>"> <container cell_index="0" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> <container classes="cell_input" nb_element="cell_code_source"> <literal_block classes="code" xml:space="preserve"> @@ -90,7 +90,7 @@ cells: <h1>A Title</h1> </div> . -<document nb_kernelspec="True" nb_language_info="True" source="<string>"> +<document source="<string>"> <raw classes="output text_html" format="html" xml:space="preserve"> <div> <h1>A Title</h1> @@ -116,7 +116,7 @@ cells: source: | b . -<document ids="a-title" names="a\ title" nb_kernelspec="True" nb_language_info="True" source="<string>" title="A Title"> +<document ids="a-title" names="a\ title" source="<string>" title="A Title"> <title> A Title <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="True" nb_element="cell_code"> @@ -144,7 +144,7 @@ cells: source: | [b]: after . 
-<document nb_kernelspec="True" nb_language_info="True" source="<string>"> +<document source="<string>"> <paragraph> <reference refuri="before"> a @@ -169,7 +169,7 @@ cells: source: | [^b]: after . -<document nb_kernelspec="True" nb_language_info="True" source="<string>"> +<document source="<string>"> <paragraph> <footnote_reference auto="1" ids="id1" refid="a"> 1 diff --git a/tests/test_docutils.py b/tests/test_docutils.py index ffd73e48..9db9d69c 100644 --- a/tests/test_docutils.py +++ b/tests/test_docutils.py @@ -29,6 +29,7 @@ def test_basic(line, title, input, expected): parser=Parser(), settings_overrides={ "nb_execution_mode": "off", + "nb_output_folder": "", "myst_all_links_external": True, "warning_stream": report_stream, }, @@ -60,6 +61,7 @@ def test_reporting(line, title, input, expected): parser=Parser(), settings_overrides={ "nb_execution_mode": "off", + "nb_output_folder": "", "warning_stream": report_stream, }, ) diff --git a/tests/test_glue.py b/tests/test_glue.py index f8367f2a..7b434ffa 100644 --- a/tests/test_glue.py +++ b/tests/test_glue.py @@ -1,10 +1,9 @@ from IPython.core.displaypub import DisplayPublisher from IPython.core.interactiveshell import InteractiveShell +import nbformat import pytest -from myst_nb.nb_glue import glue, utils -from myst_nb.nb_glue.domain import NbGlueDomain -from myst_nb.nb_glue.transform import PasteNodesToDocutils +from myst_nb.nb_glue import extract_glue_data, glue class MockDisplayPublisher(DisplayPublisher): @@ -19,19 +18,13 @@ def publish(self, data, **kwargs): @pytest.fixture() def mock_ipython(): + """A mock IPython shell for testing notebook cell executions.""" shell = InteractiveShell.instance() # type: InteractiveShell shell.display_pub = MockDisplayPublisher() yield shell.display_pub InteractiveShell.clear_instance() -def test_check_priority(): - """Assert that the default transform priority is less than CellOutputsToNodes""" - from myst_nb.render_outputs import CellOutputsToNodes - - assert PasteNodesToDocutils.default_priority < CellOutputsToNodes.default_priority - - def test_glue_func_text(mock_ipython): glue("a", "b") assert mock_ipython.publish_calls == [ @@ -84,18 +77,13 @@ def _repr_html_(self): ] -def test_find_glued_key(get_test_path): - - bundle = utils.find_glued_key(get_test_path("with_glue.ipynb"), "key_text1") - assert bundle == {"key_text1": "'text1'"} - - with pytest.raises(KeyError): - utils.find_glued_key(get_test_path("with_glue.ipynb"), "unknown") - - -def test_find_all_keys(get_test_path): - keys = utils.find_all_keys(get_test_path("with_glue.ipynb")) - assert set(keys) == { +def test_extract_glue_data(get_test_path): + path = get_test_path("with_glue.ipynb") + with open(path, "r") as handle: + notebook = nbformat.read(handle, as_version=4) + resources = {} + extract_glue_data(notebook, resources, [], None) + assert set(resources["glue"]) == { "key_text1", "key_float", "key_undisplayed", @@ -107,8 +95,11 @@ def test_find_all_keys(get_test_path): @pytest.mark.sphinx_params("with_glue.ipynb", conf={"nb_execution_mode": "off"}) def test_parser(sphinx_run, clean_doctree, file_regression): + """Test a sphinx build.""" + # TODO test duplicate warning in docutils sphinx_run.build() # print(sphinx_run.status()) + # print(sphinx_run.warnings()) assert sphinx_run.warnings() == "" doctree = clean_doctree(sphinx_run.get_resolved_doctree("with_glue")) file_regression.check( @@ -116,15 +107,16 @@ def test_parser(sphinx_run, clean_doctree, file_regression): extension=f"{sphinx_run.software_versions}.xml", encoding="utf8", ) 
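To make the glue workflow exercised by these tests concrete, a hedged sketch of the notebook-side call; the key name is invented for illustration, and the pasting side uses the `glue:`/`glue:any`/`glue:figure` directives registered earlier in this patch:

```python
# in a notebook code cell -- illustrative only; "my_key" is an invented name
from myst_nb.nb_glue import glue

result = 3.14159
glue("my_key", result)  # publishes the variable's mime bundle with scrapbook metadata

# at build time the pre-processor moves such outputs into resources["glue"]["my_key"],
# where the glue directives can then look them up by key and paste them into the page
```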
- glue_domain = NbGlueDomain.from_env(sphinx_run.app.env) - assert set(glue_domain.cache) == { - "key_text1", - "key_float", - "key_undisplayed", - "key_df", - "key_plt", - "sym_eq", - } - glue_domain.clear_doc("with_glue") - assert glue_domain.cache == {} - assert glue_domain.docmap == {} + # from myst_nb.nb_glue.domain import NbGlueDomain + # glue_domain = NbGlueDomain.from_env(sphinx_run.app.env) + # assert set(glue_domain.cache) == { + # "key_text1", + # "key_float", + # "key_undisplayed", + # "key_df", + # "key_plt", + # "sym_eq", + # } + # glue_domain.clear_doc("with_glue") + # assert glue_domain.cache == {} + # assert glue_domain.docmap == {} From baed2c69bc7620d87ebc2f8f5caf79339249101a Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 7 Jan 2022 11:33:06 +0100 Subject: [PATCH 40/75] Add some docstrings --- myst_nb/nb_glue/elements.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/myst_nb/nb_glue/elements.py b/myst_nb/nb_glue/elements.py index 56ef3f1c..360d0b17 100644 --- a/myst_nb/nb_glue/elements.py +++ b/myst_nb/nb_glue/elements.py @@ -21,6 +21,7 @@ def is_sphinx(self) -> bool: return hasattr(self.state.document.settings, "env") def warning(self, message: str) -> nodes.system_message: + """Create a warning.""" if self.is_sphinx: logger = SphinxDocLogger(self.state.document) else: @@ -35,7 +36,7 @@ def warning(self, message: str) -> nodes.system_message: ) def set_source_info(self, node: nodes.Node) -> None: - """Set source and line number to the node.""" + """Set source and line number for the node.""" node.source, node.line = self.state_machine.get_source_and_line(self.lineno) def run(self) -> List[nodes.Node]: @@ -59,6 +60,7 @@ def run(self) -> List[nodes.Node]: def render_output_docutils( self, nb_renderer: NbElementRenderer, output: Dict[str, Any] ) -> List[nodes.Node]: + """Render the output in docutils (select mime priority directly).""" mime_priority = nb_renderer.renderer.get_nb_config("mime_priority") try: mime_type = next(x for x in mime_priority if x in output["data"]) @@ -73,6 +75,7 @@ def render_output_docutils( def render_output_sphinx( self, nb_renderer: NbElementRenderer, output: Dict[str, Any] ) -> List[nodes.Node]: + """Render the output in sphinx (defer mime priority selection).""" mime_bundle = nodes.container(nb_element="mime_bundle") self.set_source_info(mime_bundle) for mime_type, data in output["data"].items(): @@ -87,6 +90,8 @@ def render_output_sphinx( class PasteFigureDirective(PasteDirective): + """A directive for pasting code outputs from notebooks, wrapped in a figure.""" + def align(argument): return directives.choice(argument, ("left", "center", "right")) From 91232e86fab65b2e9fc497bf92a7693cba43379b Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 7 Jan 2022 13:12:05 +0100 Subject: [PATCH 41/75] add MimeData data class --- myst_nb/docutils_.py | 55 ++++++--- myst_nb/loggers.py | 6 +- myst_nb/nb_glue/elements.py | 20 +++- myst_nb/preprocess.py | 4 +- myst_nb/render.py | 223 ++++++++++++++++++++---------------- myst_nb/sphinx_.py | 50 +++++--- 6 files changed, 215 insertions(+), 143 deletions(-) diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index c723ef71..b7d7df6c 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -28,7 +28,12 @@ read_myst_markdown_notebook, standard_nb_read, ) -from myst_nb.render import NbElementRenderer, create_figure_context, load_renderer +from myst_nb.render import ( + MimeData, + NbElementRenderer, + 
create_figure_context, + load_renderer, +) DOCUTILS_EXCLUDED_ARGS = { f.name for f in NbParserConfig.get_fields() if f.metadata.get("docutils_exclude") @@ -192,7 +197,7 @@ def get_nb_config(self, key: str) -> Any: def get_cell_render_config( self, - cell_index: int, + cell_metadata: Dict[str, Any], key: str, nb_key: Optional[str] = None, has_nb_key: bool = True, @@ -206,17 +211,17 @@ def get_cell_render_config( :raises: IndexError if the cell index is out of range :raises: KeyError if the key is not found """ - cell = self.config["notebook"].cells[cell_index] + # TODO allow output level configuration? cell_metadata_key = self.get_nb_config("cell_render_key") if ( - cell_metadata_key not in cell.metadata - or key not in cell.metadata[cell_metadata_key] + cell_metadata_key not in cell_metadata + or key not in cell_metadata[cell_metadata_key] ): if not has_nb_key: raise KeyError(key) return self.get_nb_config(nb_key if nb_key is not None else key) # TODO validate? - return cell.metadata[cell_metadata_key][key] + return cell_metadata[cell_metadata_key][key] def render_nb_metadata(self, token: SyntaxTreeNode) -> None: """Render the notebook metadata.""" @@ -280,12 +285,12 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: # TODO do we need this -/_ duplication of tag names, or can we deprecate one? remove_input = ( - self.get_cell_render_config(cell_index, "remove_code_source") + self.get_cell_render_config(token.meta["metadata"], "remove_code_source") or ("remove_input" in tags) or ("remove-input" in tags) ) remove_output = ( - self.get_cell_render_config(cell_index, "remove_code_outputs") + self.get_cell_render_config(token.meta["metadata"], "remove_code_outputs") or ("remove_output" in tags) or ("remove-output" in tags) ) @@ -332,12 +337,13 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: def render_nb_cell_code_source(self, token: SyntaxTreeNode) -> None: """Render a notebook code cell's source.""" - cell_index = token.meta["index"] lexer = token.meta.get("lexer", None) node = self.create_highlighted_code_block( token.content, lexer, - number_lines=self.get_cell_render_config(cell_index, "number_source_lines"), + number_lines=self.get_cell_render_config( + token.meta["metadata"], "number_source_lines" + ), source=self.document["source"], line=token_line(token), ) @@ -347,26 +353,33 @@ def render_nb_cell_code_source(self, token: SyntaxTreeNode) -> None: def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: """Render a notebook code cell's outputs.""" cell_index = token.meta["index"] + metadata = token.meta["metadata"] line = token_line(token) outputs: List[NotebookNode] = self.config["notebook"]["cells"][cell_index].get( "outputs", [] ) # render the outputs - mime_priority = self.get_cell_render_config(cell_index, "mime_priority") - for output in outputs: + mime_priority = self.get_cell_render_config(metadata, "mime_priority") + for output_index, output in enumerate(outputs): if output.output_type == "stream": if output.name == "stdout": - _nodes = self.nb_renderer.render_stdout(output, cell_index, line) + _nodes = self.nb_renderer.render_stdout( + output, metadata, cell_index, line + ) self.add_line_and_source_path_r(_nodes, token) self.current_node.extend(_nodes) elif output.name == "stderr": - _nodes = self.nb_renderer.render_stderr(output, cell_index, line) + _nodes = self.nb_renderer.render_stderr( + output, metadata, cell_index, line + ) self.add_line_and_source_path_r(_nodes, token) self.current_node.extend(_nodes) else: pass # TODO 
warning elif output.output_type == "error": - _nodes = self.nb_renderer.render_error(output, cell_index, line) + _nodes = self.nb_renderer.render_error( + output, metadata, cell_index, line + ) self.add_line_and_source_path_r(_nodes, token) self.current_node.extend(_nodes) elif output.output_type in ("display_data", "execute_result"): @@ -394,14 +407,22 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: figure_options = None with suppress(KeyError): figure_options = self.get_cell_render_config( - cell_index, "figure", has_nb_key=False + metadata, "figure", has_nb_key=False ) with create_figure_context(self, figure_options, line): container = nodes.container(mime_type=mime_type) with self.current_node_context(container, append=True): _nodes = self.nb_renderer.render_mime_type( - mime_type, output["data"][mime_type], cell_index, line + MimeData( + mime_type, + output["data"][mime_type], + cell_metadata=metadata, + output_metadata=output.get("metadata", {}), + cell_index=cell_index, + output_index=output_index, + line=line, + ), ) self.current_node.extend(_nodes) self.add_line_and_source_path_r([container], token) diff --git a/myst_nb/loggers.py b/myst_nb/loggers.py index 0204987a..648c56de 100644 --- a/myst_nb/loggers.py +++ b/myst_nb/loggers.py @@ -43,7 +43,7 @@ def process(self, msg, kwargs): if "type" in kwargs: # override type self.extra["type"] = kwargs.pop("type") subtype = ("." + kwargs["subtype"]) if "subtype" in kwargs else "" - if "line" in kwargs: # add line to location + if kwargs.get("line", None) is not None: # add line to location # note this will be overridden by the location keyword self.extra["location"] = (self.extra["docname"], kwargs.pop("line")) else: @@ -120,7 +120,9 @@ def emit(self, record: logging.LogRecord) -> None: levelname = record.levelname.upper() level = self._name_to_level.get(levelname, self._document.reporter.DEBUG_LEVEL) node = self._document.reporter.system_message( - level, record.msg, **({"line": record.line} if record.line else {}) + level, + record.msg, + **({"line": record.line} if record.line is not None else {}), ) if record.parent is not None: record.parent.append(node) diff --git a/myst_nb/nb_glue/elements.py b/myst_nb/nb_glue/elements.py index 360d0b17..0b45b162 100644 --- a/myst_nb/nb_glue/elements.py +++ b/myst_nb/nb_glue/elements.py @@ -5,7 +5,7 @@ from docutils.parsers.rst import Directive, directives from myst_nb.loggers import DocutilsDocLogger, SphinxDocLogger -from myst_nb.render import NbElementRenderer +from myst_nb.render import MimeData, NbElementRenderer class PasteDirective(Directive): @@ -67,9 +67,13 @@ def render_output_docutils( except StopIteration: return self.warning("No output mime type found from render_priority") else: - cell_index = 0 # TODO make this optional, and actually just pass metadata? return nb_renderer.render_mime_type( - mime_type, output["data"][mime_type], cell_index, self.lineno + MimeData( + mime_type, + output["data"][mime_type], + output_metadata=output.get("metadata", {}), + line=self.lineno, + ) ) def render_output_sphinx( @@ -81,8 +85,14 @@ def render_output_sphinx( for mime_type, data in output["data"].items(): mime_container = nodes.container(mime_type=mime_type) self.set_source_info(mime_container) - cell_index = 0 # TODO make this optional, and actually just pass metadata? 
- nds = nb_renderer.render_mime_type(mime_type, data, cell_index, self.lineno) + nds = nb_renderer.render_mime_type( + MimeData( + mime_type, + data, + output_metadata=output.get("metadata", {}), + line=self.lineno, + ) + ) if nds: mime_container.extend(nds) mime_bundle.append(mime_container) diff --git a/myst_nb/preprocess.py b/myst_nb/preprocess.py index d9f3323d..25af2b3f 100644 --- a/myst_nb/preprocess.py +++ b/myst_nb/preprocess.py @@ -27,9 +27,9 @@ def preprocess_notebook( ] # coalesce_streams - for index, cell in enumerate(notebook.cells): + for _, cell in enumerate(notebook.cells): if cell.cell_type == "code": - if get_cell_render_config(index, "merge_streams"): + if get_cell_render_config(cell.metadata, "merge_streams"): cell["outputs"] = coalesce_streams(cell.get("outputs", [])) # extract all scrapbook (aka glue) outputs from notebook diff --git a/myst_nb/render.py b/myst_nb/render.py index a24f5014..a58384f5 100644 --- a/myst_nb/render.py +++ b/myst_nb/render.py @@ -11,6 +11,7 @@ import re from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Union +import attr from docutils import nodes from docutils.parsers.rst import directives as options_spec from importlib_metadata import entry_points @@ -29,6 +30,39 @@ _ANSI_RE = re.compile("\x1b\\[(.*?)([@-~])") +@attr.s() +class MimeData: + """Mime data from an execution output (display_data / execute_result) + + e.g. notebook.cells[0].outputs[0].data['text/plain'] = "Hello, world!" + + see: https://nbformat.readthedocs.io/en/5.1.3/format_description.html#display-data + """ + + mime_type: str = attr.ib() + """Mime type key of the output.data""" + content: Union[str, bytes] = attr.ib() + """Data value of the output.data""" + cell_metadata: Dict[str, Any] = attr.ib(factory=dict) + """Cell level metadata of the output""" + output_metadata: Dict[str, Any] = attr.ib(factory=dict) + """Output level metadata of the output""" + cell_index: Optional[int] = attr.ib(default=None) + """Index of the cell in the notebook""" + output_index: Optional[int] = attr.ib(default=None) + """Index of the output in the cell""" + line: Optional[int] = attr.ib(default=None) + """Source line of the cell""" + + @property + def string(self) -> str: + """Get the content as a string.""" + try: + return self.content.decode("utf-8") + except AttributeError: + return self.content + + class NbElementRenderer: """A class for rendering notebook elements.""" @@ -63,12 +97,8 @@ def source(self): """The source of the notebook.""" return self.renderer.document["source"] - def get_cell_metadata(self, cell_index: int) -> NotebookNode: - # TODO handle key/index error - return self.renderer.config["notebook"]["cells"][cell_index]["metadata"] - def get_resources(self) -> Dict[str, Any]: - """Get the resources from the notebook preprocessing.""" + """Get the resources from the notebook pre-processing.""" return self.renderer.config["nb_resources"] def write_file( @@ -117,7 +147,7 @@ def render_raw_cell( https://nbformat.readthedocs.io/en/5.1.3/format_description.html#raw-nbconvert-cells :param content: the raw cell content - :param metadata: the cell metadata + :param metadata: the cell level metadata :param cell_index: the index of the cell :param source_line: the line number of the cell in the source document """ @@ -125,24 +155,32 @@ def render_raw_cell( if not mime_type: # skip without warning, since e.g. 
jupytext saves raw cells with no format return [] - return self.render_mime_type(mime_type, content, cell_index, source_line) + return self.render_mime_type( + MimeData( + mime_type, content, metadata, cell_index=cell_index, line=source_line + ) + ) def render_stdout( - self, output: NotebookNode, cell_index: int, source_line: int + self, + output: NotebookNode, + cell_metadata: Dict[str, Any], + cell_index: int, + source_line: int, ) -> List[nodes.Element]: """Render a notebook stdout output. https://nbformat.readthedocs.io/en/5.1.3/format_description.html#stream-output :param output: the output node + :param metadata: the cell level metadata :param cell_index: the index of the cell containing the output :param source_line: the line number of the cell in the source document """ - metadata = self.get_cell_metadata(cell_index) - if "remove-stdout" in metadata.get("tags", []): + if "remove-stdout" in cell_metadata.get("tags", []): return [] lexer = self.renderer.get_cell_render_config( - cell_index, "text_lexer", "render_text_lexer" + cell_metadata, "text_lexer", "render_text_lexer" ) node = self.renderer.create_highlighted_code_block( output["text"], lexer, source=self.source, line=source_line @@ -151,21 +189,25 @@ def render_stdout( return [node] def render_stderr( - self, output: NotebookNode, cell_index: int, source_line: int + self, + output: NotebookNode, + cell_metadata: Dict[str, Any], + cell_index: int, + source_line: int, ) -> List[nodes.Element]: """Render a notebook stderr output. https://nbformat.readthedocs.io/en/5.1.3/format_description.html#stream-output :param output: the output node + :param metadata: the cell level metadata :param cell_index: the index of the cell containing the output :param source_line: the line number of the cell in the source document """ - metadata = self.get_cell_metadata(cell_index) - if "remove-stderr" in metadata.get("tags", []): + if "remove-stderr" in cell_metadata.get("tags", []): return [] output_stderr = self.renderer.get_cell_render_config( - cell_index, "output_stderr" + cell_metadata, "output_stderr" ) msg = f"stderr was found in the cell outputs of cell {cell_index + 1}" outputs = [] @@ -181,7 +223,7 @@ def render_stderr( elif output_stderr == "severe": self.logger.critical(msg, subtype="stderr", line=source_line) lexer = self.renderer.get_cell_render_config( - cell_index, "text_lexer", "render_text_lexer" + cell_metadata, "text_lexer", "render_text_lexer" ) node = self.renderer.create_highlighted_code_block( output["text"], lexer, source=self.source, line=source_line @@ -191,19 +233,24 @@ def render_stderr( return outputs def render_error( - self, output: NotebookNode, cell_index: int, source_line: int + self, + output: NotebookNode, + cell_metadata: Dict[str, Any], + cell_index: int, + source_line: int, ) -> List[nodes.Element]: """Render a notebook error output. 
https://nbformat.readthedocs.io/en/5.1.3/format_description.html#error :param output: the output node + :param metadata: the cell level metadata :param cell_index: the index of the cell containing the output :param source_line: the line number of the cell in the source document """ traceback = strip_ansi("\n".join(output["traceback"])) lexer = self.renderer.get_cell_render_config( - cell_index, "error_lexer", "render_error_lexer" + cell_metadata, "error_lexer", "render_error_lexer" ) node = self.renderer.create_highlighted_code_block( traceback, lexer, source=self.source, line=source_line @@ -211,74 +258,51 @@ def render_error( node["classes"] += ["output", "traceback"] return [node] - def render_mime_type( - self, mime_type: str, data: Union[str, bytes], cell_index: int, source_line: int - ) -> List[nodes.Element]: - """Render a notebook mime output. - - https://nbformat.readthedocs.io/en/5.1.3/format_description.html#display-data - - :param mime_type: the key from the "data" dict - :param data: the value from the "data" dict - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ - if mime_type == "text/plain": - return self.render_text_plain(data, cell_index, source_line) - if mime_type in { + def render_mime_type(self, data: MimeData) -> List[nodes.Element]: + """Render a notebook mime output.""" + if data.mime_type == "text/plain": + return self.render_text_plain(data) + if data.mime_type in { "image/png", "image/jpeg", "application/pdf", "image/svg+xml", "image/gif", }: - return self.render_image(mime_type, data, cell_index, source_line) - if mime_type == "text/html": - return self.render_text_html(data, cell_index, source_line) - if mime_type == "text/latex": - return self.render_text_latex(data, cell_index, source_line) - if mime_type == "application/javascript": - return self.render_javascript(data, cell_index, source_line) - if mime_type == WIDGET_VIEW_MIMETYPE: - return self.render_widget_view(data, cell_index, source_line) - if mime_type == "text/markdown": - return self.render_markdown(data, cell_index, source_line) - - return self.render_unknown(mime_type, data, cell_index, source_line) - - def render_unknown( - self, mime_type: str, data: Union[str, bytes], cell_index: int, source_line: int - ) -> List[nodes.Element]: - """Render a notebook output of unknown mime type. 
- - :param mime_type: the key from the "data" dict - :param data: the value from the "data" dict - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ + return self.render_image(data) + if data.mime_type == "text/html": + return self.render_text_html(data) + if data.mime_type == "text/latex": + return self.render_text_latex(data) + if data.mime_type == "application/javascript": + return self.render_javascript(data) + if data.mime_type == WIDGET_VIEW_MIMETYPE: + return self.render_widget_view(data) + if data.mime_type == "text/markdown": + return self.render_markdown(data) + + return self.render_unknown(data) + + def render_unknown(self, data: MimeData) -> List[nodes.Element]: + """Render a notebook output of unknown mime type.""" self.logger.warning( - f"skipping unknown output mime type: {mime_type}", + f"skipping unknown output mime type: {data.mime_type}", subtype="unknown_mime_type", - line=source_line, + line=data.line, ) return [] - def render_markdown( - self, data: str, cell_index: int, source_line: int - ) -> List[nodes.Element]: - """Render a notebook text/markdown mime data output. - - :param data: the value from the "data" dict - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ + def render_markdown(self, data: MimeData) -> List[nodes.Element]: + """Render a notebook text/markdown mime data output.""" # create a container to parse the markdown into temp_container = nodes.container() # setup temporary renderer config md = self.renderer.md match_titles = self.renderer.md_env.get("match_titles", None) - if self.renderer.get_cell_render_config(cell_index, "embed_markdown_outputs"): + if self.renderer.get_cell_render_config( + data.cell_metadata, "embed_markdown_outputs" + ): # this configuration is used in conjunction with a transform, # which move this content outside & below the output container # in this way the Markdown output can contain headings, @@ -294,7 +318,7 @@ def render_markdown( # parse markdown with self.renderer.current_node_context(temp_container): - self.renderer.nested_render_text(data, source_line) + self.renderer.nested_render_text(data.string, data.line) # restore renderer config self.renderer.md = md @@ -302,9 +326,7 @@ def render_markdown( return temp_container.children - def render_text_plain( - self, data: str, cell_index: int, source_line: int - ) -> List[nodes.Element]: + def render_text_plain(self, data: MimeData) -> List[nodes.Element]: """Render a notebook text/plain mime data output. :param data: the value from the "data" dict @@ -312,17 +334,15 @@ def render_text_plain( :param source_line: the line number of the cell in the source document """ lexer = self.renderer.get_cell_render_config( - cell_index, "text_lexer", "render_text_lexer" + data.cell_metadata, "text_lexer", "render_text_lexer" ) node = self.renderer.create_highlighted_code_block( - data, lexer, source=self.source, line=source_line + data.string, lexer, source=self.source, line=data.line ) node["classes"] += ["output", "text_plain"] return [node] - def render_text_html( - self, data: str, cell_index: int, source_line: int - ) -> List[nodes.Element]: + def render_text_html(self, data: MimeData) -> List[nodes.Element]: """Render a notebook text/html mime data output. 
:param data: the value from the "data" dict @@ -330,11 +350,11 @@ def render_text_html( :param source_line: the line number of the cell in the source document :param inline: create inline nodes instead of block nodes """ - return [nodes.raw(text=data, format="html", classes=["output", "text_html"])] + return [ + nodes.raw(text=data.string, format="html", classes=["output", "text_html"]) + ] - def render_text_latex( - self, data: str, cell_index: int, source_line: int - ) -> List[nodes.Element]: + def render_text_latex(self, data: MimeData) -> List[nodes.Element]: """Render a notebook text/latex mime data output. :param data: the value from the "data" dict @@ -344,7 +364,7 @@ def render_text_latex( # TODO should we always assume this is math? return [ nodes.math_block( - text=strip_latex_delimiters(data), + text=strip_latex_delimiters(data.string), nowrap=False, number=None, classes=["output", "text_latex"], @@ -353,10 +373,7 @@ def render_text_latex( def render_image( self, - mime_type: str, - data: Union[str, bytes], - cell_index: int, - source_line: int, + data: MimeData, ) -> List[nodes.Element]: """Render a notebook image mime data output. @@ -369,14 +386,21 @@ def render_image( # https://github.com/jupyter/nbconvert/blob/45df4b6089b3bbab4b9c504f9e6a892f5b8692e3/nbconvert/preprocessors/extractoutput.py#L43 # ensure that the data is a bytestring - if mime_type in {"image/png", "image/jpeg", "image/gif", "application/pdf"}: + if data.mime_type in { + "image/png", + "image/jpeg", + "image/gif", + "application/pdf", + }: # data is b64-encoded as text - data_bytes = a2b_base64(data) - elif isinstance(data, str): + data_bytes = a2b_base64(data.content) + elif isinstance(data.content, str): # ensure corrent line separator data_bytes = os.linesep.join(data.splitlines()).encode("utf-8") # create filename - extension = guess_extension(mime_type) or "." + mime_type.rsplit("/")[-1] + extension = ( + guess_extension(data.mime_type) or "." + data.mime_type.rsplit("/")[-1] + ) # latex does not recognize the '.jpe' extension extension = ".jpeg" if extension == ".jpe" else extension # ensure de-duplication of outputs by using hash as filename @@ -389,7 +413,7 @@ def render_image( # apply attributes to the image node # TODO backwards-compatible re-naming to image_options? image_options = self.renderer.get_cell_render_config( - cell_index, "image", "render_image_options" + data.cell_metadata, "image", "render_image_options" ) for key, spec in [ ("classes", options_spec.class_option), @@ -405,19 +429,17 @@ def render_image( image_node[key] = spec(image_options[key]) except Exception as exc: msg = f"Invalid image option ({key!r}; {image_options[key]!r}): {exc}" - self.logger.warning(msg, subtype="image", line=source_line) + self.logger.warning(msg, subtype="image", line=data.line) return [image_node] - def render_javascript( - self, data: str, cell_index: int, source_line: int - ) -> List[nodes.Element]: + def render_javascript(self, data: MimeData) -> List[nodes.Element]: """Render a notebook application/javascript mime data output. 
:param data: the value from the "data" dict :param cell_index: the index of the cell containing the output :param source_line: the line number of the cell in the source document """ - content = sanitize_script_content(data) + content = sanitize_script_content(data.string) mime_type = "application/javascript" return [ nodes.raw( @@ -426,16 +448,15 @@ def render_javascript( ) ] - def render_widget_view( - self, data: str, cell_index: int, source_line: int - ) -> List[nodes.Element]: + def render_widget_view(self, data: MimeData) -> List[nodes.Element]: """Render a notebook application/vnd.jupyter.widget-view+json mime output. :param data: the value from the "data" dict :param cell_index: the index of the cell containing the output :param source_line: the line number of the cell in the source document """ - content = sanitize_script_content(json.dumps(data)) + # TODO note ipywidgets present? + content = sanitize_script_content(json.dumps(data.string)) return [ nodes.raw( text=f'<script type="{WIDGET_VIEW_MIMETYPE}">{content}</script>', diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index eb8fc795..9fe9a81c 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -34,6 +34,7 @@ from myst_nb.read import UnexpectedCellDirective, create_nb_reader from myst_nb.render import ( WIDGET_STATE_MIMETYPE, + MimeData, NbElementRenderer, create_figure_context, load_renderer, @@ -343,7 +344,7 @@ def get_nb_config(self, key: str) -> Any: def get_cell_render_config( self, - cell_index: int, + cell_metadata: Dict[str, Any], key: str, nb_key: Optional[str] = None, has_nb_key: bool = True, @@ -357,17 +358,17 @@ def get_cell_render_config( :raises: IndexError if the cell index is out of range :raises: KeyError if the key is not found """ - cell = self.config["notebook"].cells[cell_index] + # TODO allow output level configuration? cell_metadata_key = self.get_nb_config("cell_render_key") if ( - cell_metadata_key not in cell.metadata - or key not in cell.metadata[cell_metadata_key] + cell_metadata_key not in cell_metadata + or key not in cell_metadata[cell_metadata_key] ): if not has_nb_key: raise KeyError(key) return self.get_nb_config(nb_key if nb_key is not None else key) # TODO validate? - return cell.metadata[cell_metadata_key][key] + return cell_metadata[cell_metadata_key][key] def render_nb_metadata(self, token: SyntaxTreeNode) -> None: """Render the notebook metadata.""" @@ -430,12 +431,12 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: # TODO do we need this -/_ duplication of tag names, or can we deprecate one? 
remove_input = ( - self.get_cell_render_config(cell_index, "remove_code_source") + self.get_cell_render_config(token.meta["metadata"], "remove_code_source") or ("remove_input" in tags) or ("remove-input" in tags) ) remove_output = ( - self.get_cell_render_config(cell_index, "remove_code_outputs") + self.get_cell_render_config(token.meta["metadata"], "remove_code_outputs") or ("remove_output" in tags) or ("remove-output" in tags) ) @@ -482,12 +483,14 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: def render_nb_cell_code_source(self, token: SyntaxTreeNode) -> None: """Render a notebook code cell's source.""" - cell_index = token.meta["index"] + # cell_index = token.meta["index"] lexer = token.meta.get("lexer", None) node = self.create_highlighted_code_block( token.content, lexer, - number_lines=self.get_cell_render_config(cell_index, "number_source_lines"), + number_lines=self.get_cell_render_config( + token.meta["metadata"], "number_source_lines" + ), source=self.document["source"], line=token_line(token), ) @@ -496,26 +499,33 @@ def render_nb_cell_code_source(self, token: SyntaxTreeNode) -> None: def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: """Render a notebook code cell's outputs.""" - cell_index = token.meta["index"] line = token_line(token, 0) + cell_index = token.meta["index"] + metadata = token.meta["metadata"] outputs: List[NotebookNode] = self.config["notebook"]["cells"][cell_index].get( "outputs", [] ) # render the outputs - for output in outputs: + for output_index, output in enumerate(outputs): if output.output_type == "stream": if output.name == "stdout": - _nodes = self.nb_renderer.render_stdout(output, cell_index, line) + _nodes = self.nb_renderer.render_stdout( + output, metadata, cell_index, line + ) self.add_line_and_source_path_r(_nodes, token) self.current_node.extend(_nodes) elif output.name == "stderr": - _nodes = self.nb_renderer.render_stderr(output, cell_index, line) + _nodes = self.nb_renderer.render_stderr( + output, metadata, cell_index, line + ) self.add_line_and_source_path_r(_nodes, token) self.current_node.extend(_nodes) else: pass # TODO warning elif output.output_type == "error": - _nodes = self.nb_renderer.render_error(output, cell_index, line) + _nodes = self.nb_renderer.render_error( + output, metadata, cell_index, line + ) self.add_line_and_source_path_r(_nodes, token) self.current_node.extend(_nodes) elif output.output_type in ("display_data", "execute_result"): @@ -532,7 +542,7 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: figure_options = None with suppress(KeyError): figure_options = self.get_cell_render_config( - cell_index, "figure", has_nb_key=False + metadata, "figure", has_nb_key=False ) with create_figure_context(self, figure_options, line): @@ -545,7 +555,15 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: mime_container = nodes.container(mime_type=mime_type) with self.current_node_context(mime_container): _nodes = self.nb_renderer.render_mime_type( - mime_type, data, cell_index, line + MimeData( + mime_type, + data, + cell_metadata=metadata, + output_metadata=output.get("metadata", {}), + cell_index=cell_index, + output_index=output_index, + line=line, + ) ) self.current_node.extend(_nodes) if mime_container.children: From 3b9010d9880ea96e4057e6ed5f89f5a63cfe4c47 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Fri, 7 Jan 2022 15:09:28 +0100 Subject: [PATCH 42/75] Add most glue functionality --- myst_nb/docutils_.py | 17 
++- myst_nb/nb_glue/domain.py | 24 +-- myst_nb/nb_glue/elements.py | 291 ++++++++++++++++++++++++++---------- myst_nb/render.py | 136 +++++++++++------ 4 files changed, 331 insertions(+), 137 deletions(-) diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index b7d7df6c..633d31e6 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -6,6 +6,7 @@ from docutils import nodes from docutils.core import default_description, publish_cmdline from docutils.parsers.rst.directives import _directives +from docutils.parsers.rst.roles import _roles from markdown_it.token import Token from markdown_it.tree import SyntaxTreeNode from myst_parser.docutils_ import DOCUTILS_EXCLUDED_ARGS as DOCUTILS_EXCLUDED_ARGS_MYST @@ -19,7 +20,12 @@ from myst_nb.configuration import NbParserConfig from myst_nb.execute import execute_notebook from myst_nb.loggers import DEFAULT_LOG_TYPE, DocutilsDocLogger -from myst_nb.nb_glue.elements import PasteDirective, PasteFigureDirective +from myst_nb.nb_glue.elements import ( + PasteDirective, + PasteFigureDirective, + PasteRole, + PasteTextRole, +) from myst_nb.parse import nb_node_to_dict, notebook_to_tokens from myst_nb.preprocess import preprocess_notebook from myst_nb.read import ( @@ -65,13 +71,22 @@ def parse(self, inputstring: str, document: nodes.document) -> None: ("glue:any", PasteDirective), ("glue:figure", PasteFigureDirective), ) + new_roles = ( + ("glue:", PasteRole()), + ("glue:any", PasteRole()), + ("glue:text", PasteTextRole()), + ) for name, directive in new_directives: _directives[name] = directive + for name, role in new_roles: + _roles[name] = role try: return self._parse(inputstring, document) finally: for name, _ in new_directives: _directives.pop(name, None) + for name, _ in new_roles: + _roles.pop(name, None) def _parse(self, inputstring: str, document: nodes.document) -> None: """Parse source text. diff --git a/myst_nb/nb_glue/domain.py b/myst_nb/nb_glue/domain.py index 91175e15..27562108 100644 --- a/myst_nb/nb_glue/domain.py +++ b/myst_nb/nb_glue/domain.py @@ -1,10 +1,19 @@ +"""A domain to register in sphinx. + +This is required for any directive/role names using `:`. 
+""" from typing import List from sphinx.domains import Domain from sphinx.ext.autodoc.directive import DummyOptionSpec -from sphinx.util.docutils import SphinxDirective, SphinxRole +from sphinx.util.docutils import SphinxDirective -from myst_nb.nb_glue.elements import PasteDirective, PasteFigureDirective +from myst_nb.nb_glue.elements import ( + PasteDirective, + PasteFigureDirective, + PasteRole, + PasteTextRole, +) class DummyDirective(SphinxDirective): @@ -17,15 +26,6 @@ def run(self): return [] -class DummyDirective2(DummyDirective): - has_content = True - - -class DummyRole(SphinxRole): - def run(self): - return [], [] - - class NbGlueDomain(Domain): """A sphinx domain for defining glue roles and directives.""" @@ -41,7 +41,7 @@ class NbGlueDomain(Domain): "figure": PasteFigureDirective, "math": DummyDirective, } - roles = {"": DummyRole(), "any": DummyRole(), "text": DummyRole()} + roles = {"": PasteRole(), "any": PasteRole(), "text": PasteTextRole()} def merge_domaindata(self, docnames: List[str], otherdata: dict) -> None: pass diff --git a/myst_nb/nb_glue/elements.py b/myst_nb/nb_glue/elements.py index 0b45b162..3549ef42 100644 --- a/myst_nb/nb_glue/elements.py +++ b/myst_nb/nb_glue/elements.py @@ -1,102 +1,231 @@ """Directives and roles which can be used by both docutils and sphinx.""" -from typing import Any, Dict, List +from typing import Any, Dict, List, Tuple from docutils import nodes from docutils.parsers.rst import Directive, directives +from docutils.parsers.rst.states import Inliner +from docutils.utils import unescape from myst_nb.loggers import DocutilsDocLogger, SphinxDocLogger from myst_nb.render import MimeData, NbElementRenderer -class PasteDirective(Directive): - """A directive for pasting code outputs from notebooks.""" +def is_sphinx(document) -> bool: + """Return True if we are in sphinx, otherwise docutils.""" + return hasattr(document.settings, "env") - required_arguments = 1 # the key - final_argument_whitespace = True - has_content = False - @property - def is_sphinx(self) -> bool: - """Return True if we are in sphinx, otherwise docutils.""" - return hasattr(self.state.document.settings, "env") +def warning(message: str, document: nodes.document, line: int) -> nodes.system_message: + """Create a warning.""" + if is_sphinx(document): + logger = SphinxDocLogger(document) + else: + logger = DocutilsDocLogger(document) + logger.warning(message, subtype="glue") + return nodes.system_message( + message, + type="WARNING", + level=2, + line=line, + source=document["source"], + ) + - def warning(self, message: str) -> nodes.system_message: - """Create a warning.""" - if self.is_sphinx: - logger = SphinxDocLogger(self.state.document) +def render_output_docutils( + document, line, nb_renderer: NbElementRenderer, output: Dict[str, Any], inline=False +) -> List[nodes.Node]: + """Render the output in docutils (select mime priority directly).""" + mime_priority = nb_renderer.renderer.get_nb_config("mime_priority") + try: + mime_type = next(x for x in mime_priority if x in output["data"]) + except StopIteration: + return [ + warning( + "No output mime type found from render_priority", + document, + line, + ) + ] + else: + data = MimeData( + mime_type, + output["data"][mime_type], + output_metadata=output.get("metadata", {}), + line=line, + ) + if inline: + return nb_renderer.render_mime_type_inline(data) + return nb_renderer.render_mime_type(data) + + +def render_output_sphinx( + document, + line, + nb_renderer: NbElementRenderer, + output: Dict[str, Any], + 
set_source_info, + inline=False, +) -> List[nodes.Node]: + """Render the output in sphinx (defer mime priority selection).""" + mime_bundle = nodes.container(nb_element="mime_bundle") + set_source_info(mime_bundle) + for mime_type, data in output["data"].items(): + mime_container = nodes.container(mime_type=mime_type) + set_source_info(mime_container) + data = MimeData( + mime_type, data, output_metadata=output.get("metadata", {}), line=line + ) + if inline: + nds = nb_renderer.render_mime_type_inline(data) else: - logger = DocutilsDocLogger(self.state.document) - logger.warning(message, subtype="glue") - return nodes.system_message( - message, - type="WARNING", - level=2, - line=self.lineno, - source=self.state.document["source"], + nds = nb_renderer.render_mime_type(data) + if nds: + mime_container.extend(nds) + mime_bundle.append(mime_container) + return [mime_bundle] + + +def render_glue_output( + key: str, document: nodes.document, line: int, set_source_info, inline=False +) -> List[nodes.Node]: + if "nb_renderer" not in document: + return [warning("No 'nb_renderer' found on the document.", document, line)] + nb_renderer: NbElementRenderer = document["nb_renderer"] + resources = nb_renderer.get_resources() + if "glue" not in resources: + return [ + warning("No glue data found in the notebook resources.", document, line) + ] + if key not in resources["glue"]: + return [warning(f"No key {key!r} found in glue data.", document, line)] + if not resources["glue"][key].get("data"): + return [warning(f"{key!r} does not contain any data.", document, line)] + if is_sphinx(document): + return render_output_sphinx( + document, line, nb_renderer, resources["glue"][key], set_source_info, inline + ) + else: + return render_output_docutils( + document, line, nb_renderer, resources["glue"][key], inline ) - def set_source_info(self, node: nodes.Node) -> None: - """Set source and line number for the node.""" - node.source, node.line = self.state_machine.get_source_and_line(self.lineno) - def run(self) -> List[nodes.Node]: - """Run the directive.""" - key = self.arguments[0] - if "nb_renderer" not in self.state.document: - return self.warning("No 'nb_renderer' found on the document.") - nb_renderer: NbElementRenderer = self.state.document["nb_renderer"] +class PasteRole: + """A role for pasting inline code outputs from notebooks.""" + + def get_source_info(self, lineno: int = None) -> Tuple[str, int]: + if lineno is None: + lineno = self.lineno + return self.inliner.reporter.get_source_and_line(lineno) # type: ignore + + def set_source_info(self, node: nodes.Node, lineno: int = None) -> None: + node.source, node.line = self.get_source_info(lineno) + + def __call__( + self, + name: str, + rawtext: str, + text: str, + lineno: int, + inliner: Inliner, + options=None, + content=(), + ) -> Tuple[List[nodes.Node], List[nodes.system_message]]: + self.text = unescape(text) + self.lineno = lineno + self.inliner = inliner + self.rawtext = rawtext + return self.run() + + def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: + """Run the role.""" + paste_nodes = render_glue_output( + self.text, + self.inliner.document, + self.lineno, + self.set_source_info, + inline=True, + ) + if not paste_nodes and isinstance(paste_nodes[0], nodes.system_message): + return [], paste_nodes + return paste_nodes, [] + + +class PasteTextRole(PasteRole): + """A role for pasting text outputs from notebooks.""" + + def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: + # now check if we have both 
key:format in the key + parts = self.text.rsplit(":", 1) + if len(parts) == 2: + key, formatting = parts + else: + key = parts[0] + formatting = None + + # now retrieve the data + document = self.inliner.document + if "nb_renderer" not in document: + return [], [ + warning( + "No 'nb_renderer' found on the document.", document, self.lineno + ) + ] + nb_renderer: NbElementRenderer = document["nb_renderer"] resources = nb_renderer.get_resources() if "glue" not in resources: - return self.warning("No glue data found in the notebook resources.") + return [], [ + warning( + "No glue data found in the notebook resources.", + document, + self.lineno, + ) + ] if key not in resources["glue"]: - return self.warning(f"No key {key!r} found in glue data.") - if not resources["glue"][key].get("data"): - return self.warning(f"{key!r} does not contain any data.") - if self.is_sphinx: - return self.render_output_sphinx(nb_renderer, resources["glue"][key]) - else: - return self.render_output_docutils(nb_renderer, resources["glue"][key]) - - def render_output_docutils( - self, nb_renderer: NbElementRenderer, output: Dict[str, Any] - ) -> List[nodes.Node]: - """Render the output in docutils (select mime priority directly).""" - mime_priority = nb_renderer.renderer.get_nb_config("mime_priority") - try: - mime_type = next(x for x in mime_priority if x in output["data"]) - except StopIteration: - return self.warning("No output mime type found from render_priority") - else: - return nb_renderer.render_mime_type( - MimeData( - mime_type, - output["data"][mime_type], - output_metadata=output.get("metadata", {}), - line=self.lineno, + return [], [ + warning(f"No key {key!r} found in glue data.", document, self.lineno) + ] + if "text/plain" not in resources["glue"][key].get("data", {}): + return [], [ + warning( + f"{key!r} does not contain 'text/plain' data.", + document, + self.lineno, ) - ) + ] + text = resources["glue"][key]["data"]["text/plain"].strip("'") + # If formatting is specified, see if we have a number of some kind + if formatting: + try: + newtext = float(text) + text = f"{newtext:>{formatting}}" + except ValueError: + pass + node = nodes.inline(text, text, classes=["pasted-text"]) + self.set_source_info(node) + return [node], [] - def render_output_sphinx( - self, nb_renderer: NbElementRenderer, output: Dict[str, Any] - ) -> List[nodes.Node]: - """Render the output in sphinx (defer mime priority selection).""" - mime_bundle = nodes.container(nb_element="mime_bundle") - self.set_source_info(mime_bundle) - for mime_type, data in output["data"].items(): - mime_container = nodes.container(mime_type=mime_type) - self.set_source_info(mime_container) - nds = nb_renderer.render_mime_type( - MimeData( - mime_type, - data, - output_metadata=output.get("metadata", {}), - line=self.lineno, - ) - ) - if nds: - mime_container.extend(nds) - mime_bundle.append(mime_container) - return [mime_bundle] + +class PasteDirective(Directive): + """A directive for pasting code outputs from notebooks.""" + + required_arguments = 1 # the key + final_argument_whitespace = True + has_content = False + + def get_source_info(self) -> Tuple[str, int]: + """Get source and line number.""" + return self.state_machine.get_source_and_line(self.lineno) + + def set_source_info(self, node: nodes.Node) -> None: + """Set source and line number to the node.""" + node.source, node.line = self.get_source_info() + + def run(self) -> List[nodes.Node]: + """Run the directive.""" + return render_glue_output( + self.arguments[0], self.state.document, 
self.lineno, self.set_source_info + ) class PasteFigureDirective(PasteDirective): @@ -151,8 +280,10 @@ def run(self): caption.line = first_node.line figure_node += caption elif not (isinstance(first_node, nodes.comment) and len(first_node) == 0): - error = self.warning( - "Figure caption must be a paragraph or empty comment." + error = warning( + "Figure caption must be a paragraph or empty comment.", + self.state.document, + self.lineno, ) return [figure_node, error] if len(node) > 1: diff --git a/myst_nb/render.py b/myst_nb/render.py index a58384f5..afdd1366 100644 --- a/myst_nb/render.py +++ b/myst_nb/render.py @@ -259,7 +259,7 @@ def render_error( return [node] def render_mime_type(self, data: MimeData) -> List[nodes.Element]: - """Render a notebook mime output.""" + """Render a notebook mime output, as a block level element.""" if data.mime_type == "text/plain": return self.render_text_plain(data) if data.mime_type in { @@ -310,7 +310,7 @@ def render_markdown(self, data: MimeData) -> List[nodes.Element]: # TODO create transform and for sphinx prioritise this output for all output formats self.renderer.md_env["match_titles"] = True else: - # otherwise we render as simple Markdown and heading are not allowed + # otherwise we render as simple Markdown and headings are not allowed self.renderer.md_env["match_titles"] = False self.renderer.md = create_md_parser( MdParserConfig(commonmark_only=True), self.renderer.__class__ @@ -327,12 +327,7 @@ def render_markdown(self, data: MimeData) -> List[nodes.Element]: return temp_container.children def render_text_plain(self, data: MimeData) -> List[nodes.Element]: - """Render a notebook text/plain mime data output. - - :param data: the value from the "data" dict - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ + """Render a notebook text/plain mime data output.""" lexer = self.renderer.get_cell_render_config( data.cell_metadata, "text_lexer", "render_text_lexer" ) @@ -343,24 +338,13 @@ def render_text_plain(self, data: MimeData) -> List[nodes.Element]: return [node] def render_text_html(self, data: MimeData) -> List[nodes.Element]: - """Render a notebook text/html mime data output. - - :param data: the value from the "data" dict - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - :param inline: create inline nodes instead of block nodes - """ + """Render a notebook text/html mime data output.""" return [ nodes.raw(text=data.string, format="html", classes=["output", "text_html"]) ] def render_text_latex(self, data: MimeData) -> List[nodes.Element]: - """Render a notebook text/latex mime data output. - - :param data: the value from the "data" dict - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ + """Render a notebook text/latex mime data output.""" # TODO should we always assume this is math? return [ nodes.math_block( @@ -371,17 +355,8 @@ def render_text_latex(self, data: MimeData) -> List[nodes.Element]: ) ] - def render_image( - self, - data: MimeData, - ) -> List[nodes.Element]: - """Render a notebook image mime data output. 
- - :param mime_type: the key from the "data" dict - :param data: the value from the "data" dict - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ + def render_image(self, data: MimeData) -> List[nodes.Element]: + """Render a notebook image mime data output.""" # Adapted from: # https://github.com/jupyter/nbconvert/blob/45df4b6089b3bbab4b9c504f9e6a892f5b8692e3/nbconvert/preprocessors/extractoutput.py#L43 @@ -433,12 +408,7 @@ def render_image( return [image_node] def render_javascript(self, data: MimeData) -> List[nodes.Element]: - """Render a notebook application/javascript mime data output. - - :param data: the value from the "data" dict - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ + """Render a notebook application/javascript mime data output.""" content = sanitize_script_content(data.string) mime_type = "application/javascript" return [ @@ -449,12 +419,7 @@ def render_javascript(self, data: MimeData) -> List[nodes.Element]: ] def render_widget_view(self, data: MimeData) -> List[nodes.Element]: - """Render a notebook application/vnd.jupyter.widget-view+json mime output. - - :param data: the value from the "data" dict - :param cell_index: the index of the cell containing the output - :param source_line: the line number of the cell in the source document - """ + """Render a notebook application/vnd.jupyter.widget-view+json mime output.""" # TODO note ipywidgets present? content = sanitize_script_content(json.dumps(data.string)) return [ @@ -464,6 +429,89 @@ def render_widget_view(self, data: MimeData) -> List[nodes.Element]: ) ] + def render_mime_type_inline(self, data: MimeData) -> List[nodes.Element]: + """Render a notebook mime output, as an inline level element.""" + if data.mime_type == "text/plain": + return self.render_text_plain_inline(data) + if data.mime_type in { + "image/png", + "image/jpeg", + "application/pdf", + "image/svg+xml", + "image/gif", + }: + return self.render_image_inline(data) + if data.mime_type == "text/html": + return self.render_text_html_inline(data) + if data.mime_type == "text/latex": + return self.render_text_latex_inline(data) + if data.mime_type == "application/javascript": + return self.render_javascript_inline(data) + if data.mime_type == WIDGET_VIEW_MIMETYPE: + return self.render_widget_view_inline(data) + if data.mime_type == "text/markdown": + return self.render_markdown_inline(data) + + return self.render_unknown_inline(data) + + def render_unknown_inline(self, data: MimeData) -> List[nodes.Element]: + """Render a notebook output of unknown mime type.""" + self.logger.warning( + f"skipping unknown output mime type: {data.mime_type}", + subtype="unknown_mime_type", + line=data.line, + ) + return [] + + def render_markdown_inline(self, data: MimeData) -> List[nodes.Element]: + """Render a notebook text/markdown mime data output.""" + # TODO render_markdown_inline + return [] + + def render_text_plain_inline(self, data: MimeData) -> List[nodes.Element]: + """Render a notebook text/plain mime data output.""" + # TODO previously this was not syntax highlighted? 
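+        # highlight with the configured text lexer, but emit an inline literal node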
+ lexer = self.renderer.get_cell_render_config( + data.cell_metadata, "text_lexer", "render_text_lexer" + ) + node = self.renderer.create_highlighted_code_block( + data.string, + lexer, + source=self.source, + line=data.line, + node_cls=nodes.literal, + ) + node["classes"] += ["output", "text_plain"] + return [node] + + def render_text_html_inline(self, data: MimeData) -> List[nodes.Element]: + """Render a notebook text/html mime data output.""" + return self.render_text_html(data) + + def render_text_latex_inline(self, data: MimeData) -> List[nodes.Element]: + """Render a notebook text/latex mime data output.""" + # TODO should we always assume this is math? + return [ + nodes.math( + text=strip_latex_delimiters(data.string), + nowrap=False, + number=None, + classes=["output", "text_latex"], + ) + ] + + def render_image_inline(self, data: MimeData) -> List[nodes.Element]: + """Render a notebook image mime data output.""" + return self.render_image(data) + + def render_javascript_inline(self, data: MimeData) -> List[nodes.Element]: + """Render a notebook application/javascript mime data output.""" + return self.render_javascript(data) + + def render_widget_view_inline(self, data: MimeData) -> List[nodes.Element]: + """Render a notebook application/vnd.jupyter.widget-view+json mime output.""" + return self.render_widget_view(data) + class EntryPointError(Exception): """Exception raised when an entry point cannot be loaded.""" From b67d21903223eb51b5e52c71e4492fdf91dc3a80 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sat, 8 Jan 2022 02:34:48 +0100 Subject: [PATCH 43/75] Update documentation --- docs/api/index.rst | 62 ++++- docs/conf.py | 86 ++++++- docs/use/config-reference.md | 7 + docs/use/formatting_outputs.md | 14 +- docs/use/glue.md | 72 ++++-- myst_nb/configuration.py | 9 - myst_nb/docutils_.py | 57 +++-- myst_nb/nb_glue/domain.py | 25 +- myst_nb/nb_glue/elements.py | 225 +++++++++++++++--- myst_nb/render.py | 62 +++-- myst_nb/sphinx_.py | 38 ++- tests/test_glue/test_parser.sphinx4.xml | 172 +++++-------- .../test_complex_outputs.xml | 126 +++++----- .../test_complex_outputs_latex.xml | 46 ++-- .../test_merge_streams.xml | 5 +- .../test_metadata_figure.xml | 3 +- .../test_metadata_image.xml | 3 +- 17 files changed, 656 insertions(+), 356 deletions(-) diff --git a/docs/api/index.rst b/docs/api/index.rst index 867b299a..072bfea1 100644 --- a/docs/api/index.rst +++ b/docs/api/index.rst @@ -3,9 +3,69 @@ Python API ========== -Miscellaneous +The parsing of a notebook consists of a number of stages, with each stage separated into a separate module: + +1. The configuration is set (from a file or CLI) +2. The parser is called with an input string and source +3. The parser reads the input string to a notebook node +4. The notebook code outputs are potentially updated, via execution or from a cache +5. The notebook is "pre-processed" in-place (e.g. to coalesce output streams and extract glue outputs) +6. The notebook is converted to a Markdown-It tokens syntax tree +7. The syntax tree is transformed to a docutils document AST (calling the renderer plugin) +8. The docutils document is processed by docutils/sphinx, to create the desired output format(s) + +Configuration +------------- + +.. autoclass:: myst_nb.configuration.NbParserConfig + :members: + +Parsers +------- + +.. autoclass:: myst_nb.docutils_.Parser + :members: + +.. autoclass:: myst_nb.sphinx_.Parser + :members: + +Read +---- + +.. autoclass:: myst_nb.read.NbReader + :members: + +.. 
autofunction:: myst_nb.read.create_nb_reader + +.. autofunction:: myst_nb.read.is_myst_markdown_notebook + +.. autofunction:: myst_nb.read.read_myst_markdown_notebook + +Execute +------- + +.. autoclass:: myst_nb.execute.ExecutionResult + :members: + +.. autofunction:: myst_nb.execute.execute_notebook + +Pre-process +----------- + +.. autofunction:: myst_nb.preprocess.preprocess_notebook + +Render plugin ------------- +.. autoclass:: myst_nb.render.MimeData + :members: + +.. autoclass:: myst_nb.render.NbElementRenderer + :members: + +Lexers +------ + .. autoclass:: myst_nb.lexers.AnsiColorLexer :members: :undoc-members: diff --git a/docs/conf.py b/docs/conf.py index 63d43e43..54ddbc06 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -41,6 +41,9 @@ nb_execution_mode = "cache" nb_execution_show_tb = "READTHEDOCS" in os.environ nb_execution_timeout = 60 # Note: 30 was timing out on RTD +# nb_render_image_options = {"width": "200px"} +# application/vnd.plotly.v1+json and application/vnd.bokehjs_load.v0+json +suppress_warnings = ["mystnb.unknown_mime_type"] intersphinx_mapping = { "python": ("https://docs.python.org/3.8", None), @@ -53,13 +56,27 @@ } intersphinx_cache_limit = 5 +# ignore these type annotations nitpick_ignore = [ - ("py:class", "docutils.nodes.document"), - ("py:class", "docutils.nodes.Node"), - ("py:class", "docutils.nodes.container"), - ("py:class", "docutils.nodes.system_message"), - ("py:class", "nbformat.notebooknode.NotebookNode"), - ("py:class", "pygments.lexer.RegexLexer"), + ("py:class", klass) + for klass in [ + "attr._make.Attribute", + "docutils.nodes.document", + "docutils.nodes.Node", + "docutils.nodes.Element", + "docutils.nodes.container", + "docutils.nodes.system_message", + "DocutilsNbRenderer", + "myst_parser.main.MdParserConfig", + "nbformat.notebooknode.NotebookNode", + "pygments.lexer.RegexLexer", + "typing_extensions.Literal", + "typing_extensions.Literal[show, remove, remove - warn, warn, error, severe]", + "off", + "force", + "auto", + "cache", + ] ] # -- Options for HTML output ------------------------------------------------- @@ -90,9 +107,64 @@ panels_add_bootstrap_css = False -def setup(_): +def setup(app): + """Add functions to the Sphinx setup.""" import subprocess + from typing import cast + + from docutils import nodes + from docutils.parsers.rst import directives + from sphinx.application import Sphinx + from sphinx.util.docutils import SphinxDirective + + app = cast(Sphinx, app) # this is required to register the coconut kernel with Jupyter, # to execute docs/examples/coconut-lang.md subprocess.check_call(["coconut", "--jupyter"]) + + class MystNbConfigDirective(SphinxDirective): + """Directive to automate printing of the configuration.""" + + option_spec = {"sphinx": directives.flag} + + def run(self): + """Run the directive.""" + from myst_nb.configuration import NbParserConfig + + config = NbParserConfig() + text = [ + "```````{list-table}", + ":header-rows: 1", + "", + "* - Name", + " - Type", + " - Default", + " - Description", + ] + for name, value, field in config.as_triple(): + if "sphinx" in self.options and field.metadata.get("sphinx_exclude"): + continue + description = " ".join(field.metadata.get("help", "").splitlines()) + default = " ".join(f"{value!r}".splitlines()) + if len(default) > 20: + default = default[:20] + "..." 
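+                # tidy the type annotation string for display in the table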
+ ctype = " ".join(str(field.type).splitlines()) + ctype = ctype.replace("typing.", "") + ctype = ctype.replace("typing_extensions.", "") + for tname in ("str", "int", "float", "bool"): + ctype = ctype.replace(f"<class '{tname}'>", tname) + text.extend( + [ + f"* - `{name}`", + f" - `{ctype}`", + f" - `{default}`", + f" - {description}", + ] + ) + text.append("```````") + node = nodes.Element() + self.state.nested_parse(text, 0, node) + return node.children + + app.add_directive("mystnb-config", MystNbConfigDirective) diff --git a/docs/use/config-reference.md b/docs/use/config-reference.md index 197d7b41..0f5cedb4 100644 --- a/docs/use/config-reference.md +++ b/docs/use/config-reference.md @@ -77,3 +77,10 @@ These configuration options affect the look and feel of notebook parsing and out - `False` - If `True`, ensure all stdout / stderr output streams are merged into single outputs. This ensures deterministic outputs. ````` + + +## Auto-generated config + +```{mystnb-config} +:sphinx: +``` diff --git a/docs/use/formatting_outputs.md b/docs/use/formatting_outputs.md index 6308edc5..4104cbaf 100644 --- a/docs/use/formatting_outputs.md +++ b/docs/use/formatting_outputs.md @@ -266,18 +266,16 @@ This is currently not supported, but we hope to introduce it at a later date (use/format/cutomise)= ## Customise the render process -The render process is governed by subclasses of {py:class}`myst_nb.render_outputs.CellOutputRendererBase`, which dictate how to create the `docutils` AST nodes for a particular MIME type. the default implementation is {py:class}`~myst_nb.render_outputs.CellOutputRenderer`. - -Implementations are loaded *via* Python [entry points](https://packaging.python.org/guides/distributing-packages-using-setuptools/#entry-points), in the `myst_nb.mime_render` group. +The render process is governed by subclasses of {py:class}`myst_nb.render.NbElementRenderer`, which dictate how to create the `docutils` AST nodes for a particular MIME type. +Implementations are loaded *via* Python [entry points](https://packaging.python.org/guides/distributing-packages-using-setuptools/#entry-points), in the `myst_nb.renderers` group. So it is possible to inject your own subclass to handle rendering. -For example, the renderers loaded in this package are: +For example, the renderer loaded in this package is: ```python entry_points={ - "myst_nb.mime_render": [ - "default = myst_nb.render_outputs:CellOutputRenderer", - "inline = myst_nb.render_outputs:CellOutputRendererInline", + "myst_nb.renderers": [ + "default = myst_nb.render:NbElementRenderer", ], } ``` @@ -287,3 +285,5 @@ You can then select the renderer plugin in your `conf.py`: ```python nb_render_plugin = "default" ``` + +TODO and example of overriding the renderer ... diff --git a/docs/use/glue.md b/docs/use/glue.md index 089b478c..73cafd94 100644 --- a/docs/use/glue.md +++ b/docs/use/glue.md @@ -11,16 +11,17 @@ kernelspec: name: python3 --- -(glue)= +(glue/main)= # Insert variables into pages with `glue` -You often wish to run analyses in one notebook and insert them into your -documents text elsewhere. For example, if you'd like to include a figure, +You often wish to run analyses in a notebook and insert them into your +documents text elsewhere. +For example, if you'd like to include a figure, or if you want to cite a statistic that you have run. -The **`glue` submodule** allows you to add a key to variables in a notebook, -then display those variables in your book by referencing the key. 
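As a minimal sketch of the workflow this page documents (the key name `my_mean` and its value are purely illustrative):

```python
# In a notebook code cell: glue any displayable object under a key.
from myst_nb import glue

my_mean = 3.14159
glue("my_mean", my_mean)
```

The key can then be referenced from Markdown, for example with `` {glue:}`my_mean` `` or `` {glue:text}`my_mean:.3f` ``.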
+The **`glue` submodule** allows you to add a key to variables in a notebook code cell,
+then display those variables in a Markdown cell by referencing the key.
 
 This page describes how to add keys to variables in notebooks, and how to
 insert them into your book's content in a variety of ways.[^download]
@@ -205,7 +206,7 @@ generic command that doesn't make many assumptions about what you are gluing.
 
 ### The `glue:text` role
 
-The `glue:text` role, is specific to text outputs.
+The `glue:text` role is specific to `text/plain` outputs.
 For example, the following text:
 
 ```
@@ -316,20 +317,49 @@ Which we reference as Equation {eq}`eq-sym`.
 `glue:math` only works with glued variables that contain a `text/latex` output.
 ```
 
+### The `glue:myst` role/directive
+
+With `glue:myst`, you can output `text/markdown` as MyST Markdown text that will be integrated into your page (by default `text/markdown` will be parsed as CommonMark only):
+
+````{code-cell} ipython3
+from IPython.display import Markdown
+glue("inline_md", Markdown(
+    "inline **markdown** with a [link](glue/main), "
+    "and a nested glue value: {glue:}`boot_mean`"
+))
+glue("block_md", Markdown("""
+#### A heading
+
+Then some text, and anything nested.
+
+```python
+print("Hello world!")
+```
+"""
+))
+````
+
+Now, when we paste these keys, the Markdown will be evaluated as block/inline MyST Markdown, as if it were part of the original document.
+
+````md
+Here is some {glue:myst}`inline_md`!
+
+```{glue:myst} block_md
+```
+````
+
+Here is some {glue:myst}`inline_md`!
+
+```{glue:myst} block_md
+```
+
 
 +++
 
 ## Advanced glue usecases
 
 Here are a few more specific and advanced uses of the `glue` submodule.
 
-### Pasting from pages you don't include in the documentation
-
-Sometimes you'd like to use variables from notebooks that are not meant to be
-shown to users. In this case, you should bundle the notebook with the rest of your
-content pages, but include `orphan:` in the metadata of the notebook.
-
-For example, the following text: `` {glue:}`orphaned_var` was created in {ref}`orphaned-nb` ``.
-Results in: {glue:}`orphaned_var` was created in {ref}`orphaned-nb`
+### Pasting
 
 ### Pasting into tables
 
@@ -350,3 +380,17 @@ Results in:
 |:-------------------------------:|:---------------------------:|---------------------------|---------------------------------------------------|
 | histogram and raw text | {glue:}`boot_fig` | {glue:}`boot_mean` | {glue:}`boot_clo`-{glue:}`boot_chi` |
 | sorted means and formatted text | {glue:}`sorted_means_fig` | {glue:text}`boot_mean:.3f` | {glue:text}`boot_clo:.3f`-{glue:text}`boot_chi:.3f` |
+
+
+### Pasting from pages you don't include in the documentation
+
+:::{warning}
+This is now deprecated: keys can only be pasted if they originate in the same notebook.
+:::
+
+Sometimes you'd like to use variables from notebooks that are not meant to be
+shown to users. In this case, you should bundle the notebook with the rest of your
+content pages, but include `orphan:` in the metadata of the notebook.
+
+For example, the following text: `` {glue:}`orphaned_var` was created in {ref}`orphaned-nb` ``.
+<!-- Results in: {glue:}`orphaned_var` was created in {ref}`orphaned-nb` --> diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index 8b6fc4ca..80a66802 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -330,15 +330,6 @@ class NbParserConfig: ), metadata={"help": "Behaviour for stderr output", "cell_metadata": True}, ) - # TODO this needs to be implemented - embed_markdown_outputs: bool = attr.ib( - default=False, - validator=instance_of(bool), - metadata={ - "help": "Embed markdown outputs", # TODO better help text - "cell_metadata": True, - }, - ) # docutils does not allow for the dictionaries in its configuration, # and also there is no API for the parser to know the output format, so # we use two different options for docutils(mime_priority)/sphinx(render_priority) diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 633d31e6..a4afa22f 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -23,6 +23,9 @@ from myst_nb.nb_glue.elements import ( PasteDirective, PasteFigureDirective, + PasteMathDirective, + PasteMystDirective, + PasteMystRole, PasteRole, PasteTextRole, ) @@ -70,11 +73,14 @@ def parse(self, inputstring: str, document: nodes.document) -> None: ("glue:", PasteDirective), ("glue:any", PasteDirective), ("glue:figure", PasteFigureDirective), + ("glue:math", PasteMathDirective), + ("glue:myst", PasteMystDirective), ) new_roles = ( ("glue:", PasteRole()), ("glue:any", PasteRole()), ("glue:text", PasteTextRole()), + ("glue:myst", PasteMystRole()), ) for name, directive in new_directives: _directives[name] = directive @@ -399,14 +405,21 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: self.current_node.extend(_nodes) elif output.output_type in ("display_data", "execute_result"): - # TODO these output have their own 'metadata' key, - # we should parse these to render_mime_type - - # TODO unwrapped Markdown (so you can output headers) - # maybe in a transform, we grab the containers and move them - # "below" the code cell container? - # if embed_markdown_outputs is True, - # this should be top priority and we "mark" the container for the transform + # Note, this is different to the sphinx implementation, + # here we directly select a single output, based on the mime_priority, + # as opposed to output all mime types, and select in a post-transform + # (the mime_priority must then be set for the output format) + + # TODO how to output MyST Markdown? + # currently text/markdown is set to be rendered as CommonMark only, + # with headings dissallowed, + # to avoid "side effects" if the mime is discarded but contained + # targets, etc, and because we can't parse headings within containers. + # perhaps we could have a config option to allow this? + # - for non-commonmark, the text/markdown would always be considered + # the top priority, and all other mime types would be ignored. + # - for headings, we would also need to parsing the markdown + # at the "top-level", i.e. 
not nested in container(s) try: mime_type = next(x for x in mime_priority if x in output["data"]) @@ -426,21 +439,19 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: ) with create_figure_context(self, figure_options, line): - container = nodes.container(mime_type=mime_type) - with self.current_node_context(container, append=True): - _nodes = self.nb_renderer.render_mime_type( - MimeData( - mime_type, - output["data"][mime_type], - cell_metadata=metadata, - output_metadata=output.get("metadata", {}), - cell_index=cell_index, - output_index=output_index, - line=line, - ), - ) - self.current_node.extend(_nodes) - self.add_line_and_source_path_r([container], token) + _nodes = self.nb_renderer.render_mime_type( + MimeData( + mime_type, + output["data"][mime_type], + cell_metadata=metadata, + output_metadata=output.get("metadata", {}), + cell_index=cell_index, + output_index=output_index, + line=line, + ), + ) + self.current_node.extend(_nodes) + self.add_line_and_source_path_r(_nodes, token) else: self.create_warning( f"Unsupported output type: {output.output_type}", diff --git a/myst_nb/nb_glue/domain.py b/myst_nb/nb_glue/domain.py index 27562108..9f6ca353 100644 --- a/myst_nb/nb_glue/domain.py +++ b/myst_nb/nb_glue/domain.py @@ -5,27 +5,18 @@ from typing import List from sphinx.domains import Domain -from sphinx.ext.autodoc.directive import DummyOptionSpec -from sphinx.util.docutils import SphinxDirective from myst_nb.nb_glue.elements import ( PasteDirective, PasteFigureDirective, + PasteMathDirective, + PasteMystDirective, + PasteMystRole, PasteRole, PasteTextRole, ) -class DummyDirective(SphinxDirective): - required_arguments = 1 - final_argument_whitespace = True - has_content = False - option_spec = DummyOptionSpec() - - def run(self): - return [] - - class NbGlueDomain(Domain): """A sphinx domain for defining glue roles and directives.""" @@ -39,9 +30,15 @@ class NbGlueDomain(Domain): "": PasteDirective, "any": PasteDirective, "figure": PasteFigureDirective, - "math": DummyDirective, + "math": PasteMathDirective, + "myst": PasteMystDirective, + } + roles = { + "": PasteRole(), + "any": PasteRole(), + "text": PasteTextRole(), + "myst": PasteMystRole(), } - roles = {"": PasteRole(), "any": PasteRole(), "text": PasteTextRole()} def merge_domaindata(self, docnames: List[str], otherdata: dict) -> None: pass diff --git a/myst_nb/nb_glue/elements.py b/myst_nb/nb_glue/elements.py index 3549ef42..2b5b13cd 100644 --- a/myst_nb/nb_glue/elements.py +++ b/myst_nb/nb_glue/elements.py @@ -1,13 +1,14 @@ """Directives and roles which can be used by both docutils and sphinx.""" -from typing import Any, Dict, List, Tuple +from typing import Any, Dict, List, Optional, Tuple, Union +import attr from docutils import nodes from docutils.parsers.rst import Directive, directives from docutils.parsers.rst.states import Inliner from docutils.utils import unescape from myst_nb.loggers import DocutilsDocLogger, SphinxDocLogger -from myst_nb.render import MimeData, NbElementRenderer +from myst_nb.render import MimeData, NbElementRenderer, strip_latex_delimiters def is_sphinx(document) -> bool: @@ -110,16 +111,57 @@ def render_glue_output( ) +@attr.s +class RetrievedData: + """A class to store retrieved mime data.""" + + warning: Optional[str] = attr.ib() + data: Union[None, str, bytes] = attr.ib(default=None) + metadata: Dict[str, Any] = attr.ib(factory=dict) + nb_renderer: Optional[NbElementRenderer] = attr.ib(default=None) + + +def retrieve_mime_data( + document: nodes.document, key: str, 
mime_type: str +) -> RetrievedData: + """Retrieve the mime data from the document.""" + if "nb_renderer" not in document: + return RetrievedData("No 'nb_renderer' found on the document.") + nb_renderer: NbElementRenderer = document["nb_renderer"] + resources = nb_renderer.get_resources() + if "glue" not in resources: + return RetrievedData(f"No key {key!r} found in glue data.") + + if key not in resources["glue"]: + return RetrievedData(f"No key {key!r} found in glue data.") + + if mime_type not in resources["glue"][key].get("data", {}): + return RetrievedData(f"{key!r} does not contain {mime_type!r} data.") + + return RetrievedData( + None, + resources["glue"][key]["data"][mime_type], + resources["glue"][key].get("metadata", {}), + nb_renderer, + ) + + class PasteRole: """A role for pasting inline code outputs from notebooks.""" def get_source_info(self, lineno: int = None) -> Tuple[str, int]: + """Get source and line number.""" if lineno is None: lineno = self.lineno return self.inliner.reporter.get_source_and_line(lineno) # type: ignore def set_source_info(self, node: nodes.Node, lineno: int = None) -> None: - node.source, node.line = self.get_source_info(lineno) + """Set the source info for a node and its descendants.""" + source, line = self.get_source_info(lineno) + iterator = getattr(node, "findall", node.traverse) # findall for docutils 0.18 + for _node in iterator(include_self=True): + _node.source = source + _node.line = line def __call__( self, @@ -155,7 +197,7 @@ class PasteTextRole(PasteRole): """A role for pasting text outputs from notebooks.""" def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: - # now check if we have both key:format in the key + # check if we have both key:format in the key parts = self.text.rsplit(":", 1) if len(parts) == 2: key, formatting = parts @@ -165,35 +207,17 @@ def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: # now retrieve the data document = self.inliner.document - if "nb_renderer" not in document: - return [], [ - warning( - "No 'nb_renderer' found on the document.", document, self.lineno - ) - ] - nb_renderer: NbElementRenderer = document["nb_renderer"] - resources = nb_renderer.get_resources() - if "glue" not in resources: + result = retrieve_mime_data(document, key, "text/plain") + if result.warning is not None: return [], [ warning( - "No glue data found in the notebook resources.", + result.warning, document, self.lineno, ) ] - if key not in resources["glue"]: - return [], [ - warning(f"No key {key!r} found in glue data.", document, self.lineno) - ] - if "text/plain" not in resources["glue"][key].get("data", {}): - return [], [ - warning( - f"{key!r} does not contain 'text/plain' data.", - document, - self.lineno, - ) - ] - text = resources["glue"][key]["data"]["text/plain"].strip("'") + text = str(result.data).strip("'") + # If formatting is specified, see if we have a number of some kind if formatting: try: @@ -201,30 +225,98 @@ def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: text = f"{newtext:>{formatting}}" except ValueError: pass + node = nodes.inline(text, text, classes=["pasted-text"]) self.set_source_info(node) return [node], [] -class PasteDirective(Directive): - """A directive for pasting code outputs from notebooks.""" +class PasteMystRole(PasteRole): + """A role for pasting markdown outputs from notebooks as inline MyST Markdown.""" + + def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: + # retrieve the data + document = self.inliner.document + 
result = retrieve_mime_data(document, self.text, "text/markdown") + if result.warning is not None: + return [], [ + warning( + result.warning, + document, + self.lineno, + ) + ] + mime = MimeData( + "text/markdown", + result.data, + output_metadata=result.metadata, + line=self.lineno, + md_commonmark=False, + ) + _nodes = result.nb_renderer.render_markdown_inline(mime) + for node in _nodes: + self.set_source_info(node) + return _nodes, [] + + +class _PasteBaseDirective(Directive): required_arguments = 1 # the key final_argument_whitespace = True has_content = False + @property + def document(self) -> nodes.document: + return self.state.document + def get_source_info(self) -> Tuple[str, int]: """Get source and line number.""" return self.state_machine.get_source_and_line(self.lineno) def set_source_info(self, node: nodes.Node) -> None: - """Set source and line number to the node.""" - node.source, node.line = self.get_source_info() + """Set source and line number to the node and its descendants.""" + source, line = self.get_source_info() + iterator = getattr(node, "findall", node.traverse) # findall for docutils 0.18 + for _node in iterator(include_self=True): + _node.source = source + _node.line = line + + +class PasteMystDirective(_PasteBaseDirective): + """A directive for pasting markdown outputs from notebooks as MyST Markdown.""" + + def run(self) -> List[nodes.Node]: + """Run the directive.""" + result = retrieve_mime_data(self.document, self.arguments[0], "text/markdown") + if result.warning is not None: + return [ + warning( + result.warning, + self.document, + self.lineno, + ) + ] + mime = MimeData( + "text/markdown", + result.data, + output_metadata=result.metadata, + line=self.lineno, + md_commonmark=False, + md_headings=True, + ) + _nodes = result.nb_renderer.render_markdown(mime) + for node in _nodes: + self.set_source_info(node) + return _nodes + + +class PasteDirective(_PasteBaseDirective): + """A directive for pasting code outputs from notebooks.""" def run(self) -> List[nodes.Node]: """Run the directive.""" return render_glue_output( - self.arguments[0], self.state.document, self.lineno, self.set_source_info + self.arguments[0], self.document, self.lineno, self.set_source_info ) @@ -282,7 +374,7 @@ def run(self): elif not (isinstance(first_node, nodes.comment) and len(first_node) == 0): error = warning( "Figure caption must be a paragraph or empty comment.", - self.state.document, + self.document, self.lineno, ) return [figure_node, error] @@ -290,3 +382,70 @@ def run(self): figure_node += nodes.legend("", *node[1:]) return [figure_node] + + +class PasteMathDirective(_PasteBaseDirective): + """A directive for pasting latex outputs from notebooks as math.""" + + option_spec = { + "label": directives.unchanged, + "name": directives.unchanged, + "class": directives.class_option, + "nowrap": directives.flag, + } + + def run(self) -> List[nodes.Node]: + """Run the directive.""" + result = retrieve_mime_data(self.document, self.arguments[0], "text/latex") + if result.warning is not None: + return [ + warning( + result.warning, + self.document, + self.lineno, + ) + ] + latex = strip_latex_delimiters(str(result.data)) + label = self.options.get("label", self.options.get("name")) + node = nodes.math_block( + latex, + latex, + nowrap="nowrap" in self.options, + label=label, + number=None, + classes=["pasted-math"] + (self.options.get("class") or []), + ) + self.add_name(node) + self.set_source_info(node) + if is_sphinx(self.document): + return self.add_target(node) + return [node] + 
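+    # Usage sketch (a hypothetical MyST snippet, assuming a ``sym_eq`` key was
+    # glued with ``text/latex`` output in the same notebook)::
+    #
+    #     ```{glue:math} sym_eq
+    #     :label: eq-sym
+    #     ```
+    #
+    # ``run`` above builds the ``math_block`` node; under Sphinx,
+    # ``add_target`` below registers the label with the math domain so the
+    # equation can be cross-referenced with the ``{eq}`` role.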
+ def add_target(self, node: nodes.math_block) -> List[nodes.Node]: + """Add target to the node.""" + # adapted from sphinx.directives.patches.MathDirective + + env = self.state.document.settings.env + + node["docname"] = env.docname + + # assign label automatically if math_number_all enabled + if node["label"] == "" or (env.config.math_number_all and not node["label"]): + seq = env.new_serialno("sphinx.ext.math#equations") + node["label"] = "%s:%d" % (env.docname, seq) + + # no targets and numbers are needed + if not node["label"]: + return [node] + + # register label to domain + domain = env.get_domain("math") + domain.note_equation(env.docname, node["label"], location=node) + node["number"] = domain.get_equation_number_for(node["label"]) + + # add target node + node_id = nodes.make_id("equation-%s" % node["label"]) + target = nodes.target("", "", ids=[node_id]) + self.document.note_explicit_target(target) + + return [target, node] diff --git a/myst_nb/render.py b/myst_nb/render.py index afdd1366..33d4ea1e 100644 --- a/myst_nb/render.py +++ b/myst_nb/render.py @@ -53,6 +53,14 @@ class MimeData: """Index of the output in the cell""" line: Optional[int] = attr.ib(default=None) """Source line of the cell""" + md_commonmark: bool = attr.ib(default=True) + """Whether to parse the content as "isolated" CommonMark""" + # as opposed to using the current render and its environment + md_headings: bool = attr.ib(default=False) + """Whether to render headings in text/markdown blocks.""" + # we can only do this if know the content will be rendered into the main body + # of the document, e.g. not inside a container node + # (otherwise it will break the structure of the AST) @property def string(self) -> str: @@ -295,34 +303,27 @@ def render_unknown(self, data: MimeData) -> List[nodes.Element]: def render_markdown(self, data: MimeData) -> List[nodes.Element]: """Render a notebook text/markdown mime data output.""" # create a container to parse the markdown into - temp_container = nodes.container() + temp_container = nodes.Element() - # setup temporary renderer config + # store the current renderer config md = self.renderer.md match_titles = self.renderer.md_env.get("match_titles", None) - if self.renderer.get_cell_render_config( - data.cell_metadata, "embed_markdown_outputs" - ): - # this configuration is used in conjunction with a transform, - # which move this content outside & below the output container - # in this way the Markdown output can contain headings, - # and not break the structure of the docutils AST - # TODO create transform and for sphinx prioritise this output for all output formats - self.renderer.md_env["match_titles"] = True - else: - # otherwise we render as simple Markdown and headings are not allowed - self.renderer.md_env["match_titles"] = False + + # setup temporary renderer config + self.renderer.md_env["match_titles"] = data.md_headings + if data.md_commonmark: self.renderer.md = create_md_parser( MdParserConfig(commonmark_only=True), self.renderer.__class__ ) - # parse markdown - with self.renderer.current_node_context(temp_container): - self.renderer.nested_render_text(data.string, data.line) - - # restore renderer config - self.renderer.md = md - self.renderer.md_env["match_titles"] = match_titles + try: + # parse markdown + with self.renderer.current_node_context(temp_container): + self.renderer.nested_render_text(data.string, data.line) + finally: + # restore renderer config + self.renderer.md = md + self.renderer.md_env["match_titles"] = match_titles return 
temp_container.children @@ -465,8 +466,23 @@ def render_unknown_inline(self, data: MimeData) -> List[nodes.Element]: def render_markdown_inline(self, data: MimeData) -> List[nodes.Element]: """Render a notebook text/markdown mime data output.""" - # TODO render_markdown_inline - return [] + # TODO upstream this to myst-parser (replace MockState.inline_text)? + if data.md_commonmark: + parser = create_md_parser( + MdParserConfig(commonmark_only=True), self.renderer.__class__ + ) + tokens = parser.parseInline(data.string) + else: + tokens = self.renderer.md.parseInline(data.string, self.renderer.md_env) + if data.line is not None: + for token in tokens: + if token.map: + token.map = [token.map[0] + data.line, token.map[1] + data.line] + node = nodes.Element() # anonymous container for parsing + with self.renderer.current_node_context(node): + self.renderer._render_tokens(tokens) + + return node.children def render_text_plain_inline(self, data: MimeData) -> List[nodes.Element]: """Render a notebook text/plain mime data output.""" diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index 9fe9a81c..516fbe72 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -42,9 +42,12 @@ ) SPHINX_LOGGER = sphinx_logging.getLogger(__name__) -UNSET = "--unset--" OUTPUT_FOLDER = "jupyter_execute" +# used for deprecated config values, +# so we can tell if they have been set by a user, and warn them +UNSET = "--unset--" + def sphinx_setup(app: Sphinx): """Initialize Sphinx extension.""" @@ -530,14 +533,24 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: self.current_node.extend(_nodes) elif output.output_type in ("display_data", "execute_result"): - # TODO these output have their own 'metadata' key, - # we should parse these to render_mime_type - - # TODO unwrapped Markdown (so you can output headers) - # maybe in a transform, we grab the containers and move them - # "below" the code cell container? - # if embed_markdown_outputs is True, - # this should be top priority and we "mark" the container for the transform + # Note, this is different to the docutils implementation, + # where we directly select a single output, based on the mime_priority. + # Here, we do not know the mime priority until we know the output format + # so we output all the outputs during this parsing phase + # (this is what sphinx caches as "output format agnostic" AST), + # and replace the mime_bundle with the format specific output + # in a post-transform (run per output format on the cached AST) + + # TODO how to output MyST Markdown? + # currently text/markdown is set to be rendered as CommonMark only, + # with headings dissallowed, + # to avoid "side effects" if the mime is discarded but contained + # targets, etc, and because we can't parse headings within containers. + # perhaps we could have a config option to allow this? + # - for non-commonmark, the text/markdown would always be considered + # the top priority, and all other mime types would be ignored. + # - for headings, we would also need to parsing the markdown + # at the "top-level", i.e. not nested in container(s) figure_options = None with suppress(KeyError): @@ -546,9 +559,6 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: ) with create_figure_context(self, figure_options, line): - # We differ from the docutils-only renderer here, because we need to - # cache all rendered outputs, then choose one from the priority list - # in a post-transform, once we know which builder is required. 
mime_bundle = nodes.container(nb_element="mime_bundle") with self.current_node_context(mime_bundle): for mime_type, data in output["data"].items(): @@ -636,8 +646,10 @@ def run(self, **kwargs: Any) -> None: location=node, ) node.parent.remove(node) + elif not node.children[index].children: + node.parent.remove(node) else: - node.replace_self(node.children[index]) + node.replace_self(node.children[index].children) class NbDownloadRole(ReferenceRole): diff --git a/tests/test_glue/test_parser.sphinx4.xml b/tests/test_glue/test_parser.sphinx4.xml index eff7afc6..608568a8 100644 --- a/tests/test_glue/test_parser.sphinx4.xml +++ b/tests/test_glue/test_parser.sphinx4.xml @@ -2,127 +2,79 @@ <section classes="tex2jax_ignore mathjax_ignore" ids="glue-tests" names="glue\ tests"> <title> Glue Tests - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> from myst_nb import glue - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="2" cell_metadata="{}" classes="cell" exec_count="2" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> glue("key_text1", "text1") glue("key_float", 3.14159) - <CellOutputNode classes="cell_output"> - <literal_block classes="output text_plain" language="myst-ansi" linenos="False" xml:space="preserve"> - 'text1' - <literal_block classes="output text_plain" language="myst-ansi" linenos="False" xml:space="preserve"> - 3.14159 - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="3" cell_metadata="{}" classes="cell" exec_count="3" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> glue("key_undisplayed", "undisplayed", display=False) - <CellOutputNode classes="cell_output"> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="4" cell_metadata="{'scrolled': True}" classes="cell" exec_count="4" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> import pandas as pd df = pd.DataFrame({"header": [1, 2, 3]}) glue("key_df", df) - <CellOutputNode classes="cell_output"> - <raw classes="output text_html" format="html" xml:space="preserve"> - <div> - <style scoped> - .dataframe tbody tr th:only-of-type { - vertical-align: middle; - } - - .dataframe tbody tr th { - vertical-align: top; - } - - .dataframe thead th { - text-align: right; - } - </style> - <table border="1" class="dataframe"> - <thead> - <tr style="text-align: right;"> - <th></th> - <th>header</th> - </tr> - </thead> - <tbody> - <tr> - <th>0</th> - <td>1</td> - </tr> - <tr> - <th>1</th> - <td>2</td> - </tr> - <tr> - <th>2</th> - <td>3</td> - </tr> - </tbody> - </table> - </div> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="5" cell_metadata="{}" classes="cell" exec_count="5" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" 
xml:space="preserve"> import matplotlib.pyplot as plt plt.plot([1, 2, 3]) glue("key_plt", plt.gcf(), display=False) - <CellOutputNode classes="cell_output"> - <image candidates="{'*': '_build/jupyter_execute/with_glue_5_1.png'}" uri="_build/jupyter_execute/with_glue_5_1.png"> + <container classes="cell_output" nb_element="cell_code_output"> + <image candidates="{'*': '_build/jupyter_execute/8b394c6cdc09dc10c73e2d5f785aedc8eee615a4d219218f09d6732f7f8ef150.png'}" uri="_build/jupyter_execute/8b394c6cdc09dc10c73e2d5f785aedc8eee615a4d219218f09d6732f7f8ef150.png"> <section ids="referencing-the-figs" names="referencing\ the\ figs"> <title> Referencing the figs <paragraph> - <inline classes="pasted-inline"> - <literal classes="output text_plain" language="none"> - 'text1' + <literal classes="output text_plain" language="myst-ansi"> + 'text1' , - <inline classes="pasted-inline"> - <image candidates="{'*': '_build/jupyter_execute/with_glue_5_0.png'}" uri="_build/jupyter_execute/with_glue_5_0.png"> - <CellOutputNode classes="cell_output"> - <raw classes="output text_html" format="html" xml:space="preserve"> - <div> - <style scoped> - .dataframe tbody tr th:only-of-type { - vertical-align: middle; - } - - .dataframe tbody tr th { - vertical-align: top; - } - - .dataframe thead th { - text-align: right; - } - </style> - <table border="1" class="dataframe"> - <thead> - <tr style="text-align: right;"> - <th></th> - <th>header</th> - </tr> - </thead> - <tbody> - <tr> - <th>0</th> - <td>1</td> - </tr> - <tr> - <th>1</th> - <td>2</td> - </tr> - <tr> - <th>2</th> - <td>3</td> - </tr> - </tbody> - </table> - </div> + <image candidates="{'*': '_build/jupyter_execute/8b394c6cdc09dc10c73e2d5f785aedc8eee615a4d219218f09d6732f7f8ef150.png'}" uri="_build/jupyter_execute/8b394c6cdc09dc10c73e2d5f785aedc8eee615a4d219218f09d6732f7f8ef150.png"> + <raw classes="output text_html" format="html" xml:space="preserve"> + <div> + <style scoped> + .dataframe tbody tr th:only-of-type { + vertical-align: middle; + } + + .dataframe tbody tr th { + vertical-align: top; + } + + .dataframe thead th { + text-align: right; + } + </style> + <table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>header</th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>1</td> + </tr> + <tr> + <th>1</th> + <td>2</td> + </tr> + <tr> + <th>2</th> + <td>3</td> + </tr> + </tbody> + </table> + </div> <paragraph> and <inline classes="pasted-text"> @@ -132,28 +84,25 @@ and formatted <inline classes="pasted-text"> 3.14 - <CellOutputNode classes="cell_output"> - <image candidates="{'*': '_build/jupyter_execute/with_glue_5_0.png'}" uri="_build/jupyter_execute/with_glue_5_0.png"> + <image candidates="{'*': '_build/jupyter_execute/8b394c6cdc09dc10c73e2d5f785aedc8eee615a4d219218f09d6732f7f8ef150.png'}" uri="_build/jupyter_execute/8b394c6cdc09dc10c73e2d5f785aedc8eee615a4d219218f09d6732f7f8ef150.png"> <paragraph> and <inline classes="pasted-text"> undisplayed inline… <figure ids="abc" names="abc"> - <CellOutputNode classes="cell_output"> - <image candidates="{'*': '_build/jupyter_execute/with_glue_5_0.png'}" uri="_build/jupyter_execute/with_glue_5_0.png"> + <image candidates="{'*': '_build/jupyter_execute/8b394c6cdc09dc10c73e2d5f785aedc8eee615a4d219218f09d6732f7f8ef150.png'}" uri="_build/jupyter_execute/8b394c6cdc09dc10c73e2d5f785aedc8eee615a4d219218f09d6732f7f8ef150.png"> <caption> A caption…. 
```## A test title - <inline classes="pasted-inline"> - <literal classes="output text_plain" language="none"> - 'text1' + <literal classes="output text_plain" language="myst-ansi"> + 'text1' <section ids="math" names="math"> <title> Math - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="8" cell_metadata="{}" classes="cell" exec_count="6" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> import sympy as sym f = sym.Function('f') @@ -161,9 +110,6 @@ n = sym.symbols(r'\alpha') f = y(n)-2*y(n-1/sym.pi)-5*y(n-2) glue("sym_eq", sym.rsolve(f,y(n),[1,4])) - <CellOutputNode classes="cell_output"> - <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> - \displaystyle \left(\sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} - \frac{2 \sqrt{5} i}{5}\right) + \left(- \sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} + \frac{2 \sqrt{5} i}{5}\right) <target refid="equation-eq-sym"> <math_block classes="pasted-math" docname="with_glue" ids="equation-eq-sym" label="eq-sym" nowrap="False" number="1" xml:space="preserve"> \displaystyle \left(\sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} - \frac{2 \sqrt{5} i}{5}\right) + \left(- \sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} + \frac{2 \sqrt{5} i}{5}\right) diff --git a/tests/test_render_outputs/test_complex_outputs.xml b/tests/test_render_outputs/test_complex_outputs.xml index 16c177bd..36f0df46 100644 --- a/tests/test_render_outputs/test_complex_outputs.xml +++ b/tests/test_render_outputs/test_complex_outputs.xml @@ -108,8 +108,7 @@ <literal_block language="ipython3" linenos="False" xml:space="preserve"> Image('example.jpg',height=400) <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="image/jpeg"> - <image candidates="{'*': '_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg'}" uri="_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg"> + <image candidates="{'*': '_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg'}" uri="_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg"> <section ids="displaying-a-plot-with-its-code" names="displaying\ a\ plot\ with\ its\ code"> <title> Displaying a plot with its code @@ -125,8 +124,7 @@ plt.ylabel(r'a y label with latex $\alpha$') plt.legend(); <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="image/png"> - <image candidates="{'*': '_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png'}" uri="_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png"> + <image candidates="{'*': '_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png'}" uri="_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png"> <section classes="tex2jax_ignore mathjax_ignore" ids="tables-with-pandas" names="tables\ (with\ pandas)"> <title> Tables (with pandas) @@ -141,57 +139,56 @@ df.set_index(['a','b']) df.round(3) <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="text/html"> - <raw classes="output text_html" format="html" xml:space="preserve"> - <div> - <style scoped> - .dataframe tbody tr th:only-of-type { - vertical-align: middle; - } - - 
.dataframe tbody tr th { - vertical-align: top; - } - - .dataframe thead th { - text-align: right; - } - </style> - <table border="1" class="dataframe"> - <thead> - <tr style="text-align: right;"> - <th></th> - <th>a</th> - <th>b</th> - <th>c</th> - <th>d</th> - </tr> - </thead> - <tbody> - <tr> - <th>0</th> - <td>$\delta$</td> - <td>l</td> - <td>0.391</td> - <td>0.607</td> - </tr> - <tr> - <th>1</th> - <td>x</td> - <td>m</td> - <td>0.132</td> - <td>0.205</td> - </tr> - <tr> - <th>2</th> - <td>y</td> - <td>n</td> - <td>0.969</td> - <td>0.726</td> - </tr> - </tbody> - </table> - </div> + <raw classes="output text_html" format="html" xml:space="preserve"> + <div> + <style scoped> + .dataframe tbody tr th:only-of-type { + vertical-align: middle; + } + + .dataframe tbody tr th { + vertical-align: top; + } + + .dataframe thead th { + text-align: right; + } + </style> + <table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>a</th> + <th>b</th> + <th>c</th> + <th>d</th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>$\delta$</td> + <td>l</td> + <td>0.391</td> + <td>0.607</td> + </tr> + <tr> + <th>1</th> + <td>x</td> + <td>m</td> + <td>0.132</td> + <td>0.205</td> + </tr> + <tr> + <th>2</th> + <td>y</td> + <td>n</td> + <td>0.969</td> + <td>0.726</td> + </tr> + </tbody> + </table> + </div> <section classes="tex2jax_ignore mathjax_ignore" ids="equations-with-ipython-or-sympy" names="equations\ (with\ ipython\ or\ sympy)"> <title> Equations (with ipython or sympy) @@ -200,9 +197,8 @@ <literal_block language="ipython3" linenos="False" xml:space="preserve"> Latex('$$ a = b+c $$') <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="text/latex"> - <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> - a = b+c + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + a = b+c <paragraph> The plotting code for a sympy equation (=@eqn:example_sympy). 
<container cell_index="24" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_sym', 'placement': 'H', 'widefigure': False}, 'equation': {'environment': 'equation', 'label': 'eqn:example_sympy'}}}" classes="cell" exec_count="7" nb_element="cell_code"> @@ -213,17 +209,15 @@ f = y(n)-2*y(n-1/sym.pi)-5*y(n-2) sym.rsolve(f,y(n),[1,4]) <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="image/png"> - <image candidates="{'*': '_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png'}" uri="_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png"> + <image candidates="{'*': '_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png'}" uri="_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png"> <container cell_index="25" cell_metadata="{}" classes="cell" exec_count="7" nb_element="cell_code"> <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> from IPython.display import display, Markdown display(Markdown('**_some_ markdown**')) <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="text/markdown"> - <paragraph> - <strong> - <emphasis> - some - markdown + <paragraph> + <strong> + <emphasis> + some + markdown diff --git a/tests/test_render_outputs/test_complex_outputs_latex.xml b/tests/test_render_outputs/test_complex_outputs_latex.xml index 28d2175a..50c6d24b 100644 --- a/tests/test_render_outputs/test_complex_outputs_latex.xml +++ b/tests/test_render_outputs/test_complex_outputs_latex.xml @@ -108,8 +108,7 @@ <literal_block language="ipython3" linenos="False" xml:space="preserve"> Image('example.jpg',height=400) <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="image/jpeg"> - <image candidates="{'*': '_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg'}" uri="_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg"> + <image candidates="{'*': '_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg'}" uri="_build/jupyter_execute/a4c9580c74dacf6f3316a3bd2e2a347933aa4463834dcf1bb8f20b4fcb476ae1.jpg"> <section ids="displaying-a-plot-with-its-code" names="displaying\ a\ plot\ with\ its\ code"> <title> Displaying a plot with its code @@ -125,8 +124,7 @@ plt.ylabel(r'a y label with latex $\alpha$') plt.legend(); <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="image/png"> - <image candidates="{'*': '_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png'}" uri="_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png"> + <image candidates="{'*': '_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png'}" uri="_build/jupyter_execute/16832f45917c1c9862c50f0948f64a498402d6ccde1f3a291da17f240797b160.png"> <section classes="tex2jax_ignore mathjax_ignore" ids="tables-with-pandas" names="tables\ (with\ pandas)"> <title> Tables (with pandas) @@ -141,17 +139,16 @@ df.set_index(['a','b']) df.round(3) <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="text/latex"> - <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> - 
\begin{tabular}{lllrr} - \toprule - {} & a & b & c & d \\ - \midrule - 0 & \$\textbackslash delta\$ & l & 0.391 & 0.607 \\ - 1 & x & m & 0.132 & 0.205 \\ - 2 & y & n & 0.969 & 0.726 \\ - \bottomrule - \end{tabular} + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + \begin{tabular}{lllrr} + \toprule + {} & a & b & c & d \\ + \midrule + 0 & \$\textbackslash delta\$ & l & 0.391 & 0.607 \\ + 1 & x & m & 0.132 & 0.205 \\ + 2 & y & n & 0.969 & 0.726 \\ + \bottomrule + \end{tabular} <section classes="tex2jax_ignore mathjax_ignore" ids="equations-with-ipython-or-sympy" names="equations\ (with\ ipython\ or\ sympy)"> <title> Equations (with ipython or sympy) @@ -160,9 +157,8 @@ <literal_block language="ipython3" linenos="False" xml:space="preserve"> Latex('$$ a = b+c $$') <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="text/latex"> - <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> - a = b+c + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + a = b+c <paragraph> The plotting code for a sympy equation (=@eqn:example_sympy). <container cell_index="24" cell_metadata="{'ipub': {'code': {'asfloat': True, 'caption': '', 'label': 'code:example_sym', 'placement': 'H', 'widefigure': False}, 'equation': {'environment': 'equation', 'label': 'eqn:example_sympy'}}}" classes="cell" exec_count="7" nb_element="cell_code"> @@ -173,17 +169,15 @@ f = y(n)-2*y(n-1/sym.pi)-5*y(n-2) sym.rsolve(f,y(n),[1,4]) <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="image/png"> - <image candidates="{'*': '_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png'}" uri="_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png"> + <image candidates="{'*': '_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png'}" uri="_build/jupyter_execute/8c43e5c8cccf697754876b7fec1b0a9b731d7900bb585e775a5fa326b4de8c5a.png"> <container cell_index="25" cell_metadata="{}" classes="cell" exec_count="7" nb_element="cell_code"> <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> from IPython.display import display, Markdown display(Markdown('**_some_ markdown**')) <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="text/markdown"> - <paragraph> - <strong> - <emphasis> - some - markdown + <paragraph> + <strong> + <emphasis> + some + markdown diff --git a/tests/test_render_outputs/test_merge_streams.xml b/tests/test_render_outputs/test_merge_streams.xml index 75499729..88cc4251 100644 --- a/tests/test_render_outputs/test_merge_streams.xml +++ b/tests/test_render_outputs/test_merge_streams.xml @@ -19,6 +19,5 @@ stderr1 stderr2 stderr3 - <container mime_type="text/plain"> - <literal_block classes="output text_plain" language="myst-ansi" linenos="False" xml:space="preserve"> - 1 + <literal_block classes="output text_plain" language="myst-ansi" linenos="False" xml:space="preserve"> + 1 diff --git a/tests/test_render_outputs/test_metadata_figure.xml b/tests/test_render_outputs/test_metadata_figure.xml index f78f18d4..15a4691f 100644 --- a/tests/test_render_outputs/test_metadata_figure.xml +++ b/tests/test_render_outputs/test_metadata_figure.xml @@ -9,8 +9,7 @@ Image("fun-fish.png") <container classes="cell_output" 
nb_element="cell_code_output"> <figure ids="fun-fish" names="fun-fish"> - <container mime_type="image/png"> - <image candidates="{'*': '_build/jupyter_execute/3eacaf6adad1a4305807616181bbee897bb29177e79e2092ddd0264b848ddb4e.png'}" uri="_build/jupyter_execute/3eacaf6adad1a4305807616181bbee897bb29177e79e2092ddd0264b848ddb4e.png"> + <image candidates="{'*': '_build/jupyter_execute/3eacaf6adad1a4305807616181bbee897bb29177e79e2092ddd0264b848ddb4e.png'}" uri="_build/jupyter_execute/3eacaf6adad1a4305807616181bbee897bb29177e79e2092ddd0264b848ddb4e.png"> <caption> Hey everyone its <strong> diff --git a/tests/test_render_outputs/test_metadata_image.xml b/tests/test_render_outputs/test_metadata_image.xml index d2b9b167..c43bbce1 100644 --- a/tests/test_render_outputs/test_metadata_image.xml +++ b/tests/test_render_outputs/test_metadata_image.xml @@ -8,5 +8,4 @@ from IPython.display import Image Image("fun-fish.png") <container classes="cell_output" nb_element="cell_code_output"> - <container mime_type="image/png"> - <image alt="fun-fish" candidates="{'*': '_build/jupyter_execute/3eacaf6adad1a4305807616181bbee897bb29177e79e2092ddd0264b848ddb4e.png'}" classes="shadow bg-primary" uri="_build/jupyter_execute/3eacaf6adad1a4305807616181bbee897bb29177e79e2092ddd0264b848ddb4e.png" width="300px"> + <image alt="fun-fish" candidates="{'*': '_build/jupyter_execute/3eacaf6adad1a4305807616181bbee897bb29177e79e2092ddd0264b848ddb4e.png'}" classes="shadow bg-primary" uri="_build/jupyter_execute/3eacaf6adad1a4305807616181bbee897bb29177e79e2092ddd0264b848ddb4e.png" width="300px"> From 1e54d1ee3834e1c9ddec857f59975bc2be69d17a Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sat, 8 Jan 2022 03:05:19 +0100 Subject: [PATCH 44/75] Update test for sphinx 3 --- tests/test_glue/test_parser.sphinx3.xml | 172 ++++++------------ .../test_metadata_figure.xml | 2 +- 2 files changed, 60 insertions(+), 114 deletions(-) diff --git a/tests/test_glue/test_parser.sphinx3.xml b/tests/test_glue/test_parser.sphinx3.xml index af80e267..0166a8b2 100644 --- a/tests/test_glue/test_parser.sphinx3.xml +++ b/tests/test_glue/test_parser.sphinx3.xml @@ -2,127 +2,79 @@ <section classes="tex2jax_ignore mathjax_ignore" ids="glue-tests" names="glue\ tests"> <title> Glue Tests - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> from myst_nb import glue - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="2" cell_metadata="{}" classes="cell" exec_count="2" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> glue("key_text1", "text1") glue("key_float", 3.14159) - <CellOutputNode classes="cell_output"> - <literal_block classes="output text_plain" language="myst-ansi" linenos="False" xml:space="preserve"> - 'text1' - <literal_block classes="output text_plain" language="myst-ansi" linenos="False" xml:space="preserve"> - 3.14159 - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="3" cell_metadata="{}" classes="cell" exec_count="3" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block 
language="ipython3" linenos="False" xml:space="preserve"> glue("key_undisplayed", "undisplayed", display=False) - <CellOutputNode classes="cell_output"> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="4" cell_metadata="{'scrolled': True}" classes="cell" exec_count="4" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> import pandas as pd df = pd.DataFrame({"header": [1, 2, 3]}) glue("key_df", df) - <CellOutputNode classes="cell_output"> - <raw classes="output text_html" format="html" xml:space="preserve"> - <div> - <style scoped> - .dataframe tbody tr th:only-of-type { - vertical-align: middle; - } - - .dataframe tbody tr th { - vertical-align: top; - } - - .dataframe thead th { - text-align: right; - } - </style> - <table border="1" class="dataframe"> - <thead> - <tr style="text-align: right;"> - <th></th> - <th>header</th> - </tr> - </thead> - <tbody> - <tr> - <th>0</th> - <td>1</td> - </tr> - <tr> - <th>1</th> - <td>2</td> - </tr> - <tr> - <th>2</th> - <td>3</td> - </tr> - </tbody> - </table> - </div> - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="5" cell_metadata="{}" classes="cell" exec_count="5" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> import matplotlib.pyplot as plt plt.plot([1, 2, 3]) glue("key_plt", plt.gcf(), display=False) - <CellOutputNode classes="cell_output"> - <image candidates="{'*': '_build/jupyter_execute/with_glue_5_1.png'}" uri="_build/jupyter_execute/with_glue_5_1.png"> + <container classes="cell_output" nb_element="cell_code_output"> + <image candidates="{'*': '_build/jupyter_execute/8b394c6cdc09dc10c73e2d5f785aedc8eee615a4d219218f09d6732f7f8ef150.png'}" uri="_build/jupyter_execute/8b394c6cdc09dc10c73e2d5f785aedc8eee615a4d219218f09d6732f7f8ef150.png"> <section ids="referencing-the-figs" names="referencing\ the\ figs"> <title> Referencing the figs <paragraph> - <inline classes="pasted-inline"> - <literal classes="output text_plain" language="none"> - 'text1' + <literal classes="output text_plain" language="myst-ansi"> + 'text1' , - <inline classes="pasted-inline"> - <image candidates="{'*': '_build/jupyter_execute/with_glue_5_0.png'}" uri="_build/jupyter_execute/with_glue_5_0.png"> - <CellOutputNode classes="cell_output"> - <raw classes="output text_html" format="html" xml:space="preserve"> - <div> - <style scoped> - .dataframe tbody tr th:only-of-type { - vertical-align: middle; - } - - .dataframe tbody tr th { - vertical-align: top; - } - - .dataframe thead th { - text-align: right; - } - </style> - <table border="1" class="dataframe"> - <thead> - <tr style="text-align: right;"> - <th></th> - <th>header</th> - </tr> - </thead> - <tbody> - <tr> - <th>0</th> - <td>1</td> - </tr> - <tr> - <th>1</th> - <td>2</td> - </tr> - <tr> - <th>2</th> - <td>3</td> - </tr> - </tbody> - </table> - </div> + <image candidates="{'*': '_build/jupyter_execute/8b394c6cdc09dc10c73e2d5f785aedc8eee615a4d219218f09d6732f7f8ef150.png'}" uri="_build/jupyter_execute/8b394c6cdc09dc10c73e2d5f785aedc8eee615a4d219218f09d6732f7f8ef150.png"> + <raw classes="output text_html" format="html" xml:space="preserve"> + <div> + <style scoped> + .dataframe tbody tr th:only-of-type { + vertical-align: middle; + } + + .dataframe tbody tr th { + vertical-align: top; 
+ } + + .dataframe thead th { + text-align: right; + } + </style> + <table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>header</th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>1</td> + </tr> + <tr> + <th>1</th> + <td>2</td> + </tr> + <tr> + <th>2</th> + <td>3</td> + </tr> + </tbody> + </table> + </div> <paragraph> and <inline classes="pasted-text"> @@ -132,28 +84,25 @@ and formatted <inline classes="pasted-text"> 3.14 - <CellOutputNode classes="cell_output"> - <image candidates="{'*': '_build/jupyter_execute/with_glue_5_0.png'}" uri="_build/jupyter_execute/with_glue_5_0.png"> + <image candidates="{'*': '_build/jupyter_execute/8b394c6cdc09dc10c73e2d5f785aedc8eee615a4d219218f09d6732f7f8ef150.png'}" uri="_build/jupyter_execute/8b394c6cdc09dc10c73e2d5f785aedc8eee615a4d219218f09d6732f7f8ef150.png"> <paragraph> and <inline classes="pasted-text"> undisplayed inline… <figure align="default" ids="abc" names="abc"> - <CellOutputNode classes="cell_output"> - <image candidates="{'*': '_build/jupyter_execute/with_glue_5_0.png'}" uri="_build/jupyter_execute/with_glue_5_0.png"> + <image candidates="{'*': '_build/jupyter_execute/8b394c6cdc09dc10c73e2d5f785aedc8eee615a4d219218f09d6732f7f8ef150.png'}" uri="_build/jupyter_execute/8b394c6cdc09dc10c73e2d5f785aedc8eee615a4d219218f09d6732f7f8ef150.png"> <caption> A caption…. ```## A test title - <inline classes="pasted-inline"> - <literal classes="output text_plain" language="none"> - 'text1' + <literal classes="output text_plain" language="myst-ansi"> + 'text1' <section ids="math" names="math"> <title> Math - <CellNode cell_type="code" classes="cell"> - <CellInputNode classes="cell_input"> + <container cell_index="8" cell_metadata="{}" classes="cell" exec_count="6" nb_element="cell_code"> + <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> import sympy as sym f = sym.Function('f') @@ -161,9 +110,6 @@ n = sym.symbols(r'\alpha') f = y(n)-2*y(n-1/sym.pi)-5*y(n-2) glue("sym_eq", sym.rsolve(f,y(n),[1,4])) - <CellOutputNode classes="cell_output"> - <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> - \displaystyle \left(\sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} - \frac{2 \sqrt{5} i}{5}\right) + \left(- \sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} + \frac{2 \sqrt{5} i}{5}\right) <target refid="equation-eq-sym"> <math_block classes="pasted-math" docname="with_glue" ids="equation-eq-sym" label="eq-sym" nowrap="False" number="1" xml:space="preserve"> \displaystyle \left(\sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} - \frac{2 \sqrt{5} i}{5}\right) + \left(- \sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} + \frac{2 \sqrt{5} i}{5}\right) diff --git a/tests/test_render_outputs/test_metadata_figure.xml b/tests/test_render_outputs/test_metadata_figure.xml index 15a4691f..852087bd 100644 --- a/tests/test_render_outputs/test_metadata_figure.xml +++ b/tests/test_render_outputs/test_metadata_figure.xml @@ -8,7 +8,7 @@ from IPython.display import Image Image("fun-fish.png") <container classes="cell_output" nb_element="cell_code_output"> - <figure ids="fun-fish" names="fun-fish"> + <figure align="default" ids="fun-fish" names="fun-fish"> <image candidates="{'*': '_build/jupyter_execute/3eacaf6adad1a4305807616181bbee897bb29177e79e2092ddd0264b848ddb4e.png'}" uri="_build/jupyter_execute/3eacaf6adad1a4305807616181bbee897bb29177e79e2092ddd0264b848ddb4e.png"> <caption> Hey everyone its From 
b1bfa59c896e6ab0349e47194cf7ac6afd4db0af Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sat, 8 Jan 2022 03:17:52 +0100 Subject: [PATCH 45/75] Test fixes --- .github/workflows/tests.yml | 2 +- myst_nb/execute.py | 5 ++++- setup.cfg | 2 +- tests/test_execute.py | 10 ++++++++-- 4 files changed, 14 insertions(+), 5 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index b34018a1..2040c95f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -26,7 +26,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - python-version: [3.6, 3.7, 3.8, 3.9] + python-version: [3.7, 3.8, 3.9] sphinx: [">=3,<4", ">=4,<5"] include: - os: windows-latest diff --git a/myst_nb/execute.py b/myst_nb/execute.py index eaf991b2..a3f454b0 100644 --- a/myst_nb/execute.py +++ b/myst_nb/execute.py @@ -56,7 +56,10 @@ def execute_notebook( # path should only be None when using docutils programmatically, # e.g. source="<string>" - path = Path(source) if Path(source).is_file() else None + try: + path = Path(source) if Path(source).is_file() else None + except OSError: + path = None # occurs on Windows for `source="<string>"` exec_metadata: Optional[ExecutionResult] = None diff --git a/setup.cfg b/setup.cfg index d0a5b49c..3df8274f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -48,7 +48,7 @@ install_requires = sphinx>=3.5,<5 sphinx-togglebutton~=0.2.2 typing-extensions -python_requires = >=3.6 +python_requires = >=3.7 include_package_data = True zip_safe = True diff --git a/tests/test_execute.py b/tests/test_execute.py index 7ab56aea..46948a68 100644 --- a/tests/test_execute.py +++ b/tests/test_execute.py @@ -10,8 +10,14 @@ def regress_nb_doc(file_regression, sphinx_run, check_nbs): sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb", encoding="utf8" ) finally: - doctree = sphinx_run.get_doctree() - file_regression.check(doctree.pformat(), extension=".xml", encoding="utf8") + doctree_string = sphinx_run.get_doctree().pformat() + # TODO this is a difference in the hashing on the CI, + # with complex_outputs_unrun.ipynb equation PNG, after execution + doctree_string = doctree_string.replace( + "438c56ea3dcf99d86cd64df1b23e2b436afb25846434efb1cfec7b660ef01127", + "e2dfbe330154316cfb6f3186e8f57fc4df8aee03b0303ed1345fc22cd51f66de", + ) + file_regression.check(doctree_string, extension=".xml", encoding="utf8") @pytest.mark.sphinx_params("basic_unrun.ipynb", conf={"nb_execution_mode": "auto"}) From ed688512e4fd74350fd517e41688c85711a20852 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sat, 8 Jan 2022 03:27:41 +0100 Subject: [PATCH 46/75] test fix --- tests/test_render_outputs.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/test_render_outputs.py b/tests/test_render_outputs.py index f9b83df6..41c759c2 100644 --- a/tests/test_render_outputs.py +++ b/tests/test_render_outputs.py @@ -109,8 +109,14 @@ def test_metadata_figure(sphinx_run, clean_doctree, file_regression): sphinx_run.build() assert sphinx_run.warnings() == "" doctree = clean_doctree(sphinx_run.get_resolved_doctree("metadata_figure")) + doctree_string = doctree.pformat() + # change, presumably with new docutils version + doctree_string = doctree_string.replace( + '<figure ids="fun-fish" names="fun-fish">', + '<figure align="default" ids="fun-fish" names="fun-fish">', + ) file_regression.check( - doctree.pformat().replace(".jpeg", ".jpg"), extension=".xml", encoding="utf8" + doctree_string.replace(".jpeg", ".jpg"), 
extension=".xml", encoding="utf8" ) From 766ee43528be31b57469215405b9c13d9533cb0d Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sat, 8 Jan 2022 04:58:05 +0100 Subject: [PATCH 47/75] Output displayed glue variables --- myst_nb/configuration.py | 19 +++++----- myst_nb/nb_glue/__init__.py | 3 ++ tests/test_glue/test_parser.sphinx3.xml | 47 +++++++++++++++++++++++++ tests/test_glue/test_parser.sphinx4.xml | 47 +++++++++++++++++++++++++ 4 files changed, 108 insertions(+), 8 deletions(-) diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index 80a66802..e082aff2 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -265,6 +265,17 @@ class NbParserConfig: }, ) + # pre-processing options + + merge_streams: bool = attr.ib( + default=False, + validator=instance_of(bool), + metadata={ + "help": "Merge stdout/stderr execution output streams", + "cell_metadata": True, + }, + ) + # render options output_folder: str = attr.ib( @@ -306,14 +317,6 @@ class NbParserConfig: validator=instance_of(bool), metadata={"help": "Number code cell source lines", "cell_metadata": True}, ) - merge_streams: bool = attr.ib( - default=False, - validator=instance_of(bool), - metadata={ - "help": "Merge stdout/stderr execution output streams", - "cell_metadata": True, - }, - ) output_stderr: Literal[ "show", "remove", "remove-warn", "warn", "error", "severe" ] = attr.ib( diff --git a/myst_nb/nb_glue/__init__.py b/myst_nb/nb_glue/__init__.py index 159115b2..e15eda30 100644 --- a/myst_nb/nb_glue/__init__.py +++ b/myst_nb/nb_glue/__init__.py @@ -62,4 +62,7 @@ def extract_glue_data( ) output["data"] = {k[mime_prefix:]: v for k, v in output["data"].items()} data[key] = output + if not mime_prefix: + # assume that the output is a displayable object + outputs.append(output) cell.outputs = outputs diff --git a/tests/test_glue/test_parser.sphinx3.xml b/tests/test_glue/test_parser.sphinx3.xml index 0166a8b2..c920202a 100644 --- a/tests/test_glue/test_parser.sphinx3.xml +++ b/tests/test_glue/test_parser.sphinx3.xml @@ -11,6 +11,11 @@ <literal_block language="ipython3" linenos="False" xml:space="preserve"> glue("key_text1", "text1") glue("key_float", 3.14159) + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output text_plain" language="myst-ansi" linenos="False" xml:space="preserve"> + 'text1' + <literal_block classes="output text_plain" language="myst-ansi" linenos="False" xml:space="preserve"> + 3.14159 <container cell_index="3" cell_metadata="{}" classes="cell" exec_count="3" nb_element="cell_code"> <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> @@ -21,6 +26,45 @@ import pandas as pd df = pd.DataFrame({"header": [1, 2, 3]}) glue("key_df", df) + <container classes="cell_output" nb_element="cell_code_output"> + <raw classes="output text_html" format="html" xml:space="preserve"> + <div> + <style scoped> + .dataframe tbody tr th:only-of-type { + vertical-align: middle; + } + + .dataframe tbody tr th { + vertical-align: top; + } + + .dataframe thead th { + text-align: right; + } + </style> + <table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>header</th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>1</td> + </tr> + <tr> + <th>1</th> + <td>2</td> + </tr> + <tr> + <th>2</th> + <td>3</td> + </tr> + </tbody> + </table> + </div> <container cell_index="5" cell_metadata="{}" classes="cell" 
exec_count="5" nb_element="cell_code"> <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> @@ -110,6 +154,9 @@ n = sym.symbols(r'\alpha') f = y(n)-2*y(n-1/sym.pi)-5*y(n-2) glue("sym_eq", sym.rsolve(f,y(n),[1,4])) + <container classes="cell_output" nb_element="cell_code_output"> + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + \displaystyle \left(\sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} - \frac{2 \sqrt{5} i}{5}\right) + \left(- \sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} + \frac{2 \sqrt{5} i}{5}\right) <target refid="equation-eq-sym"> <math_block classes="pasted-math" docname="with_glue" ids="equation-eq-sym" label="eq-sym" nowrap="False" number="1" xml:space="preserve"> \displaystyle \left(\sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} - \frac{2 \sqrt{5} i}{5}\right) + \left(- \sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} + \frac{2 \sqrt{5} i}{5}\right) diff --git a/tests/test_glue/test_parser.sphinx4.xml b/tests/test_glue/test_parser.sphinx4.xml index 608568a8..1f994071 100644 --- a/tests/test_glue/test_parser.sphinx4.xml +++ b/tests/test_glue/test_parser.sphinx4.xml @@ -11,6 +11,11 @@ <literal_block language="ipython3" linenos="False" xml:space="preserve"> glue("key_text1", "text1") glue("key_float", 3.14159) + <container classes="cell_output" nb_element="cell_code_output"> + <literal_block classes="output text_plain" language="myst-ansi" linenos="False" xml:space="preserve"> + 'text1' + <literal_block classes="output text_plain" language="myst-ansi" linenos="False" xml:space="preserve"> + 3.14159 <container cell_index="3" cell_metadata="{}" classes="cell" exec_count="3" nb_element="cell_code"> <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> @@ -21,6 +26,45 @@ import pandas as pd df = pd.DataFrame({"header": [1, 2, 3]}) glue("key_df", df) + <container classes="cell_output" nb_element="cell_code_output"> + <raw classes="output text_html" format="html" xml:space="preserve"> + <div> + <style scoped> + .dataframe tbody tr th:only-of-type { + vertical-align: middle; + } + + .dataframe tbody tr th { + vertical-align: top; + } + + .dataframe thead th { + text-align: right; + } + </style> + <table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>header</th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>1</td> + </tr> + <tr> + <th>1</th> + <td>2</td> + </tr> + <tr> + <th>2</th> + <td>3</td> + </tr> + </tbody> + </table> + </div> <container cell_index="5" cell_metadata="{}" classes="cell" exec_count="5" nb_element="cell_code"> <container classes="cell_input" nb_element="cell_code_source"> <literal_block language="ipython3" linenos="False" xml:space="preserve"> @@ -110,6 +154,9 @@ n = sym.symbols(r'\alpha') f = y(n)-2*y(n-1/sym.pi)-5*y(n-2) glue("sym_eq", sym.rsolve(f,y(n),[1,4])) + <container classes="cell_output" nb_element="cell_code_output"> + <math_block classes="output text_latex" nowrap="False" number="True" xml:space="preserve"> + \displaystyle \left(\sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} - \frac{2 \sqrt{5} i}{5}\right) + \left(- \sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} + \frac{2 \sqrt{5} i}{5}\right) <target refid="equation-eq-sym"> <math_block classes="pasted-math" docname="with_glue" ids="equation-eq-sym" label="eq-sym" nowrap="False" number="1" xml:space="preserve"> \displaystyle \left(\sqrt{5} 
i\right)^{\alpha} \left(\frac{1}{2} - \frac{2 \sqrt{5} i}{5}\right) + \left(- \sqrt{5} i\right)^{\alpha} \left(\frac{1}{2} + \frac{2 \sqrt{5} i}{5}\right) From c3dc375fb207c4194af6583e0c758b1d3e2b0130 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sat, 8 Jan 2022 05:05:15 +0100 Subject: [PATCH 48/75] fix docs --- docs/use/glue.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/use/glue.md b/docs/use/glue.md index 73cafd94..ae480c5c 100644 --- a/docs/use/glue.md +++ b/docs/use/glue.md @@ -326,7 +326,7 @@ from IPython.display import Markdown glue("inline_md", Markdown( "inline **markdown** with a [link](glue/main), " "and a nested glue value: {glue:}`boot_mean`" -)) +), display=False) glue("block_md", Markdown(""" #### A heading @@ -336,7 +336,7 @@ Then some text, and anything nested. print("Hello world!") ``` """ -)) +), display=False) ```` Now, when we glue, the Markdown will be evaluated as block/inline MyST Markdown, as if it was part of the original document. From 19e3204d9f7c2bc8ca17db7bda6fc72f8760b997 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sat, 8 Jan 2022 05:58:01 +0100 Subject: [PATCH 49/75] fix sphinx_book_theme integration --- docs/conf.py | 11 +++++++++-- myst_nb/sphinx_.py | 8 +++++--- tests/test_codecell_file.py | 18 ++++++++++-------- tests/test_parser.py | 10 ++++------ tests/test_text_based.py | 13 +++++++++---- 5 files changed, 37 insertions(+), 23 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 54ddbc06..8f026389 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -19,6 +19,7 @@ extensions = [ "myst_nb", "sphinx_copybutton", + "sphinx_book_theme", "sphinx.ext.intersphinx", "sphinx.ext.autodoc", "sphinx.ext.viewcode", @@ -92,9 +93,15 @@ "github_url": "https://github.com/executablebooks/myst-nb", "repository_url": "https://github.com/executablebooks/myst-nb", "repository_branch": "master", - "use_edit_page_button": True, - "path_to_docs": "docs/", + "path_to_docs": "docs", "show_navbar_depth": 2, + "use_edit_page_button": True, + "use_repository_button": True, + "use_download_button": True, + "launch_buttons": { + "binderhub_url": "https://mybinder.org", + "notebook_interface": "classic", + }, } # Add any paths that contain custom static files (such as style sheets) here, diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index 516fbe72..6e34443b 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -380,9 +380,11 @@ def render_nb_metadata(self, token: SyntaxTreeNode) -> None: # save these special keys on the metadata, rather than as docinfo for key in ("kernelspec", "language_info"): - NbMetadataCollector.set_doc_data( - env, env.docname, key, metadata.pop(key, None) - ) + # TODO sphinx_book_theme checks kernelspec in `_is_notebook` check + # NbMetadataCollector.set_doc_data( + # env, env.docname, key, metadata.pop(key, None) + # ) + env.metadata[env.docname][key] = metadata.pop(key, None) # TODO should we provide hook for NbElementRenderer? 
# Also add method to NbElementRenderer, to store scripts to load diff --git a/tests/test_codecell_file.py b/tests/test_codecell_file.py index e65932e4..508b703e 100644 --- a/tests/test_codecell_file.py +++ b/tests/test_codecell_file.py @@ -17,14 +17,14 @@ def test_codecell_file(sphinx_run, file_regression, check_nbs, get_test_path): "author", "source_map", "wordcount", + "kernelspec", + "language_info", } assert set(sphinx_run.env.nb_metadata["mystnb_codecell_file"].keys()) == { "exec_data", - "kernelspec", - "language_info", } assert sphinx_run.env.metadata["mystnb_codecell_file"]["author"] == "Matt" - assert sphinx_run.env.nb_metadata["mystnb_codecell_file"]["kernelspec"] == { + assert sphinx_run.env.metadata["mystnb_codecell_file"]["kernelspec"] == { "display_name": "Python 3", "language": "python", "name": "python3", @@ -57,18 +57,20 @@ def test_codecell_file_warnings(sphinx_run, file_regression, check_nbs, get_test "author", "source_map", "wordcount", + "kernelspec", + "language_info", } assert set(sphinx_run.env.nb_metadata["mystnb_codecell_file_warnings"].keys()) == { "exec_data", - "kernelspec", - "language_info", } assert ( sphinx_run.env.metadata["mystnb_codecell_file_warnings"]["author"] == "Aakash" ) - assert sphinx_run.env.nb_metadata["mystnb_codecell_file_warnings"][ - "kernelspec" - ] == {"display_name": "Python 3", "language": "python", "name": "python3"} + assert sphinx_run.env.metadata["mystnb_codecell_file_warnings"]["kernelspec"] == { + "display_name": "Python 3", + "language": "python", + "name": "python3", + } try: file_regression.check( sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb", encoding="utf8" diff --git a/tests/test_parser.py b/tests/test_parser.py index 7da379ff..b8bf89d9 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -10,13 +10,12 @@ def test_basic_run(sphinx_run, file_regression): assert set(sphinx_run.env.metadata["basic_run"].keys()) == { "test_name", "wordcount", - } - assert set(sphinx_run.env.nb_metadata["basic_run"].keys()) == { "kernelspec", "language_info", } + assert set(sphinx_run.env.nb_metadata["basic_run"].keys()) == set() assert sphinx_run.env.metadata["basic_run"]["test_name"] == "notebook1" - assert sphinx_run.env.nb_metadata["basic_run"]["kernelspec"] == { + assert sphinx_run.env.metadata["basic_run"]["kernelspec"] == { "display_name": "Python 3", "language": "python", "name": "python3", @@ -46,14 +45,13 @@ def test_complex_outputs(sphinx_run, file_regression): "toc", "varInspector", "wordcount", - } - assert set(sphinx_run.env.nb_metadata["complex_outputs"].keys()) == { "kernelspec", "language_info", } + assert set(sphinx_run.env.nb_metadata["complex_outputs"].keys()) == set() assert sphinx_run.env.metadata["complex_outputs"]["celltoolbar"] == "Edit Metadata" assert sphinx_run.env.metadata["complex_outputs"]["hide_input"] == "False" - assert sphinx_run.env.nb_metadata["complex_outputs"]["kernelspec"] == { + assert sphinx_run.env.metadata["complex_outputs"]["kernelspec"] == { "display_name": "Python 3", "language": "python", "name": "python3", diff --git a/tests/test_text_based.py b/tests/test_text_based.py index 2d598944..28cad54f 100644 --- a/tests/test_text_based.py +++ b/tests/test_text_based.py @@ -14,14 +14,14 @@ def test_basic_run(sphinx_run, file_regression, check_nbs): "author", "source_map", "wordcount", + "kernelspec", + "language_info", } assert set(sphinx_run.env.nb_metadata["basic_unrun"].keys()) == { "exec_data", - "kernelspec", - "language_info", } assert 
sphinx_run.env.metadata["basic_unrun"]["author"] == "Chris" - assert sphinx_run.env.nb_metadata["basic_unrun"]["kernelspec"] == { + assert sphinx_run.env.metadata["basic_unrun"]["kernelspec"] == { "display_name": "Python 3", "language": "python", "name": "python3", @@ -41,10 +41,15 @@ def test_basic_run(sphinx_run, file_regression, check_nbs): def test_basic_run_exec_off(sphinx_run, file_regression, check_nbs): sphinx_run.build() # print(sphinx_run.status()) - assert set(sphinx_run.env.nb_metadata["basic_unrun"].keys()) == { + assert set(sphinx_run.env.metadata["basic_unrun"].keys()) == { + "jupytext", + "author", + "source_map", + "wordcount", "kernelspec", "language_info", } + assert set(sphinx_run.env.nb_metadata["basic_unrun"].keys()) == set() assert sphinx_run.env.metadata["basic_unrun"]["author"] == "Chris" file_regression.check( From 413048979378a442999a785d388c4d334aea3803 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sun, 9 Jan 2022 18:42:41 +0100 Subject: [PATCH 50/75] Updates from myst-parser --- myst_nb/docutils_.py | 24 ++++++++--------- myst_nb/nb_glue/elements.py | 2 +- myst_nb/render.py | 4 +-- myst_nb/sphinx_.py | 24 ++++++++--------- setup.cfg | 1 + tests/nb_fixtures/reporter_warnings.txt | 2 +- tests/test_docutils.py | 34 +++++++------------------ 7 files changed, 36 insertions(+), 55 deletions(-) diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index a4afa22f..523c688a 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -181,7 +181,7 @@ def _parse(self, inputstring: str, document: nodes.document) -> None: resources = preprocess_notebook( notebook, logger, mdit_parser.renderer.get_cell_render_config ) - mdit_parser.renderer.config["nb_resources"] = resources + mdit_parser.renderer.md_options["nb_resources"] = resources # we temporarily store nb_renderer on the document, # so that roles/directives can access it document.attributes["nb_renderer"] = nb_renderer @@ -205,16 +205,14 @@ class DocutilsNbRenderer(DocutilsRenderer): """A docutils-only renderer for Jupyter Notebooks.""" @property - def nb_renderer(self) -> NbElementRenderer: + def nb_config(self) -> NbParserConfig: """Get the notebook element renderer.""" - return self.config["nb_renderer"] - - def get_nb_config(self, key: str) -> Any: - """Get a notebook level configuration value. + return self.md_options["nb_config"] - :raises: KeyError if the key is not found - """ - return self.config["nb_config"][key] + @property + def nb_renderer(self) -> NbElementRenderer: + """Get the notebook element renderer.""" + return self.md_options["nb_renderer"] def get_cell_render_config( self, @@ -233,14 +231,14 @@ def get_cell_render_config( :raises: KeyError if the key is not found """ # TODO allow output level configuration? - cell_metadata_key = self.get_nb_config("cell_render_key") + cell_metadata_key = self.nb_config.cell_render_key if ( cell_metadata_key not in cell_metadata or key not in cell_metadata[cell_metadata_key] ): if not has_nb_key: raise KeyError(key) - return self.get_nb_config(nb_key if nb_key is not None else key) + return self.nb_config[nb_key if nb_key is not None else key] # TODO validate? 
return cell_metadata[cell_metadata_key][key] @@ -345,7 +343,7 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: self.render_nb_cell_code_source(token) # render the execution output, if any - has_outputs = self.config["notebook"]["cells"][cell_index].get( + has_outputs = self.md_options["notebook"]["cells"][cell_index].get( "outputs", [] ) if (not remove_output) and has_outputs: @@ -376,7 +374,7 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: cell_index = token.meta["index"] metadata = token.meta["metadata"] line = token_line(token) - outputs: List[NotebookNode] = self.config["notebook"]["cells"][cell_index].get( + outputs: List[NotebookNode] = self.md_options["notebook"]["cells"][cell_index].get( "outputs", [] ) # render the outputs diff --git a/myst_nb/nb_glue/elements.py b/myst_nb/nb_glue/elements.py index 2b5b13cd..087de7b9 100644 --- a/myst_nb/nb_glue/elements.py +++ b/myst_nb/nb_glue/elements.py @@ -36,7 +36,7 @@ def render_output_docutils( document, line, nb_renderer: NbElementRenderer, output: Dict[str, Any], inline=False ) -> List[nodes.Node]: """Render the output in docutils (select mime priority directly).""" - mime_priority = nb_renderer.renderer.get_nb_config("mime_priority") + mime_priority = nb_renderer.renderer.nb_config.mime_priority try: mime_type = next(x for x in mime_priority if x in output["data"]) except StopIteration: diff --git a/myst_nb/render.py b/myst_nb/render.py index 33d4ea1e..ea50df91 100644 --- a/myst_nb/render.py +++ b/myst_nb/render.py @@ -107,7 +107,7 @@ def source(self): def get_resources(self) -> Dict[str, Any]: """Get the resources from the notebook pre-processing.""" - return self.renderer.config["nb_resources"] + return self.renderer.md_options["nb_resources"] def write_file( self, path: List[str], content: bytes, overwrite=False, exists_ok=False @@ -121,7 +121,7 @@ def write_file( :returns: URI to use for referencing the file """ - output_folder = self.renderer.get_nb_config("output_folder") + output_folder = self.renderer.nb_config.output_folder filepath = Path(output_folder).joinpath(*path) if not output_folder: pass # do not output anything if output_folder is not set (docutils only) diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index 6e34443b..8408f8fb 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -311,7 +311,7 @@ def parse(self, inputstring: str, document: nodes.document) -> None: resources = preprocess_notebook( notebook, logger, mdit_parser.renderer.get_cell_render_config ) - mdit_parser.renderer.config["nb_resources"] = resources + mdit_parser.renderer.md_options["nb_resources"] = resources # we temporarily store nb_renderer on the document, # so that roles/directives can access it document.attributes["nb_renderer"] = nb_renderer @@ -334,16 +334,14 @@ class SphinxNbRenderer(SphinxRenderer): """A sphinx renderer for Jupyter Notebooks.""" @property - def nb_renderer(self) -> NbElementRenderer: + def nb_config(self) -> NbParserConfig: """Get the notebook element renderer.""" - return self.config["nb_renderer"] - - def get_nb_config(self, key: str) -> Any: - """Get a notebook level configuration value. 
+ return self.md_options["nb_config"] - :raises: KeyError if the key is not found - """ - return self.config["nb_config"][key] + @property + def nb_renderer(self) -> NbElementRenderer: + """Get the notebook element renderer.""" + return self.md_options["nb_renderer"] def get_cell_render_config( self, @@ -362,14 +360,14 @@ def get_cell_render_config( :raises: KeyError if the key is not found """ # TODO allow output level configuration? - cell_metadata_key = self.get_nb_config("cell_render_key") + cell_metadata_key = self.nb_config.cell_render_key if ( cell_metadata_key not in cell_metadata or key not in cell_metadata[cell_metadata_key] ): if not has_nb_key: raise KeyError(key) - return self.get_nb_config(nb_key if nb_key is not None else key) + return self.nb_config[nb_key if nb_key is not None else key] # TODO validate? return cell_metadata[cell_metadata_key][key] @@ -475,7 +473,7 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: self.render_nb_cell_code_source(token) # render the execution output, if any - has_outputs = self.config["notebook"]["cells"][cell_index].get( + has_outputs = self.md_options["notebook"]["cells"][cell_index].get( "outputs", [] ) if (not remove_output) and has_outputs: @@ -507,7 +505,7 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: line = token_line(token, 0) cell_index = token.meta["index"] metadata = token.meta["metadata"] - outputs: List[NotebookNode] = self.config["notebook"]["cells"][cell_index].get( + outputs: List[NotebookNode] = self.md_options["notebook"]["cells"][cell_index].get( "outputs", [] ) # render the outputs diff --git a/setup.cfg b/setup.cfg index 3df8274f..905d1f24 100644 --- a/setup.cfg +++ b/setup.cfg @@ -102,6 +102,7 @@ testing = pytest~=5.4 pytest-cov~=2.8 pytest-regressions + pytest-param-files~=0.2.2 sympy [flake8] diff --git a/tests/nb_fixtures/reporter_warnings.txt b/tests/nb_fixtures/reporter_warnings.txt index 8656650f..813d082c 100644 --- a/tests/nb_fixtures/reporter_warnings.txt +++ b/tests/nb_fixtures/reporter_warnings.txt @@ -66,5 +66,5 @@ cells: [a]: c . -<string>:20004: (WARNING/2) Duplicate reference definition: A +<string>:20004: (WARNING/2) Duplicate reference definition: A [myst.ref] . 
\ No newline at end of file diff --git a/tests/test_docutils.py b/tests/test_docutils.py index 9db9d69c..38dd4ea5 100644 --- a/tests/test_docutils.py +++ b/tests/test_docutils.py @@ -4,7 +4,7 @@ from pathlib import Path from docutils.core import publish_doctree -from markdown_it.utils import read_fixture_file +from pytest_param_files import with_parameters import pytest import yaml @@ -13,14 +13,10 @@ FIXTURE_PATH = Path(__file__).parent.joinpath("nb_fixtures") -@pytest.mark.parametrize( - "line,title,input,expected", - read_fixture_file(FIXTURE_PATH.joinpath("basic.txt")), - ids=[f"{i[0]}-{i[1]}" for i in read_fixture_file(FIXTURE_PATH / "basic.txt")], -) -def test_basic(line, title, input, expected): +@with_parameters(FIXTURE_PATH / "basic.txt") +def test_basic(file_params): """Test basic parsing.""" - dct = yaml.safe_load(input) + dct = yaml.safe_load(file_params.content) dct.update({"nbformat": 4, "nbformat_minor": 4}) dct.setdefault("metadata", {}) report_stream = StringIO() @@ -36,24 +32,13 @@ def test_basic(line, title, input, expected): ) assert report_stream.getvalue().rstrip() == "" - try: - assert doctree.pformat().rstrip() == expected.rstrip() - except AssertionError: - print(doctree.pformat().rstrip()) - raise + file_params.assert_expected(doctree.pformat(), rstrip=True) -@pytest.mark.parametrize( - "line,title,input,expected", - read_fixture_file(FIXTURE_PATH.joinpath("reporter_warnings.txt")), - ids=[ - f"{i[0]}-{i[1]}" - for i in read_fixture_file(FIXTURE_PATH / "reporter_warnings.txt") - ], -) -def test_reporting(line, title, input, expected): +@with_parameters(FIXTURE_PATH / "reporter_warnings.txt") +def test_reporting(file_params): """Test that warnings and errors are reported as expected.""" - dct = yaml.safe_load(input) + dct = yaml.safe_load(file_params.content) dct.update({"metadata": {}, "nbformat": 4, "nbformat_minor": 4}) report_stream = StringIO() publish_doctree( @@ -65,5 +50,4 @@ def test_reporting(line, title, input, expected): "warning_stream": report_stream, }, ) - - assert report_stream.getvalue().rstrip() == expected.rstrip() + file_params.assert_expected(report_stream.getvalue(), rstrip=True) From 7929b658b977d0712059e5ea90953a2410e56e51 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sun, 9 Jan 2022 17:42:57 +0000 Subject: [PATCH 51/75] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- myst_nb/docutils_.py | 6 +++--- myst_nb/sphinx_.py | 6 +++--- tests/test_docutils.py | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 523c688a..e6342994 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -374,9 +374,9 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: cell_index = token.meta["index"] metadata = token.meta["metadata"] line = token_line(token) - outputs: List[NotebookNode] = self.md_options["notebook"]["cells"][cell_index].get( - "outputs", [] - ) + outputs: List[NotebookNode] = self.md_options["notebook"]["cells"][ + cell_index + ].get("outputs", []) # render the outputs mime_priority = self.get_cell_render_config(metadata, "mime_priority") for output_index, output in enumerate(outputs): diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index 8408f8fb..2f739c3b 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -505,9 +505,9 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: line = token_line(token, 0) 
cell_index = token.meta["index"] metadata = token.meta["metadata"] - outputs: List[NotebookNode] = self.md_options["notebook"]["cells"][cell_index].get( - "outputs", [] - ) + outputs: List[NotebookNode] = self.md_options["notebook"]["cells"][ + cell_index + ].get("outputs", []) # render the outputs for output_index, output in enumerate(outputs): if output.output_type == "stream": diff --git a/tests/test_docutils.py b/tests/test_docutils.py index 38dd4ea5..b1644a03 100644 --- a/tests/test_docutils.py +++ b/tests/test_docutils.py @@ -4,8 +4,8 @@ from pathlib import Path from docutils.core import publish_doctree -from pytest_param_files import with_parameters import pytest +from pytest_param_files import with_parameters import yaml from myst_nb.docutils_ import Parser From bcc711c14d6a7ecd39c033aaa1dcdfacaf9b79ab Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Sun, 9 Jan 2022 18:43:41 +0100 Subject: [PATCH 52/75] pre-commit fix --- tests/test_docutils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/test_docutils.py b/tests/test_docutils.py index b1644a03..46884ff8 100644 --- a/tests/test_docutils.py +++ b/tests/test_docutils.py @@ -4,7 +4,6 @@ from pathlib import Path from docutils.core import publish_doctree -import pytest from pytest_param_files import with_parameters import yaml From aa6e502eb36dbedecdae52b7d4012aa41937333d Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Mon, 10 Jan 2022 00:58:49 +0100 Subject: [PATCH 53/75] Update pytest_param_files --- setup.cfg | 2 +- tests/test_docutils.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/setup.cfg b/setup.cfg index 905d1f24..612eab8c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -102,7 +102,7 @@ testing = pytest~=5.4 pytest-cov~=2.8 pytest-regressions - pytest-param-files~=0.2.2 + pytest-param-files~=0.3.3 sympy [flake8] diff --git a/tests/test_docutils.py b/tests/test_docutils.py index 46884ff8..c302c53a 100644 --- a/tests/test_docutils.py +++ b/tests/test_docutils.py @@ -4,7 +4,7 @@ from pathlib import Path from docutils.core import publish_doctree -from pytest_param_files import with_parameters +import pytest import yaml from myst_nb.docutils_ import Parser @@ -12,7 +12,7 @@ FIXTURE_PATH = Path(__file__).parent.joinpath("nb_fixtures") -@with_parameters(FIXTURE_PATH / "basic.txt") +@pytest.mark.param_file(FIXTURE_PATH / "basic.txt") def test_basic(file_params): """Test basic parsing.""" dct = yaml.safe_load(file_params.content) @@ -34,7 +34,7 @@ def test_basic(file_params): file_params.assert_expected(doctree.pformat(), rstrip=True) -@with_parameters(FIXTURE_PATH / "reporter_warnings.txt") +@pytest.mark.param_file(FIXTURE_PATH / "reporter_warnings.txt") def test_reporting(file_params): """Test that warnings and errors are reported as expected.""" dct = yaml.safe_load(file_params.content) From daab4d29c0f43e499e7f9f08f7597f4230c5fa2e Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Mon, 10 Jan 2022 08:21:51 +0100 Subject: [PATCH 54/75] Add `render_markdown_format` configuration --- docs/conf.py | 4 ++ docs/use/formatting_outputs.md | 54 ++++++++++++++++- docs/use/glue.md | 25 +++++--- myst_nb/configuration.py | 40 ++++++++----- myst_nb/docutils_.py | 21 ++++--- myst_nb/nb_glue/domain.py | 8 +-- myst_nb/nb_glue/elements.py | 33 +++++++++-- myst_nb/render.py | 103 +++++++++++++++++++-------------- 8 files changed, 200 insertions(+), 88 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 8f026389..b33b29ba 100644 
--- a/docs/conf.py +++ b/docs/conf.py @@ -71,12 +71,16 @@ "myst_parser.main.MdParserConfig", "nbformat.notebooknode.NotebookNode", "pygments.lexer.RegexLexer", + # Literal values are not supported "typing_extensions.Literal", "typing_extensions.Literal[show, remove, remove - warn, warn, error, severe]", "off", "force", "auto", "cache", + "commonmark", + "gfm", + "myst", ] ] diff --git a/docs/use/formatting_outputs.md b/docs/use/formatting_outputs.md index 4104cbaf..25648f90 100644 --- a/docs/use/formatting_outputs.md +++ b/docs/use/formatting_outputs.md @@ -197,13 +197,18 @@ pandas.DataFrame({"column 1": [1, 2, 3]}) (use/format/markdown)= ## Markdown -Markdown output is parsed by MyST-Parser, currently with the configuration set to `myst_commonmark_only=True` (see [MyST configuration options](myst:sphinx/config-options)). +The format of output `text/markdown` can be specified by `render_markdown_format` configuration: -The parsed Markdown is integrated into the wider documentation, and so it is possible, for example, to include internal references: +- `commonmark` (default): Restricted to the [CommonMark specification](https://commonmark.org/). +- `gfm`: Restricted to the [GitHub-flavored markdown](https://github.github.com/gfm/). + - Note, this requires the installation of the [linkify-it-py package](https://pypi.org/project/linkify-it-py) +- `myst`: The MyST parser configuration for the the current document. + +CommonMark formatting will ouput basic Markdown syntax: ```{code-cell} ipython3 from IPython.display import display, Markdown -display(Markdown('**_some_ markdown** and an [internal reference](use/format/markdown)!')) +display(Markdown('**_some_ markdown** and an [a reference](https://example.com)!')) ``` and even internal images can be rendered! @@ -212,6 +217,49 @@ and even internal images can be rendered! display(Markdown('![figure](../_static/logo-wide.svg)')) ``` +But setting the `render_markdown_format` to `myst` will allow for more advanced formatting, +such as including internal references, tables, and even other directives: + +`````md +````{code-cell} ipython3 +--- +render: + markdown_format: myst +--- +display(Markdown('**_some_ markdown** and an [internal reference](use/format/markdown)!')) +display(Markdown(""" +| a | b | c | +|---|---|---| +| 1 | 2 | 3 | +""")) +display(Markdown(""" +```{note} +A note admonition! +``` +""")) +```` +````` + +The parsed Markdown is integrated into the wider documentation, and so it is possible, for example, to include internal references: + +````{code-cell} ipython3 +--- +render: + markdown_format: myst +--- +display(Markdown('**_some_ markdown** and an [internal reference](use/format/markdown)!')) +display(Markdown(""" +| a | b | c | +|---|---|---| +| 1 | 2 | 3 | +""")) +display(Markdown(""" +```{note} +A note admonition! +``` +""")) +```` + (use/format/ansi)= ## ANSI Outputs diff --git a/docs/use/glue.md b/docs/use/glue.md index ae480c5c..aa3a9e69 100644 --- a/docs/use/glue.md +++ b/docs/use/glue.md @@ -224,7 +224,7 @@ With `glue:text` we can **add formatting to the output**. This is particularly useful if you are displaying numbers and want to round the results. To add formatting, use this pattern: -* `` {glue:text}`mykey:formatstring` `` +- `` {glue:text}`mykey:formatstring` `` For example, the following: ``My rounded mean: {glue:text}`boot_mean:.2f` `` will be rendered like this: My rounded mean: {glue:text}`boot_mean:.2f` (95% CI: {glue:text}`boot_clo:.2f`/{glue:text}`boot_chi:.2f`). 
@@ -317,9 +317,9 @@ Which we reference as Equation {eq}`eq-sym`. `glue:math` only works with glued variables that contain a `text/latex` output. ``` -### The `glue:myst` role/directive +### The `glue:md` role/directive -With `glue:myst`, you can output `text/markdown` as MyST Markdown text, that will be integrated into your page (by default `text/markdown` will be parsed as CommonMark only): +With `glue:md`, you can output `text/markdown`, that will be integrated into your page. ````{code-cell} ipython3 from IPython.display import Markdown @@ -339,18 +339,27 @@ print("Hello world!") ), display=False) ```` -Now, when we glue, the Markdown will be evaluated as block/inline MyST Markdown, as if it was part of the original document. +The format of the markdown can be specified as: + +- `commonmark` (default): Restricted to the [CommonMark specification](https://commonmark.org/). +- `gfm`: Restricted to the [GitHub-flavored markdown](https://github.github.com/gfm/). + - Note, this requires the installation of the [linkify-it-py package](https://pypi.org/project/linkify-it-py) +- `myst`: The MyST parser configuration for the the current document. + +For example, the following role/directive will glue inline/block MyST Markdown, as if it was part of the original document. ````md -Here is some {glue:myst}`inline_md`! +Here is some {glue:md}`inline_md:myst`! -```{glue:myst} block_md +```{glue:md} block_md +:format: myst ``` ```` -Here is some {glue:myst}`inline_md`! +Here is some {glue:md}`inline_md:myst`! -```{glue:myst} block_md +```{glue:md} block_md +:format: myst ``` +++ diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index e082aff2..a9c52f91 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -317,22 +317,6 @@ class NbParserConfig: validator=instance_of(bool), metadata={"help": "Number code cell source lines", "cell_metadata": True}, ) - output_stderr: Literal[ - "show", "remove", "remove-warn", "warn", "error", "severe" - ] = attr.ib( - default="show", - validator=in_( - [ - "show", - "remove", - "remove-warn", - "warn", - "error", - "severe", - ] - ), - metadata={"help": "Behaviour for stderr output", "cell_metadata": True}, - ) # docutils does not allow for the dictionaries in its configuration, # and also there is no API for the parser to know the output format, so # we use two different options for docutils(mime_priority)/sphinx(render_priority) @@ -365,6 +349,22 @@ class NbParserConfig: }, repr=False, ) + output_stderr: Literal[ + "show", "remove", "remove-warn", "warn", "error", "severe" + ] = attr.ib( + default="show", + validator=in_( + [ + "show", + "remove", + "remove-warn", + "warn", + "error", + "severe", + ] + ), + metadata={"help": "Behaviour for stderr output", "cell_metadata": True}, + ) render_text_lexer: str = attr.ib( default="myst-ansi", # TODO allow None -> "none"? @@ -394,6 +394,14 @@ class NbParserConfig: "cell_metadata": "image", }, ) + render_markdown_format: Literal["commonmark", "gfm", "myst"] = attr.ib( + default="commonmark", + validator=in_(["commonmark", "gfm", "myst"]), + metadata={ + "help": "The format to use for text/markdown rendering", + "cell_metadata": "markdown_format", + }, + ) # TODO jupyter_sphinx_require_url and jupyter_sphinx_embed_url (undocumented), # are no longer used by this package, replaced by ipywidgets_js # do we add any deprecation warnings? 
diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index e6342994..0c9cde4a 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -23,9 +23,9 @@ from myst_nb.nb_glue.elements import ( PasteDirective, PasteFigureDirective, + PasteMarkdownDirective, + PasteMarkdownRole, PasteMathDirective, - PasteMystDirective, - PasteMystRole, PasteRole, PasteTextRole, ) @@ -74,13 +74,13 @@ def parse(self, inputstring: str, document: nodes.document) -> None: ("glue:any", PasteDirective), ("glue:figure", PasteFigureDirective), ("glue:math", PasteMathDirective), - ("glue:myst", PasteMystDirective), + ("glue:md", PasteMarkdownDirective), ) new_roles = ( ("glue:", PasteRole()), ("glue:any", PasteRole()), ("glue:text", PasteTextRole()), - ("glue:myst", PasteMystRole()), + ("glue:md", PasteMarkdownRole()), ) for name, directive in new_directives: _directives[name] = directive @@ -231,11 +231,16 @@ def get_cell_render_config( :raises: KeyError if the key is not found """ # TODO allow output level configuration? + use_nb_level = True cell_metadata_key = self.nb_config.cell_render_key - if ( - cell_metadata_key not in cell_metadata - or key not in cell_metadata[cell_metadata_key] - ): + if cell_metadata_key in cell_metadata: + if isinstance(cell_metadata[cell_metadata_key], dict): + if key in cell_metadata[cell_metadata_key]: + use_nb_level = False + else: + # TODO log warning + pass + if use_nb_level: if not has_nb_key: raise KeyError(key) return self.nb_config[nb_key if nb_key is not None else key] diff --git a/myst_nb/nb_glue/domain.py b/myst_nb/nb_glue/domain.py index 9f6ca353..5c9b2b6c 100644 --- a/myst_nb/nb_glue/domain.py +++ b/myst_nb/nb_glue/domain.py @@ -9,9 +9,9 @@ from myst_nb.nb_glue.elements import ( PasteDirective, PasteFigureDirective, + PasteMarkdownDirective, + PasteMarkdownRole, PasteMathDirective, - PasteMystDirective, - PasteMystRole, PasteRole, PasteTextRole, ) @@ -31,13 +31,13 @@ class NbGlueDomain(Domain): "any": PasteDirective, "figure": PasteFigureDirective, "math": PasteMathDirective, - "myst": PasteMystDirective, + "md": PasteMarkdownDirective, } roles = { "": PasteRole(), "any": PasteRole(), "text": PasteTextRole(), - "myst": PasteMystRole(), + "md": PasteMarkdownRole(), } def merge_domaindata(self, docnames: List[str], otherdata: dict) -> None: diff --git a/myst_nb/nb_glue/elements.py b/myst_nb/nb_glue/elements.py index 087de7b9..ce7289fa 100644 --- a/myst_nb/nb_glue/elements.py +++ b/myst_nb/nb_glue/elements.py @@ -231,13 +231,21 @@ def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: return [node], [] -class PasteMystRole(PasteRole): +class PasteMarkdownRole(PasteRole): """A role for pasting markdown outputs from notebooks as inline MyST Markdown.""" def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: + # check if we have both key:format in the key + parts = self.text.rsplit(":", 1) + if len(parts) == 2: + key, fmt = parts + else: + key = parts[0] + fmt = "commonmark" + # TODO - check fmt is valid # retrieve the data document = self.inliner.document - result = retrieve_mime_data(document, self.text, "text/markdown") + result = retrieve_mime_data(document, key, "text/markdown") if result.warning is not None: return [], [ warning( @@ -246,12 +254,16 @@ def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: self.lineno, ) ] + # TODO this feels a bit hacky + cell_key = result.nb_renderer.renderer.nb_config.cell_render_key mime = MimeData( "text/markdown", result.data, + cell_metadata={ + cell_key: {"markdown_format": fmt}, 
+ }, output_metadata=result.metadata, line=self.lineno, - md_commonmark=False, ) _nodes = result.nb_renderer.render_markdown_inline(mime) for node in _nodes: @@ -282,9 +294,16 @@ def set_source_info(self, node: nodes.Node) -> None: _node.line = line -class PasteMystDirective(_PasteBaseDirective): +class PasteMarkdownDirective(_PasteBaseDirective): """A directive for pasting markdown outputs from notebooks as MyST Markdown.""" + def fmt(argument): + return directives.choice(argument, ("commonmark", "gfm", "myst")) + + option_spec = { + "format": fmt, + } + def run(self) -> List[nodes.Node]: """Run the directive.""" result = retrieve_mime_data(self.document, self.arguments[0], "text/markdown") @@ -296,12 +315,16 @@ def run(self) -> List[nodes.Node]: self.lineno, ) ] + # TODO this "override" feels a bit hacky + cell_key = result.nb_renderer.renderer.nb_config.cell_render_key mime = MimeData( "text/markdown", result.data, + cell_metadata={ + cell_key: {"markdown_format": self.options.get("format", "commonmark")}, + }, output_metadata=result.metadata, line=self.lineno, - md_commonmark=False, md_headings=True, ) _nodes = result.nb_renderer.render_markdown(mime) diff --git a/myst_nb/render.py b/myst_nb/render.py index ea50df91..c5d8c2db 100644 --- a/myst_nb/render.py +++ b/myst_nb/render.py @@ -53,9 +53,6 @@ class MimeData: """Index of the output in the cell""" line: Optional[int] = attr.ib(default=None) """Source line of the cell""" - md_commonmark: bool = attr.ib(default=True) - """Whether to parse the content as "isolated" CommonMark""" - # as opposed to using the current render and its environment md_headings: bool = attr.ib(default=False) """Whether to render headings in text/markdown blocks.""" # we can only do this if know the content will be rendered into the main body @@ -302,30 +299,12 @@ def render_unknown(self, data: MimeData) -> List[nodes.Element]: def render_markdown(self, data: MimeData) -> List[nodes.Element]: """Render a notebook text/markdown mime data output.""" - # create a container to parse the markdown into - temp_container = nodes.Element() - - # store the current renderer config - md = self.renderer.md - match_titles = self.renderer.md_env.get("match_titles", None) - - # setup temporary renderer config - self.renderer.md_env["match_titles"] = data.md_headings - if data.md_commonmark: - self.renderer.md = create_md_parser( - MdParserConfig(commonmark_only=True), self.renderer.__class__ - ) - - try: - # parse markdown - with self.renderer.current_node_context(temp_container): - self.renderer.nested_render_text(data.string, data.line) - finally: - # restore renderer config - self.renderer.md = md - self.renderer.md_env["match_titles"] = match_titles - - return temp_container.children + fmt = self.renderer.get_cell_render_config( + data.cell_metadata, "markdown_format", "render_markdown_format" + ) + return self._render_markdown_base( + data, fmt=fmt, inline=False, allow_headings=data.md_headings + ) def render_text_plain(self, data: MimeData) -> List[nodes.Element]: """Render a notebook text/plain mime data output.""" @@ -384,6 +363,7 @@ def render_image(self, data: MimeData) -> List[nodes.Element]: # which names by {notbook_name}-{cell_index}-{output-index}.{extension} data_hash = hashlib.sha256(data_bytes).hexdigest() filename = f"{data_hash}{extension}" + # TODO should we be trying to clear old files? 
uri = self.write_file([filename], data_bytes, overwrite=False, exists_ok=True) image_node = nodes.image(uri=uri) # apply attributes to the image node @@ -466,23 +446,12 @@ def render_unknown_inline(self, data: MimeData) -> List[nodes.Element]: def render_markdown_inline(self, data: MimeData) -> List[nodes.Element]: """Render a notebook text/markdown mime data output.""" - # TODO upstream this to myst-parser (replace MockState.inline_text)? - if data.md_commonmark: - parser = create_md_parser( - MdParserConfig(commonmark_only=True), self.renderer.__class__ - ) - tokens = parser.parseInline(data.string) - else: - tokens = self.renderer.md.parseInline(data.string, self.renderer.md_env) - if data.line is not None: - for token in tokens: - if token.map: - token.map = [token.map[0] + data.line, token.map[1] + data.line] - node = nodes.Element() # anonymous container for parsing - with self.renderer.current_node_context(node): - self.renderer._render_tokens(tokens) - - return node.children + fmt = self.renderer.get_cell_render_config( + data.cell_metadata, "markdown_format", "render_markdown_format" + ) + return self._render_markdown_base( + data, fmt=fmt, inline=True, allow_headings=data.md_headings + ) def render_text_plain_inline(self, data: MimeData) -> List[nodes.Element]: """Render a notebook text/plain mime data output.""" @@ -528,6 +497,52 @@ def render_widget_view_inline(self, data: MimeData) -> List[nodes.Element]: """Render a notebook application/vnd.jupyter.widget-view+json mime output.""" return self.render_widget_view(data) + def _render_markdown_base( + self, data: MimeData, *, fmt: str, inline: bool, allow_headings: bool + ) -> List[nodes.Element]: + """Base render for a notebook markdown mime output (block or inline).""" + psuedo_element = nodes.Element() # element to hold the parsed markdown + current_parser = self.renderer.md + current_md_config = self.renderer.md_config + try: + # potentially replace the parser temporarily + if fmt == "myst": + # use the current configuration to render the markdown + pass + elif fmt == "commonmark": + # use an isolated, CommonMark only, parser + self.renderer.md_config = MdParserConfig(commonmark_only=True) + self.renderer.md = create_md_parser( + self.renderer.md_config, self.renderer.__class__ + ) + elif fmt == "gfm": + # use an isolated, GitHub Flavoured Markdown only, parser + self.renderer.md_config = MdParserConfig(gfm_only=True) + self.renderer.md = create_md_parser( + self.renderer.md_config, self.renderer.__class__ + ) + else: + self.logger.warning( + f"skipping unknown markdown format: {fmt}", + subtype="unknown_markdown_format", + line=data.line, + ) + return [] + + with self.renderer.current_node_context(psuedo_element): + self.renderer.nested_render_text( + data.string, + data.line or 0, + inline=inline, + allow_headings=allow_headings, + ) + finally: + # restore the parser + self.renderer.md = current_parser + self.renderer.md_config = current_md_config + + return psuedo_element.children + class EntryPointError(Exception): """Exception raised when an entry point cannot be loaded.""" From 58839823ad5f413317e8f706217159003172a215 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Tue, 11 Jan 2022 18:02:46 +0100 Subject: [PATCH 55/75] improve glue code --- docs/use/formatting_outputs.md | 2 +- myst_nb/docutils_.py | 12 +- myst_nb/nb_glue/domain.py | 12 +- myst_nb/nb_glue/elements.py | 309 +++++++++++++++++++-------------- 4 files changed, 189 insertions(+), 146 deletions(-) diff --git 
a/docs/use/formatting_outputs.md b/docs/use/formatting_outputs.md index 25648f90..44e92da9 100644 --- a/docs/use/formatting_outputs.md +++ b/docs/use/formatting_outputs.md @@ -204,7 +204,7 @@ The format of output `text/markdown` can be specified by `render_markdown_format - Note, this requires the installation of the [linkify-it-py package](https://pypi.org/project/linkify-it-py) - `myst`: The MyST parser configuration for the the current document. -CommonMark formatting will ouput basic Markdown syntax: +CommonMark formatting will output basic Markdown syntax: ```{code-cell} ipython3 from IPython.display import display, Markdown diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 0c9cde4a..3ad59911 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -21,12 +21,12 @@ from myst_nb.execute import execute_notebook from myst_nb.loggers import DEFAULT_LOG_TYPE, DocutilsDocLogger from myst_nb.nb_glue.elements import ( - PasteDirective, + PasteAnyDirective, PasteFigureDirective, PasteMarkdownDirective, PasteMarkdownRole, PasteMathDirective, - PasteRole, + PasteRoleAny, PasteTextRole, ) from myst_nb.parse import nb_node_to_dict, notebook_to_tokens @@ -70,15 +70,15 @@ def parse(self, inputstring: str, document: nodes.document) -> None: new_directives = ( ("code-cell", UnexpectedCellDirective), ("raw-cell", UnexpectedCellDirective), - ("glue:", PasteDirective), - ("glue:any", PasteDirective), + ("glue:", PasteAnyDirective), + ("glue:any", PasteAnyDirective), ("glue:figure", PasteFigureDirective), ("glue:math", PasteMathDirective), ("glue:md", PasteMarkdownDirective), ) new_roles = ( - ("glue:", PasteRole()), - ("glue:any", PasteRole()), + ("glue:", PasteRoleAny()), + ("glue:any", PasteRoleAny()), ("glue:text", PasteTextRole()), ("glue:md", PasteMarkdownRole()), ) diff --git a/myst_nb/nb_glue/domain.py b/myst_nb/nb_glue/domain.py index 5c9b2b6c..1e3adf83 100644 --- a/myst_nb/nb_glue/domain.py +++ b/myst_nb/nb_glue/domain.py @@ -7,12 +7,12 @@ from sphinx.domains import Domain from myst_nb.nb_glue.elements import ( - PasteDirective, + PasteAnyDirective, PasteFigureDirective, PasteMarkdownDirective, PasteMarkdownRole, PasteMathDirective, - PasteRole, + PasteRoleAny, PasteTextRole, ) @@ -27,15 +27,15 @@ class NbGlueDomain(Domain): data_version = 0.2 directives = { - "": PasteDirective, - "any": PasteDirective, + "": PasteAnyDirective, + "any": PasteAnyDirective, "figure": PasteFigureDirective, "math": PasteMathDirective, "md": PasteMarkdownDirective, } roles = { - "": PasteRole(), - "any": PasteRole(), + "": PasteRoleAny(), + "any": PasteRoleAny(), "text": PasteTextRole(), "md": PasteMarkdownRole(), } diff --git a/myst_nb/nb_glue/elements.py b/myst_nb/nb_glue/elements.py index ce7289fa..fd26f6d7 100644 --- a/myst_nb/nb_glue/elements.py +++ b/myst_nb/nb_glue/elements.py @@ -1,4 +1,8 @@ -"""Directives and roles which can be used by both docutils and sphinx.""" +"""Directives and roles which can be used by both docutils and sphinx. + +We intentionally do no import sphinx in this module, +in order to allow docutils-only use without sphinx installed. 
+""" from typing import Any, Dict, List, Optional, Tuple, Union import attr @@ -32,13 +36,92 @@ def warning(message: str, document: nodes.document, line: int) -> nodes.system_m ) +def set_source_info(node: nodes.Node, source: str, line: int) -> None: + """Set the source info for a node and its descendants.""" + iterator = getattr(node, "findall", node.traverse) # findall for docutils 0.18 + for _node in iterator(include_self=True): + _node.source = source + _node.line = line + + +@attr.s +class RetrievedData: + """A class to store retrieved mime data.""" + + found: bool = attr.ib() + data: Union[None, str, bytes] = attr.ib(default=None) + metadata: Dict[str, Any] = attr.ib(factory=dict) + nb_renderer: Optional[NbElementRenderer] = attr.ib(default=None) + warning: Optional[str] = attr.ib(default=None) + + +def retrieve_glue_data(document: nodes.document, key: str) -> RetrievedData: + """Retrieve the glue data from a specific document.""" + if "nb_renderer" not in document: + return RetrievedData(False, warning="No 'nb_renderer' found on the document.") + nb_renderer: NbElementRenderer = document["nb_renderer"] + resources = nb_renderer.get_resources() + if "glue" not in resources: + return RetrievedData(False, warning=f"No key {key!r} found in glue data.") + + if key not in resources["glue"]: + return RetrievedData(False, warning=f"No key {key!r} found in glue data.") + + return RetrievedData( + True, + data=resources["glue"][key]["data"], + metadata=resources["glue"][key].get("metadata", {}), + nb_renderer=nb_renderer, + ) + + +def render_glue_output( + key: str, + document: nodes.document, + line: int, + source: str, + inline=False, +) -> Tuple[bool, List[nodes.Node]]: + """Retrive the notebook output data for this glue key, + then return the docutils/sphinx nodes relevant to this data. + + :param key: The glue key to retrieve. + :param document: The current docutils document. + :param line: The current source line number of the directive or role. + :param source: The current source path or description. + :param inline: Whether to render the output as inline (or block). + + :returns: A tuple of (was the key found, the docutils/sphinx nodes). 
+ """ + data = retrieve_glue_data(document, key) + if not data.found: + return (False, [warning(data.warning, document, line)]) + if is_sphinx(document): + _nodes = render_output_sphinx( + data.nb_renderer, data.data, data.metadata, source, line, inline + ) + else: + _nodes = render_output_docutils( + data.nb_renderer, data.data, data.metadata, document, line, inline + ) + # TODO rendering should perhaps return if it succeeded explicitly + if _nodes and isinstance(_nodes[0], nodes.system_message): + return False, _nodes + return True, _nodes + + def render_output_docutils( - document, line, nb_renderer: NbElementRenderer, output: Dict[str, Any], inline=False + nb_renderer: NbElementRenderer, + data: Dict[str, Any], + metadata: Dict[str, Any], + document: nodes.document, + line: int, + inline=False, ) -> List[nodes.Node]: """Render the output in docutils (select mime priority directly).""" mime_priority = nb_renderer.renderer.nb_config.mime_priority try: - mime_type = next(x for x in mime_priority if x in output["data"]) + mime_type = next(x for x in mime_priority if x in data) except StopIteration: return [ warning( @@ -50,8 +133,8 @@ def render_output_docutils( else: data = MimeData( mime_type, - output["data"][mime_type], - output_metadata=output.get("metadata", {}), + data[mime_type], + output_metadata=metadata, line=line, ) if inline: @@ -60,22 +143,20 @@ def render_output_docutils( def render_output_sphinx( - document, - line, nb_renderer: NbElementRenderer, - output: Dict[str, Any], - set_source_info, + data: Dict[str, Any], + metadata: Dict[str, Any], + source: str, + line: int, inline=False, ) -> List[nodes.Node]: """Render the output in sphinx (defer mime priority selection).""" mime_bundle = nodes.container(nb_element="mime_bundle") - set_source_info(mime_bundle) - for mime_type, data in output["data"].items(): + set_source_info(mime_bundle, source, line) + for mime_type, data in data.items(): mime_container = nodes.container(mime_type=mime_type) - set_source_info(mime_container) - data = MimeData( - mime_type, data, output_metadata=output.get("metadata", {}), line=line - ) + set_source_info(mime_container, source, line) + data = MimeData(mime_type, data, output_metadata=metadata, line=line) if inline: nds = nb_renderer.render_mime_type_inline(data) else: @@ -86,67 +167,7 @@ def render_output_sphinx( return [mime_bundle] -def render_glue_output( - key: str, document: nodes.document, line: int, set_source_info, inline=False -) -> List[nodes.Node]: - if "nb_renderer" not in document: - return [warning("No 'nb_renderer' found on the document.", document, line)] - nb_renderer: NbElementRenderer = document["nb_renderer"] - resources = nb_renderer.get_resources() - if "glue" not in resources: - return [ - warning("No glue data found in the notebook resources.", document, line) - ] - if key not in resources["glue"]: - return [warning(f"No key {key!r} found in glue data.", document, line)] - if not resources["glue"][key].get("data"): - return [warning(f"{key!r} does not contain any data.", document, line)] - if is_sphinx(document): - return render_output_sphinx( - document, line, nb_renderer, resources["glue"][key], set_source_info, inline - ) - else: - return render_output_docutils( - document, line, nb_renderer, resources["glue"][key], inline - ) - - -@attr.s -class RetrievedData: - """A class to store retrieved mime data.""" - - warning: Optional[str] = attr.ib() - data: Union[None, str, bytes] = attr.ib(default=None) - metadata: Dict[str, Any] = attr.ib(factory=dict) - nb_renderer: 
Optional[NbElementRenderer] = attr.ib(default=None) - - -def retrieve_mime_data( - document: nodes.document, key: str, mime_type: str -) -> RetrievedData: - """Retrieve the mime data from the document.""" - if "nb_renderer" not in document: - return RetrievedData("No 'nb_renderer' found on the document.") - nb_renderer: NbElementRenderer = document["nb_renderer"] - resources = nb_renderer.get_resources() - if "glue" not in resources: - return RetrievedData(f"No key {key!r} found in glue data.") - - if key not in resources["glue"]: - return RetrievedData(f"No key {key!r} found in glue data.") - - if mime_type not in resources["glue"][key].get("data", {}): - return RetrievedData(f"{key!r} does not contain {mime_type!r} data.") - - return RetrievedData( - None, - resources["glue"][key]["data"][mime_type], - resources["glue"][key].get("metadata", {}), - nb_renderer, - ) - - -class PasteRole: +class _PasteRoleBase: """A role for pasting inline code outputs from notebooks.""" def get_source_info(self, lineno: int = None) -> Tuple[str, int]: @@ -158,10 +179,7 @@ def get_source_info(self, lineno: int = None) -> Tuple[str, int]: def set_source_info(self, node: nodes.Node, lineno: int = None) -> None: """Set the source info for a node and its descendants.""" source, line = self.get_source_info(lineno) - iterator = getattr(node, "findall", node.traverse) # findall for docutils 0.18 - for _node in iterator(include_self=True): - _node.source = source - _node.line = line + set_source_info(node, source, line) def __call__( self, @@ -181,19 +199,29 @@ def __call__( def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: """Run the role.""" - paste_nodes = render_glue_output( + raise NotImplementedError + + +class PasteRoleAny(_PasteRoleBase): + """A role for pasting inline code outputs from notebooks, + using render priority to decide the output mime type. 
+ """ + + def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: + line, source = self.get_source_info() + found, paste_nodes = render_glue_output( self.text, self.inliner.document, - self.lineno, - self.set_source_info, + line, + source, inline=True, ) - if not paste_nodes and isinstance(paste_nodes[0], nodes.system_message): + if not found: return [], paste_nodes return paste_nodes, [] -class PasteTextRole(PasteRole): +class PasteTextRole(_PasteRoleBase): """A role for pasting text outputs from notebooks.""" def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: @@ -207,16 +235,16 @@ def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: # now retrieve the data document = self.inliner.document - result = retrieve_mime_data(document, key, "text/plain") - if result.warning is not None: + + result = retrieve_glue_data(document, key) + if not result.found: + return [], [warning(result.warning, document, self.lineno)] + if "text/plain" not in result.data: return [], [ - warning( - result.warning, - document, - self.lineno, - ) + warning(f"No text/plain found in {key!r} data", document, self.lineno) ] - text = str(result.data).strip("'") + + text = str(result.data["text/plain"]).strip("'") # If formatting is specified, see if we have a number of some kind if formatting: @@ -231,7 +259,7 @@ def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: return [node], [] -class PasteMarkdownRole(PasteRole): +class PasteMarkdownRole(_PasteRoleBase): """A role for pasting markdown outputs from notebooks as inline MyST Markdown.""" def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: @@ -245,20 +273,22 @@ def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: # TODO - check fmt is valid # retrieve the data document = self.inliner.document - result = retrieve_mime_data(document, key, "text/markdown") - if result.warning is not None: + + result = retrieve_glue_data(document, key) + if not result.found: + return [], [warning(result.warning, document, self.lineno)] + if "text/markdown" not in result.data: return [], [ warning( - result.warning, - document, - self.lineno, + f"No text/markdown found in {key!r} data", document, self.lineno ) ] + # TODO this feels a bit hacky cell_key = result.nb_renderer.renderer.nb_config.cell_render_key mime = MimeData( "text/markdown", - result.data, + result.data["text/markdown"], cell_metadata={ cell_key: {"markdown_format": fmt}, }, @@ -271,7 +301,7 @@ def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: return _nodes, [] -class _PasteBaseDirective(Directive): +class _PasteDirectiveBase(Directive): required_arguments = 1 # the key final_argument_whitespace = True @@ -288,13 +318,24 @@ def get_source_info(self) -> Tuple[str, int]: def set_source_info(self, node: nodes.Node) -> None: """Set source and line number to the node and its descendants.""" source, line = self.get_source_info() - iterator = getattr(node, "findall", node.traverse) # findall for docutils 0.18 - for _node in iterator(include_self=True): - _node.source = source - _node.line = line + set_source_info(node, source, line) + + +class PasteAnyDirective(_PasteDirectiveBase): + """A directive for pasting code outputs from notebooks, + using render priority to decide the output mime type. 
+ """ + + def run(self) -> List[nodes.Node]: + """Run the directive.""" + line, source = self.get_source_info() + _, paste_nodes = render_glue_output( + self.arguments[0], self.document, line, source + ) + return paste_nodes -class PasteMarkdownDirective(_PasteBaseDirective): +class PasteMarkdownDirective(_PasteDirectiveBase): """A directive for pasting markdown outputs from notebooks as MyST Markdown.""" def fmt(argument): @@ -306,20 +347,24 @@ def fmt(argument): def run(self) -> List[nodes.Node]: """Run the directive.""" - result = retrieve_mime_data(self.document, self.arguments[0], "text/markdown") - if result.warning is not None: + key = self.arguments[0] + result = retrieve_glue_data(self.document, key) + if not result.found: + return [warning(result.warning, self.document, self.lineno)] + if "text/markdown" not in result.data: return [ warning( - result.warning, + f"No text/markdown found in {key!r} data", self.document, self.lineno, ) ] + # TODO this "override" feels a bit hacky cell_key = result.nb_renderer.renderer.nb_config.cell_render_key mime = MimeData( "text/markdown", - result.data, + result.data["text/markdown"], cell_metadata={ cell_key: {"markdown_format": self.options.get("format", "commonmark")}, }, @@ -333,17 +378,7 @@ def run(self) -> List[nodes.Node]: return _nodes -class PasteDirective(_PasteBaseDirective): - """A directive for pasting code outputs from notebooks.""" - - def run(self) -> List[nodes.Node]: - """Run the directive.""" - return render_glue_output( - self.arguments[0], self.document, self.lineno, self.set_source_info - ) - - -class PasteFigureDirective(PasteDirective): +class PasteFigureDirective(_PasteDirectiveBase): """A directive for pasting code outputs from notebooks, wrapped in a figure.""" def align(argument): @@ -352,16 +387,20 @@ def align(argument): def figwidth_value(argument): return directives.length_or_percentage_or_unitless(argument, "px") - option_spec = (PasteDirective.option_spec or {}).copy() - option_spec["figwidth"] = figwidth_value - option_spec["figclass"] = directives.class_option - option_spec["align"] = align - option_spec["name"] = directives.unchanged + option_spec = { + "figwidth": figwidth_value, + "figclass": directives.class_option, + "align": align, + "name": directives.unchanged, + } has_content = True def run(self): - paste_nodes = super().run() - if not paste_nodes or isinstance(paste_nodes[0], nodes.system_message): + line, source = self.get_source_info() + found, paste_nodes = render_glue_output( + self.arguments[0], self.document, line, source + ) + if not found: return paste_nodes # note: most of this is copied directly from sphinx.Figure @@ -407,7 +446,7 @@ def run(self): return [figure_node] -class PasteMathDirective(_PasteBaseDirective): +class PasteMathDirective(_PasteDirectiveBase): """A directive for pasting latex outputs from notebooks as math.""" option_spec = { @@ -419,16 +458,20 @@ class PasteMathDirective(_PasteBaseDirective): def run(self) -> List[nodes.Node]: """Run the directive.""" - result = retrieve_mime_data(self.document, self.arguments[0], "text/latex") - if result.warning is not None: + key = self.arguments[0] + result = retrieve_glue_data(self.document, key) + if not result.found: + return [warning(result.warning, self.document, self.lineno)] + if "text/latex" not in result.data: return [ warning( - result.warning, + f"No text/latex found in {key!r} data", self.document, self.lineno, ) ] - latex = strip_latex_delimiters(str(result.data)) + + latex = 
strip_latex_delimiters(str(result.data["text/latex"])) label = self.options.get("label", self.options.get("name")) node = nodes.math_block( latex, From 0d27b3eaa7d6ea2c07171699f6ae3c235c38ccaa Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Tue, 11 Jan 2022 20:31:29 +0100 Subject: [PATCH 56/75] Improve registering of per-page javascript --- myst_nb/configuration.py | 1 + myst_nb/docutils_.py | 37 +++++++++++++----- myst_nb/render.py | 13 ++++++- myst_nb/sphinx_.py | 82 ++++++++++++++++++++++++++-------------- tests/test_parser.py | 7 +++- 5 files changed, 100 insertions(+), 40 deletions(-) diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index a9c52f91..a7507879 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -144,6 +144,7 @@ def ipywidgets_js_factory() -> Dict[str, Dict[str, str]]: }, # Load IPywidgets bundle for embedding. "https://unpkg.com/@jupyter-widgets/html-manager@^0.20.0/dist/embed-amd.js": { + "data-jupyter-widgets-cdn": "https://cdn.jsdelivr.net/npm/", "crossorigin": "anonymous", }, } diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 3ad59911..10366acc 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -1,6 +1,7 @@ """A parser for docutils.""" from contextlib import suppress from functools import partial +import json from typing import Any, Dict, List, Optional, Tuple from docutils import nodes @@ -38,10 +39,12 @@ standard_nb_read, ) from myst_nb.render import ( + WIDGET_STATE_MIMETYPE, MimeData, NbElementRenderer, create_figure_context, load_renderer, + sanitize_script_content, ) DOCUTILS_EXCLUDED_ARGS = { @@ -173,7 +176,9 @@ def _parse(self, inputstring: str, document: nodes.document) -> None: nb_renderer: NbElementRenderer = load_renderer(renderer_name)( mdit_parser.renderer, logger ) - mdit_parser.options["nb_renderer"] = nb_renderer + # we temporarily store nb_renderer on the document, + # so that roles/directives can access it + document.attributes["nb_renderer"] = nb_renderer # we currently do this early, so that the nb_renderer has access to things mdit_parser.renderer.setup_render(mdit_parser.options, mdit_env) @@ -182,9 +187,6 @@ def _parse(self, inputstring: str, document: nodes.document) -> None: notebook, logger, mdit_parser.renderer.get_cell_render_config ) mdit_parser.renderer.md_options["nb_resources"] = resources - # we temporarily store nb_renderer on the document, - # so that roles/directives can access it - document.attributes["nb_renderer"] = nb_renderer # parse to tokens mdit_tokens = notebook_to_tokens(notebook, mdit_parser, mdit_env, logger) @@ -196,8 +198,10 @@ def _parse(self, inputstring: str, document: nodes.document) -> None: path = ["processed.ipynb"] nb_renderer.write_file(path, content, overwrite=True) # TODO also write CSS to output folder if necessary or always? - # TODO we also need to load JS URLs if ipywidgets are present and HTML + # TODO we also need to load JS URLs from document["nb_js_files"], + # if HTML output is requested + # remove temporary state document.attributes.pop("nb_renderer") @@ -212,7 +216,7 @@ def nb_config(self) -> NbParserConfig: @property def nb_renderer(self) -> NbElementRenderer: """Get the notebook element renderer.""" - return self.md_options["nb_renderer"] + return self.document["nb_renderer"] def get_cell_render_config( self, @@ -258,9 +262,24 @@ def render_nb_metadata(self, token: SyntaxTreeNode) -> None: # TODO should we provide hook for NbElementRenderer? - # TODO how to handle ipywidgets in docutils? 
- ipywidgets = metadata.pop("widgets", None) # noqa: F841 - # ipywidgets_mime = (ipywidgets or {}).get(WIDGET_STATE_MIMETYPE, {}) + # store ipywidgets state in metadata, + # which will be later added to HTML page context + # The JSON inside the script tag is identified and parsed by: + # https://github.com/jupyter-widgets/ipywidgets/blob/32f59acbc63c3ff0acf6afa86399cb563d3a9a86/packages/html-manager/src/libembed.ts#L36 + # see also: https://ipywidgets.readthedocs.io/en/7.6.5/embedding.html + ipywidgets = metadata.pop("widgets", None) + ipywidgets_mime = (ipywidgets or {}).get(WIDGET_STATE_MIMETYPE, {}) + if ipywidgets_mime.get("state", None): + self.nb_renderer.add_js_file( + "ipywidgets_state", + None, + { + "type": "application/vnd.jupyter.widget-state+json", + "body": sanitize_script_content(json.dumps(ipywidgets_mime)), + }, + ) + for i, (path, kwargs) in enumerate(self.nb_config.ipywidgets_js.items()): + self.nb_renderer.add_js_file(f"ipywidgets_{i}", path, kwargs) # forward the rest to the front_matter renderer self.render_front_matter( diff --git a/myst_nb/render.py b/myst_nb/render.py index c5d8c2db..be77d785 100644 --- a/myst_nb/render.py +++ b/myst_nb/render.py @@ -1,4 +1,8 @@ -"""Module for rendering notebook components to docutils nodes.""" +"""Module for rendering notebook components to docutils nodes. + +Note, this module purposely does not import any Sphinx modules at the top-level, +in order for docutils-only use. +""" from binascii import a2b_base64 from contextlib import contextmanager from functools import lru_cache @@ -144,6 +148,13 @@ def write_file( else: return str(filepath) + def add_js_file(self, key: str, uri: Optional[str], kwargs: Dict[str, str]) -> None: + """Register a JavaScript file to include in the HTML output of this document.""" + if "nb_js_files" not in self.renderer.document: + self.renderer.document["nb_js_files"] = {} + # TODO handle duplicate keys (whether to override/ignore) + self.renderer.document["nb_js_files"][key] = (uri, kwargs) + def render_raw_cell( self, content: str, metadata: dict, cell_index: int, source_line: int ) -> List[nodes.Element]: diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index 2f739c3b..da242fe0 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -4,7 +4,7 @@ import json import os from pathlib import Path -from typing import Any, DefaultDict, Dict, List, Optional, Sequence, Set, cast +from typing import Any, DefaultDict, Dict, List, Optional, Sequence, Set, Tuple, cast from docutils import nodes from markdown_it.token import Token @@ -100,7 +100,7 @@ def sphinx_setup(app: Sphinx): app.connect("builder-inited", add_html_static_path) app.add_css_file("mystnb.css") # note, this event is only available in Sphinx >= 3.5 - app.connect("html-page-context", install_ipywidgets) + app.connect("html-page-context", add_js_files) # add configuration for hiding cell input/output # TODO replace this, or make it optional @@ -188,26 +188,13 @@ def add_html_static_path(app: Sphinx): app.config.html_static_path.append(str(static_path)) -def install_ipywidgets(app: Sphinx, pagename: str, *args: Any, **kwargs: Any) -> None: - """Install ipywidgets Javascript, if required on the page.""" +def add_js_files(app: Sphinx, pagename: str, *args: Any, **kwargs: Any) -> None: + """Add JS files for this page, identified from the parsing of the notebook.""" if app.builder.format != "html": return - ipywidgets_state = NbMetadataCollector.get_doc_data(app.env)[pagename].get( - "ipywidgets_state", None - ) - if ipywidgets_state is not None: - 
# see: https://ipywidgets.readthedocs.io/en/7.6.5/embedding.html - - for path, kwargs in app.env.config["nb_ipywidgets_js"].items(): - app.add_js_file(path, **kwargs) - - # The state of all the widget models on the page - # TODO how to add data-jupyter-widgets-cdn="https://cdn.jsdelivr.net/npm/"? - app.add_js_file( - None, - type="application/vnd.jupyter.widget-state+json", - body=ipywidgets_state, - ) + js_files = NbMetadataCollector.get_js_files(app.env, pagename) + for path, kwargs in js_files.values(): + app.add_js_file(path, **kwargs) def update_togglebutton_classes(app: Sphinx, config): @@ -303,7 +290,9 @@ def parse(self, inputstring: str, document: nodes.document) -> None: nb_renderer: NbElementRenderer = load_renderer(renderer_name)( mdit_parser.renderer, logger ) - mdit_parser.options["nb_renderer"] = nb_renderer + # we temporarily store nb_renderer on the document, + # so that roles/directives can access it + document.attributes["nb_renderer"] = nb_renderer # we currently do this early, so that the nb_renderer has access to things mdit_parser.renderer.setup_render(mdit_parser.options, mdit_env) @@ -312,9 +301,6 @@ def parse(self, inputstring: str, document: nodes.document) -> None: notebook, logger, mdit_parser.renderer.get_cell_render_config ) mdit_parser.renderer.md_options["nb_resources"] = resources - # we temporarily store nb_renderer on the document, - # so that roles/directives can access it - document.attributes["nb_renderer"] = nb_renderer # parse to tokens mdit_tokens = notebook_to_tokens(notebook, mdit_parser, mdit_env, logger) @@ -327,6 +313,15 @@ def parse(self, inputstring: str, document: nodes.document) -> None: path[-1] += ".ipynb" nb_renderer.write_file(path, content, overwrite=True) + # move some document metadata to environment metadata, + # so that we can later read it from the environment, + # rather than having to load the doctree + for key, (uri, kwargs) in document.attributes.pop("nb_js_files", {}).items(): + NbMetadataCollector.add_js_file( + self.env, self.env.docname, key, uri, kwargs + ) + + # remove temporary state document.attributes.pop("nb_renderer") @@ -341,7 +336,7 @@ def nb_config(self) -> NbParserConfig: @property def nb_renderer(self) -> NbElementRenderer: """Get the notebook element renderer.""" - return self.md_options["nb_renderer"] + return self.document["nb_renderer"] def get_cell_render_config( self, @@ -385,19 +380,25 @@ def render_nb_metadata(self, token: SyntaxTreeNode) -> None: env.metadata[env.docname][key] = metadata.pop(key, None) # TODO should we provide hook for NbElementRenderer? 
- # Also add method to NbElementRenderer, to store scripts to load # store ipywidgets state in metadata, # which will be later added to HTML page context # The JSON inside the script tag is identified and parsed by: # https://github.com/jupyter-widgets/ipywidgets/blob/32f59acbc63c3ff0acf6afa86399cb563d3a9a86/packages/html-manager/src/libembed.ts#L36 + # see also: https://ipywidgets.readthedocs.io/en/7.6.5/embedding.html ipywidgets = metadata.pop("widgets", None) ipywidgets_mime = (ipywidgets or {}).get(WIDGET_STATE_MIMETYPE, {}) if ipywidgets_mime.get("state", None): - string = sanitize_script_content(json.dumps(ipywidgets_mime)) - NbMetadataCollector.set_doc_data( - env, env.docname, "ipywidgets_state", string + self.nb_renderer.add_js_file( + "ipywidgets_state", + None, + { + "type": "application/vnd.jupyter.widget-state+json", + "body": sanitize_script_content(json.dumps(ipywidgets_mime)), + }, ) + for i, (path, kwargs) in enumerate(self.nb_config.ipywidgets_js.items()): + self.nb_renderer.add_js_file(f"ipywidgets_{i}", path, kwargs) # forward the rest to the front_matter renderer self.render_front_matter( @@ -730,6 +731,29 @@ def new_exec_data(env: BuildEnvironment) -> bool: """Return whether any notebooks have updated execution data.""" return getattr(env, "nb_new_exec_data", False) + @classmethod + def add_js_file( + cls, + env: BuildEnvironment, + docname: str, + key: str, + uri: Optional[str], + kwargs: Dict[str, str], + ): + """Register a JavaScript file to include in the HTML output.""" + if not hasattr(env, "nb_metadata"): + env.nb_metadata = defaultdict(dict) + js_files = env.nb_metadata.setdefault(docname, {}).setdefault("js_files", {}) + # TODO handle whether overrides are allowed + js_files[key] = (uri, kwargs) + + @classmethod + def get_js_files( + cls, env: BuildEnvironment, docname: str + ) -> Dict[str, Tuple[Optional[str], Dict[str, str]]]: + """Get myst-nb docname -> execution data.""" + return cls.get_doc_data(env)[docname].get("js_files", {}) + def clear_doc(self, app: Sphinx, env: BuildEnvironment, docname: str) -> None: if not hasattr(env, "nb_metadata"): env.nb_metadata = defaultdict(dict) diff --git a/tests/test_parser.py b/tests/test_parser.py index b8bf89d9..2f42281a 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -95,7 +95,12 @@ def test_ipywidgets(sphinx_run): sphinx_run.build() # print(sphinx_run.status()) assert sphinx_run.warnings() == "" - assert "ipywidgets_state" in sphinx_run.env.nb_metadata["ipywidgets"] + assert "js_files" in sphinx_run.env.nb_metadata["ipywidgets"] + assert set(sphinx_run.env.nb_metadata["ipywidgets"]["js_files"]) == { + "ipywidgets_state", + "ipywidgets_0", + "ipywidgets_1", + } head_scripts = sphinx_run.get_html().select("head > script") assert any("require.js" in script.get("src", "") for script in head_scripts) assert any("embed-amd.js" in script.get("src", "") for script in head_scripts) From e87cf4ce31f21a3eddfea362d0f377f02717825b Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Tue, 11 Jan 2022 20:33:39 +0100 Subject: [PATCH 57/75] Remove uneeded jupyter_sphinx CSS --- myst_nb/_static/mystnb.css | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/myst_nb/_static/mystnb.css b/myst_nb/_static/mystnb.css index 7374e231..adcbb351 100644 --- a/myst_nb/_static/mystnb.css +++ b/myst_nb/_static/mystnb.css @@ -36,18 +36,6 @@ div.cell_input > div, div.cell_output div.output > div.highlight { margin-top: 1em; } -/* TODO remove/replace? 
*/ -/* Outputs from jupyter_sphinx overrides to remove extra CSS */ -div.section div.jupyter_container { - padding: .4em; - margin: 0 0 .4em 0; - background-color: none; - border: none; - -moz-box-shadow: none; - -webkit-box-shadow: none; - box-shadow: none; -} - /* Text outputs from cells */ .cell_output .output.text_plain, .cell_output .output.traceback, From 1dd2e64a1ee312bfcd36e77344f4e0dc9e1f27ac Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Tue, 11 Jan 2022 23:50:21 +0100 Subject: [PATCH 58/75] In sphinx, cache glue keys (on env)/outputs(on file) --- myst_nb/docutils_.py | 6 ++---- myst_nb/nb_glue/__init__.py | 20 ++++++++++++++++++-- myst_nb/sphinx_.py | 21 +++++++++++++++++---- tests/test_glue.py | 21 ++++++++------------- 4 files changed, 45 insertions(+), 23 deletions(-) diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 10366acc..92970041 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -195,11 +195,9 @@ def _parse(self, inputstring: str, document: nodes.document) -> None: # write final (updated) notebook to output folder (utf8 is standard encoding) content = nbformat.writes(notebook).encode("utf-8") - path = ["processed.ipynb"] - nb_renderer.write_file(path, content, overwrite=True) + nb_renderer.write_file(["processed.ipynb"], content, overwrite=True) + # TODO also write CSS to output folder if necessary or always? - # TODO we also need to load JS URLs from document["nb_js_files"], - # if HTML output is requested # remove temporary state document.attributes.pop("nb_renderer") diff --git a/myst_nb/nb_glue/__init__.py b/myst_nb/nb_glue/__init__.py index e15eda30..796038ea 100644 --- a/myst_nb/nb_glue/__init__.py +++ b/myst_nb/nb_glue/__init__.py @@ -6,7 +6,7 @@ import IPython from IPython.display import display as ipy_display -from nbformat import NotebookNode +from nbformat import NotebookNode, v4 GLUE_PREFIX = "application/papermill.record/" @@ -42,7 +42,8 @@ def extract_glue_data( logger: Logger, ) -> None: """Extract all the glue data from the notebook, into the resources dictionary.""" - data = resources.setdefault("glue", {}) + # note this assumes v4 notebook format + data: Dict[str, NotebookNode] = resources.setdefault("glue", {}) for index, cell in enumerate(notebook.cells): if cell.cell_type != "code": continue @@ -66,3 +67,18 @@ def extract_glue_data( # assume that the output is a displayable object outputs.append(output) cell.outputs = outputs + + +def glue_dict_to_nb(data: Dict[str, NotebookNode]) -> NotebookNode: + """Convert glue data to a notebook that can be written to disk by nbformat. + + The notebook contains a single code cell that contains the glue outputs, + and the key for each output in a list at ``cell["metadata"]["glue"]``. + + This can be read in any post-processing step, where the glue outputs are + required. 
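+
+    For example, a minimal sketch of reading the glue data back in a
+    post-processing step, assuming the ``<docname>.__glue__.ipynb`` file
+    written to the output folder by the Sphinx parser::
+
+        import nbformat
+
+        # read the single-cell glue notebook (placeholder docname)
+        nb = nbformat.read("docname.__glue__.ipynb", as_version=4)
+        cell = nb.cells[0]
+        # the keys in cell.metadata["glue"] align with cell.outputs
+        glue_data = dict(zip(cell.metadata["glue"], cell.outputs))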
+ """ + # note this assumes v4 notebook format + code_cell = v4.new_code_cell(outputs=list(data.values())) + code_cell.metadata["glue"] = list(data.keys()) + return v4.new_notebook(cells=[code_cell]) diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index da242fe0..08f415cc 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -28,6 +28,7 @@ from myst_nb.configuration import NbParserConfig from myst_nb.execute import ExecutionResult, execute_notebook from myst_nb.loggers import DEFAULT_LOG_TYPE, SphinxDocLogger +from myst_nb.nb_glue import glue_dict_to_nb from myst_nb.nb_glue.domain import NbGlueDomain from myst_nb.parse import nb_node_to_dict, notebook_to_tokens from myst_nb.preprocess import preprocess_notebook @@ -308,14 +309,26 @@ def parse(self, inputstring: str, document: nodes.document) -> None: mdit_parser.renderer.render(mdit_tokens, mdit_parser.options, mdit_env) # write final (updated) notebook to output folder (utf8 is standard encoding) - content = nbformat.writes(notebook).encode("utf-8") path = self.env.docname.split("/") - path[-1] += ".ipynb" - nb_renderer.write_file(path, content, overwrite=True) + ipynb_path = path[:-1] + [path[-1] + ".ipynb"] + content = nbformat.writes(notebook).encode("utf-8") + nb_renderer.write_file(ipynb_path, content, overwrite=True) + + # write glue data to the output folder, + # and store the keys to environment doc metadata, + # so that they may be used in any post-transform steps + if resources.get("glue", None): + glue_notebook = glue_dict_to_nb(resources["glue"]) + content = nbformat.writes(glue_notebook).encode("utf-8") + glue_path = path[:-1] + [path[-1] + ".__glue__.ipynb"] + nb_renderer.write_file(glue_path, content, overwrite=True) + NbMetadataCollector.set_doc_data( + self.env, self.env.docname, "glue", list(resources["glue"].keys()) + ) # move some document metadata to environment metadata, # so that we can later read it from the environment, - # rather than having to load the doctree + # rather than having to load the whole doctree for key, (uri, kwargs) in document.attributes.pop("nb_js_files", {}).items(): NbMetadataCollector.add_js_file( self.env, self.env.docname, key, uri, kwargs diff --git a/tests/test_glue.py b/tests/test_glue.py index 7b434ffa..b2fbc756 100644 --- a/tests/test_glue.py +++ b/tests/test_glue.py @@ -101,22 +101,17 @@ def test_parser(sphinx_run, clean_doctree, file_regression): # print(sphinx_run.status()) # print(sphinx_run.warnings()) assert sphinx_run.warnings() == "" + assert sphinx_run.env.nb_metadata["with_glue"]["glue"] == [ + "key_text1", + "key_float", + "key_undisplayed", + "key_df", + "key_plt", + "sym_eq", + ] doctree = clean_doctree(sphinx_run.get_resolved_doctree("with_glue")) file_regression.check( doctree.pformat(), extension=f"{sphinx_run.software_versions}.xml", encoding="utf8", ) - # from myst_nb.nb_glue.domain import NbGlueDomain - # glue_domain = NbGlueDomain.from_env(sphinx_run.app.env) - # assert set(glue_domain.cache) == { - # "key_text1", - # "key_float", - # "key_undisplayed", - # "key_df", - # "key_plt", - # "sym_eq", - # } - # glue_domain.clear_doc("with_glue") - # assert glue_domain.cache == {} - # assert glue_domain.docmap == {} From fff43fac18d27551063287bfd442243a9b711c80 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Wed, 12 Jan 2022 00:01:34 +0100 Subject: [PATCH 59/75] Update docs/use/formatting_outputs.md Co-authored-by: Chris Holdgraf <choldgraf@berkeley.edu> --- docs/use/formatting_outputs.md | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/docs/use/formatting_outputs.md b/docs/use/formatting_outputs.md index 44e92da9..49a7fe45 100644 --- a/docs/use/formatting_outputs.md +++ b/docs/use/formatting_outputs.md @@ -202,7 +202,7 @@ The format of output `text/markdown` can be specified by `render_markdown_format - `commonmark` (default): Restricted to the [CommonMark specification](https://commonmark.org/). - `gfm`: Restricted to the [GitHub-flavored markdown](https://github.github.com/gfm/). - Note, this requires the installation of the [linkify-it-py package](https://pypi.org/project/linkify-it-py) -- `myst`: The MyST parser configuration for the the current document. +- `myst`: Uses [the MyST parser](https://myst-parser.readthedocs.io/en/latest/) with the same configuration as the current document. CommonMark formatting will output basic Markdown syntax: From 64c6d2bb0aab005d618c9ea149a089aa6f26d867 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Wed, 12 Jan 2022 00:03:24 +0100 Subject: [PATCH 60/75] remove unnecessary header in glue.md --- docs/use/glue.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/use/glue.md b/docs/use/glue.md index aa3a9e69..825b7fd2 100644 --- a/docs/use/glue.md +++ b/docs/use/glue.md @@ -368,8 +368,6 @@ Here is some {glue:md}`inline_md:myst`! Here are a few more specific and advanced uses of the `glue` submodule. -### Pasting - ### Pasting into tables In addition to pasting blocks of outputs, or in-line with text, you can also paste directly From a7a7b326adf49d0891c599aaef71ebc7700b325b Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Wed, 12 Jan 2022 03:38:29 +0100 Subject: [PATCH 61/75] Improve docutils output of CSS --- MANIFEST.in | 2 +- myst_nb/configuration.py | 35 ++++++--- myst_nb/docutils_.py | 101 ++++++++++++++----------- myst_nb/render.py | 27 +++++++ myst_nb/sphinx_.py | 69 +++++++---------- myst_nb/static/__init__.py | 0 myst_nb/{_static => static}/mystnb.css | 0 tests/test_docutils.py | 24 +++++- tests/test_text_based.py | 1 - 9 files changed, 160 insertions(+), 99 deletions(-) create mode 100644 myst_nb/static/__init__.py rename myst_nb/{_static => static}/mystnb.css (100%) diff --git a/MANIFEST.in b/MANIFEST.in index d022067b..53f8647d 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -11,4 +11,4 @@ exclude codecov.yml include LICENSE include CHANGELOG.md include README.md -include myst_nb/_static/mystnb.css +include myst_nb/static/mystnb.css diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index a7507879..30796d19 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -181,7 +181,6 @@ class NbParserConfig: # configuration override keys (applied after file read) - # TODO mark which config are allowed per notebook/cell # TODO previously we had `nb_render_key` (default: "render"), # for cell.metadata.render.image and cell.metadata.render.figure`, # and also `timeout`/`allow_errors` in notebook.metadata.execution @@ -279,14 +278,6 @@ class NbParserConfig: # render options - output_folder: str = attr.ib( - default="build", - validator=instance_of(str), - metadata={ - "help": "Folder for external outputs (like images), skipped if empty", - "sphinx_exclude": True, # in sphinx we always output to the build folder - }, - ) render_plugin: str = attr.ib( default="default", validator=instance_of(str), # TODO check it can be loaded? 
@@ -418,6 +409,32 @@ class NbParserConfig: repr=False, ) + # write options for docutils + output_folder: str = attr.ib( + default="build", + validator=instance_of(str), + metadata={ + "help": "Folder for external outputs (like images), skipped if empty", + "sphinx_exclude": True, # in sphinx we always output to the build folder + }, + ) + append_css: bool = attr.ib( + default=True, + validator=instance_of(bool), + metadata={ + "help": "Add default MyST-NB CSS to HTML outputs", + "sphinx_exclude": True, + }, + ) + metadata_to_fm: bool = attr.ib( + default=False, + validator=instance_of(bool), + metadata={ + "help": "Convert unhandled metadata to frontmatter", + "sphinx_exclude": True, + }, + ) + @classmethod def get_fields(cls) -> Tuple[attr.Attribute, ...]: return attr.fields(cls) diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 92970041..19117958 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -1,7 +1,8 @@ """A parser for docutils.""" from contextlib import suppress from functools import partial -import json +from importlib import resources as import_resources +import os from typing import Any, Dict, List, Optional, Tuple from docutils import nodes @@ -17,7 +18,9 @@ from myst_parser.main import MdParserConfig, create_md_parser import nbformat from nbformat import NotebookNode +from pygments.formatters import get_formatter_by_name +from myst_nb import static from myst_nb.configuration import NbParserConfig from myst_nb.execute import execute_notebook from myst_nb.loggers import DEFAULT_LOG_TYPE, DocutilsDocLogger @@ -39,12 +42,10 @@ standard_nb_read, ) from myst_nb.render import ( - WIDGET_STATE_MIMETYPE, MimeData, NbElementRenderer, create_figure_context, load_renderer, - sanitize_script_content, ) DOCUTILS_EXCLUDED_ARGS = { @@ -193,11 +194,38 @@ def _parse(self, inputstring: str, document: nodes.document) -> None: # convert to docutils AST, which is added to the document mdit_parser.renderer.render(mdit_tokens, mdit_parser.options, mdit_env) - # write final (updated) notebook to output folder (utf8 is standard encoding) - content = nbformat.writes(notebook).encode("utf-8") - nb_renderer.write_file(["processed.ipynb"], content, overwrite=True) + if nb_config.output_folder: + # write final (updated) notebook to output folder (utf8 is standard encoding) + content = nbformat.writes(notebook).encode("utf-8") + nb_renderer.write_file(["processed.ipynb"], content, overwrite=True) - # TODO also write CSS to output folder if necessary or always? 
+ # if we are using an HTML writer, dynamically add the CSS to the output + if nb_config.append_css and hasattr(document.settings, "stylesheet"): + css_paths = [] + + css_paths.append( + nb_renderer.write_file( + ["mystnb.css"], + import_resources.read_binary(static, "mystnb.css"), + overwrite=True, + ) + ) + fmt = get_formatter_by_name("html", style="default") + css_paths.append( + nb_renderer.write_file( + ["pygments.css"], + fmt.get_style_defs(".code").encode("utf-8"), + overwrite=True, + ) + ) + css_paths = [os.path.abspath(path) for path in css_paths] + # stylesheet and stylesheet_path are mutually exclusive + if document.settings.stylesheet_path: + document.settings.stylesheet_path.extend(css_paths) + if document.settings.stylesheet: + document.settings.stylesheet.extend(css_paths) + + # TODO also handle JavaScript # remove temporary state document.attributes.pop("nb_renderer") @@ -252,43 +280,26 @@ def get_cell_render_config( def render_nb_metadata(self, token: SyntaxTreeNode) -> None: """Render the notebook metadata.""" metadata = dict(token.meta) - - # save these special keys on the document, rather than as docinfo - for key in ("kernelspec", "language_info", "source_map"): + special_keys = ("kernelspec", "language_info", "source_map") + for key in special_keys: + # save these special keys on the document, rather than as docinfo if key in metadata: - self.document[f"nb_{key}"] = metadata.pop(key) - - # TODO should we provide hook for NbElementRenderer? - - # store ipywidgets state in metadata, - # which will be later added to HTML page context - # The JSON inside the script tag is identified and parsed by: - # https://github.com/jupyter-widgets/ipywidgets/blob/32f59acbc63c3ff0acf6afa86399cb563d3a9a86/packages/html-manager/src/libembed.ts#L36 - # see also: https://ipywidgets.readthedocs.io/en/7.6.5/embedding.html - ipywidgets = metadata.pop("widgets", None) - ipywidgets_mime = (ipywidgets or {}).get(WIDGET_STATE_MIMETYPE, {}) - if ipywidgets_mime.get("state", None): - self.nb_renderer.add_js_file( - "ipywidgets_state", - None, - { - "type": "application/vnd.jupyter.widget-state+json", - "body": sanitize_script_content(json.dumps(ipywidgets_mime)), - }, + self.document[f"nb_{key}"] = metadata.get(key) + + metadata = self.nb_renderer.render_nb_metadata(dict(token.meta)) + + if self.nb_config.metadata_to_fm: + # forward the remaining metadata to the front_matter renderer + top_matter = {k: v for k, v in metadata.items() if k not in special_keys} + self.render_front_matter( + Token( + "front_matter", + "", + 0, + map=[0, 0], + content=top_matter, # type: ignore[arg-type] + ), ) - for i, (path, kwargs) in enumerate(self.nb_config.ipywidgets_js.items()): - self.nb_renderer.add_js_file(f"ipywidgets_{i}", path, kwargs) - - # forward the rest to the front_matter renderer - self.render_front_matter( - Token( - "front_matter", - "", - 0, - map=[0, 0], - content=metadata, # type: ignore[arg-type] - ), - ) def render_nb_widget_state(self, token: SyntaxTreeNode) -> None: """Render the HTML defining the ipywidget state.""" @@ -484,14 +495,16 @@ def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: def _run_cli(writer_name: str, writer_description: str, argv: Optional[List[str]]): """Run the command line interface for a particular writer.""" - # TODO note to run this with --report="info", to see notebook execution publish_cmdline( parser=Parser(), writer_name=writer_name, description=( f"Generates {writer_description} from standalone MyST Notebook sources.\n" - 
f"{default_description}" + f"{default_description}\n" + "External outputs are written to `--nb-output-folder`.\n" ), + # to see notebook execution info by default + settings_overrides={"report_level": 1}, argv=argv, ) diff --git a/myst_nb/render.py b/myst_nb/render.py index be77d785..ee9dc434 100644 --- a/myst_nb/render.py +++ b/myst_nb/render.py @@ -155,6 +155,33 @@ def add_js_file(self, key: str, uri: Optional[str], kwargs: Dict[str, str]) -> N # TODO handle duplicate keys (whether to override/ignore) self.renderer.document["nb_js_files"][key] = (uri, kwargs) + def render_nb_metadata(self, metadata: dict) -> dict: + """Render the notebook metadata. + + :returns: unhandled metadata + """ + # add ipywidgets state JavaScript, + # The JSON inside the script tag is identified and parsed by: + # https://github.com/jupyter-widgets/ipywidgets/blob/32f59acbc63c3ff0acf6afa86399cb563d3a9a86/packages/html-manager/src/libembed.ts#L36 + # see also: https://ipywidgets.readthedocs.io/en/7.6.5/embedding.html + ipywidgets = metadata.pop("widgets", None) + ipywidgets_mime = (ipywidgets or {}).get(WIDGET_STATE_MIMETYPE, {}) + if ipywidgets_mime.get("state", None): + self.add_js_file( + "ipywidgets_state", + None, + { + "type": "application/vnd.jupyter.widget-state+json", + "body": sanitize_script_content(json.dumps(ipywidgets_mime)), + }, + ) + for i, (path, kwargs) in enumerate( + self.renderer.nb_config.ipywidgets_js.items() + ): + self.add_js_file(f"ipywidgets_{i}", path, kwargs) + + return metadata + def render_raw_cell( self, content: str, metadata: dict, cell_index: int, source_line: int ) -> List[nodes.Element]: diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index 08f415cc..551c5d95 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -1,7 +1,7 @@ """An extension for sphinx""" from collections import defaultdict from contextlib import suppress -import json +from importlib import resources as import_resources import os from pathlib import Path from typing import Any, DefaultDict, Dict, List, Optional, Sequence, Set, Tuple, cast @@ -23,8 +23,9 @@ from sphinx.transforms.post_transforms import SphinxPostTransform from sphinx.util import logging as sphinx_logging from sphinx.util.docutils import ReferenceRole +from sphinx.util.fileutil import copy_asset_file -from myst_nb import __version__ +from myst_nb import __version__, static from myst_nb.configuration import NbParserConfig from myst_nb.execute import ExecutionResult, execute_notebook from myst_nb.loggers import DEFAULT_LOG_TYPE, SphinxDocLogger @@ -34,12 +35,10 @@ from myst_nb.preprocess import preprocess_notebook from myst_nb.read import UnexpectedCellDirective, create_nb_reader from myst_nb.render import ( - WIDGET_STATE_MIMETYPE, MimeData, NbElementRenderer, create_figure_context, load_renderer, - sanitize_script_content, ) SPHINX_LOGGER = sphinx_logging.getLogger(__name__) @@ -98,10 +97,10 @@ def sphinx_setup(app: Sphinx): app.add_post_transform(SelectMimeType) # add HTML resources - app.connect("builder-inited", add_html_static_path) app.add_css_file("mystnb.css") + app.connect("build-finished", add_global_html_resources) # note, this event is only available in Sphinx >= 3.5 - app.connect("html-page-context", add_js_files) + app.connect("html-page-context", add_per_page_html_resources) # add configuration for hiding cell input/output # TODO replace this, or make it optional @@ -182,14 +181,18 @@ def add_exclude_patterns(app: Sphinx, config): config.exclude_patterns.append("**.ipynb_checkpoints") -def add_html_static_path(app: 
Sphinx): - """Add static path for HTML resources.""" - # TODO better to use importlib_resources here, or perhaps now there is another way? - static_path = Path(__file__).absolute().with_name("_static") - app.config.html_static_path.append(str(static_path)) +def add_global_html_resources(app: Sphinx, exception): + """Add HTML resources that apply to all pages.""" + # see https://github.com/sphinx-doc/sphinx/issues/1379 + if app.builder.format == "html" and not exception: + with import_resources.path(static, "mystnb.css") as source_path: + destination = os.path.join(app.builder.outdir, "_static", "mystnb.css") + copy_asset_file(str(source_path), destination) -def add_js_files(app: Sphinx, pagename: str, *args: Any, **kwargs: Any) -> None: +def add_per_page_html_resources( + app: Sphinx, pagename: str, *args: Any, **kwargs: Any +) -> None: """Add JS files for this page, identified from the parsing of the notebook.""" if app.builder.format != "html": return @@ -381,46 +384,26 @@ def get_cell_render_config( def render_nb_metadata(self, token: SyntaxTreeNode) -> None: """Render the notebook metadata.""" - metadata = dict(token.meta) env = cast(BuildEnvironment, self.sphinx_env) + metadata = dict(token.meta) + special_keys = ("kernelspec", "language_info", "source_map") + for key in special_keys: + if key in metadata: + # save these special keys on the metadata, rather than as docinfo + # note, sphinx_book_theme checks kernelspec is in the metadata + env.metadata[env.docname][key] = metadata.get(key) - # save these special keys on the metadata, rather than as docinfo - for key in ("kernelspec", "language_info"): - # TODO sphinx_book_theme checks kernelspec in `_is_notebook` check - # NbMetadataCollector.set_doc_data( - # env, env.docname, key, metadata.pop(key, None) - # ) - env.metadata[env.docname][key] = metadata.pop(key, None) - - # TODO should we provide hook for NbElementRenderer? 
- - # store ipywidgets state in metadata, - # which will be later added to HTML page context - # The JSON inside the script tag is identified and parsed by: - # https://github.com/jupyter-widgets/ipywidgets/blob/32f59acbc63c3ff0acf6afa86399cb563d3a9a86/packages/html-manager/src/libembed.ts#L36 - # see also: https://ipywidgets.readthedocs.io/en/7.6.5/embedding.html - ipywidgets = metadata.pop("widgets", None) - ipywidgets_mime = (ipywidgets or {}).get(WIDGET_STATE_MIMETYPE, {}) - if ipywidgets_mime.get("state", None): - self.nb_renderer.add_js_file( - "ipywidgets_state", - None, - { - "type": "application/vnd.jupyter.widget-state+json", - "body": sanitize_script_content(json.dumps(ipywidgets_mime)), - }, - ) - for i, (path, kwargs) in enumerate(self.nb_config.ipywidgets_js.items()): - self.nb_renderer.add_js_file(f"ipywidgets_{i}", path, kwargs) + metadata = self.nb_renderer.render_nb_metadata(metadata) - # forward the rest to the front_matter renderer + # forward the remaining metadata to the front_matter renderer + top_matter = {k: v for k, v in metadata.items() if k not in special_keys} self.render_front_matter( Token( "front_matter", "", 0, map=[0, 0], - content=metadata, # type: ignore[arg-type] + content=top_matter, # type: ignore[arg-type] ), ) diff --git a/myst_nb/static/__init__.py b/myst_nb/static/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/myst_nb/_static/mystnb.css b/myst_nb/static/mystnb.css similarity index 100% rename from myst_nb/_static/mystnb.css rename to myst_nb/static/mystnb.css diff --git a/tests/test_docutils.py b/tests/test_docutils.py index c302c53a..4941d069 100644 --- a/tests/test_docutils.py +++ b/tests/test_docutils.py @@ -3,7 +3,7 @@ import json from pathlib import Path -from docutils.core import publish_doctree +from docutils.core import publish_doctree, publish_string import pytest import yaml @@ -50,3 +50,25 @@ def test_reporting(file_params): }, ) file_params.assert_expected(report_stream.getvalue(), rstrip=True) + + +def test_html_resources(tmp_path): + """Test HTML resources are correctly output.""" + report_stream = StringIO() + result = publish_string( + json.dumps({"cells": [], "metadata": {}, "nbformat": 4, "nbformat_minor": 4}), + parser=Parser(), + writer_name="html", + settings_overrides={ + "nb_execution_mode": "off", + "nb_output_folder": str(tmp_path), + "warning_stream": report_stream, + "output_encoding": "unicode", + "embed_stylesheet": False, + }, + ) + assert report_stream.getvalue().rstrip() == "" + assert "mystnb.css" in result + assert "pygments.css" in result + assert tmp_path.joinpath("mystnb.css").is_file() + assert tmp_path.joinpath("pygments.css").is_file() diff --git a/tests/test_text_based.py b/tests/test_text_based.py index 28cad54f..adab19b5 100644 --- a/tests/test_text_based.py +++ b/tests/test_text_based.py @@ -47,7 +47,6 @@ def test_basic_run_exec_off(sphinx_run, file_regression, check_nbs): "source_map", "wordcount", "kernelspec", - "language_info", } assert set(sphinx_run.env.nb_metadata["basic_unrun"].keys()) == set() assert sphinx_run.env.metadata["basic_unrun"]["author"] == "Chris" From d71d0cf4fe8674933fb483b56a8a6b820920cbe0 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Wed, 12 Jan 2022 04:22:30 +0100 Subject: [PATCH 62/75] remove no longer used method --- myst_nb/docutils_.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 19117958..2e171f12 100644 --- a/myst_nb/docutils_.py +++ 
b/myst_nb/docutils_.py @@ -301,18 +301,6 @@ def render_nb_metadata(self, token: SyntaxTreeNode) -> None: ), ) - def render_nb_widget_state(self, token: SyntaxTreeNode) -> None: - """Render the HTML defining the ipywidget state.""" - # TODO handle this more generally, - # by just passing all notebook metadata to the nb_renderer - node = self.nb_renderer.render_widget_state( - mime_type=token.attrGet("type"), data=token.meta - ) - node["nb_element"] = "widget_state" - self.add_line_and_source_path(node, token) - # always append to bottom of the document - self.document.append(node) - def render_nb_cell_markdown(self, token: SyntaxTreeNode) -> None: """Render a notebook markdown cell.""" # TODO this is currently just a "pass-through", but we could utilise the metadata From cff8ebcb3796ea2a4993e7a28c59b93cb34ccd7c Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Wed, 12 Jan 2022 04:46:18 +0100 Subject: [PATCH 63/75] rename render_unknown -> render_unhandled --- myst_nb/render.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/myst_nb/render.py b/myst_nb/render.py index ee9dc434..e0e705ab 100644 --- a/myst_nb/render.py +++ b/myst_nb/render.py @@ -324,9 +324,9 @@ def render_mime_type(self, data: MimeData) -> List[nodes.Element]: if data.mime_type == "text/markdown": return self.render_markdown(data) - return self.render_unknown(data) + return self.render_unhandled(data) - def render_unknown(self, data: MimeData) -> List[nodes.Element]: + def render_unhandled(self, data: MimeData) -> List[nodes.Element]: """Render a notebook output of unknown mime type.""" self.logger.warning( f"skipping unknown output mime type: {data.mime_type}", @@ -471,9 +471,9 @@ def render_mime_type_inline(self, data: MimeData) -> List[nodes.Element]: if data.mime_type == "text/markdown": return self.render_markdown_inline(data) - return self.render_unknown_inline(data) + return self.render_unhandled_inline(data) - def render_unknown_inline(self, data: MimeData) -> List[nodes.Element]: + def render_unhandled_inline(self, data: MimeData) -> List[nodes.Element]: """Render a notebook output of unknown mime type.""" self.logger.warning( f"skipping unknown output mime type: {data.mime_type}", From c20db70c08dbded2f77ebec281efcad5c74de316 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Wed, 12 Jan 2022 05:28:01 +0100 Subject: [PATCH 64/75] windows fix --- tests/test_execute.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/test_execute.py b/tests/test_execute.py index 46948a68..d9de8db0 100644 --- a/tests/test_execute.py +++ b/tests/test_execute.py @@ -1,4 +1,7 @@ """Test sphinx builds which execute notebooks.""" +import os +from pathlib import Path + import pytest from myst_nb.sphinx_ import NbMetadataCollector @@ -17,6 +20,10 @@ def regress_nb_doc(file_regression, sphinx_run, check_nbs): "438c56ea3dcf99d86cd64df1b23e2b436afb25846434efb1cfec7b660ef01127", "e2dfbe330154316cfb6f3186e8f57fc4df8aee03b0303ed1345fc22cd51f66de", ) + if os.name == "nt": # on Windows image file paths are absolute + doctree_string = doctree_string.replace( + Path(sphinx_run.app.srcdir).as_posix() + "/", "" + ) file_regression.check(doctree_string, extension=".xml", encoding="utf8") From 51bb988442e2541953d3e9dbb5e9ce3e269d9518 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 13 Jan 2022 11:08:35 +0100 Subject: [PATCH 65/75] minor updates --- myst_nb/execute.py | 4 ++-- myst_nb/read.py | 15 ++++++++------- 
tests/test_parser.py | 12 +++++++++--- 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/myst_nb/execute.py b/myst_nb/execute.py index a3f454b0..b7dd89b4 100644 --- a/myst_nb/execute.py +++ b/myst_nb/execute.py @@ -137,7 +137,7 @@ def execute_notebook( # use the cached notebook if it exists if cache_record is not None: - logger.info(f"Using cached notebook: PK={cache_record.pk}") + logger.info(f"Using cached notebook: ID={cache_record.pk}") _, notebook = cache.merge_match_into_notebook(notebook) exec_metadata = { "mtime": cache_record.created.timestamp(), @@ -195,7 +195,7 @@ def execute_notebook( check_validity=False, overwrite=True, ) - logger.info(f"Cached executed notebook: PK={cache_record.pk}") + logger.info(f"Cached executed notebook: ID={cache_record.pk}") exec_metadata = { "mtime": datetime.now().timestamp(), diff --git a/myst_nb/read.py b/myst_nb/read.py index 30b38869..3fa135af 100644 --- a/myst_nb/read.py +++ b/myst_nb/read.py @@ -147,7 +147,7 @@ def is_myst_markdown_notebook(text: Union[str, Iterator[str]]) -> bool: def read_myst_markdown_notebook( text, - config: MdParserConfig, + config: MdParserConfig = None, code_directive="{code-cell}", raw_directive="{raw-cell}", add_source_map=False, @@ -162,11 +162,12 @@ def read_myst_markdown_notebook( which is a list of the starting source line number for each cell. :param path: path to notebook (required for :load:) - :raises _MystMetadataParsingError if the metadata block is not valid JSON/YAML + :raises MystMetadataParsingError if the metadata block is not valid JSON/YAML NOTE: we assume here that all of these directives are at the top-level, i.e. not nested in other directives. """ + config = config or MdParserConfig() # parse markdown file up to the block level (i.e. don't worry about inline text) inline_config = attr.evolve( config, disable_syntax=(config.disable_syntax + ["inline"]) @@ -184,7 +185,7 @@ def read_myst_markdown_notebook( try: metadata_nb = yaml.safe_load(metadata.content) except (yaml.parser.ParserError, yaml.scanner.ScannerError) as error: - raise _MystMetadataParsingError("Notebook metadata: {}".format(error)) + raise MystMetadataParsingError("Notebook metadata: {}".format(error)) # create an empty notebook nbf_version = nbf.v4 @@ -254,7 +255,7 @@ def _flush_markdown(start_line, token, md_metadata): return notebook -class _MystMetadataParsingError(Exception): +class MystMetadataParsingError(Exception): """Error when parsing metadata from myst formatted text""" @@ -287,7 +288,7 @@ def _read_fenced_cell(token, cell_index, cell_type): validate_options=False, ) except DirectiveParsingError as err: - raise _MystMetadataParsingError( + raise MystMetadataParsingError( "{0} cell {1} at line {2} could not be read: {3}".format( cell_type, cell_index, token.map[0] + 1, err ) @@ -301,13 +302,13 @@ def _read_cell_metadata(token, cell_index): try: metadata = json.loads(token.content.strip()) except Exception as err: - raise _MystMetadataParsingError( + raise MystMetadataParsingError( "Markdown cell {0} at line {1} could not be read: {2}".format( cell_index, token.map[0] + 1, err ) ) if not isinstance(metadata, dict): - raise _MystMetadataParsingError( + raise MystMetadataParsingError( "Markdown cell {0} at line {1} is not a dict".format( cell_index, token.map[0] + 1 ) diff --git a/tests/test_parser.py b/tests/test_parser.py index 2f42281a..aec866ca 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -1,4 +1,7 @@ """Test parsing of already executed notebooks.""" +import os +from pathlib import Path + 
import pytest @@ -56,9 +59,12 @@ def test_complex_outputs(sphinx_run, file_regression): "language": "python", "name": "python3", } - file_regression.check( - sphinx_run.get_doctree().pformat(), extension=".xml", encoding="utf8" - ) + doctree_string = sphinx_run.get_doctree().pformat() + if os.name == "nt": # on Windows image file paths are absolute + doctree_string = doctree_string.replace( + Path(sphinx_run.app.srcdir).as_posix() + "/", "" + ) + file_regression.check(doctree_string, extension=".xml", encoding="utf8") filenames = { p.replace(".jpeg", ".jpg") From ac493b05c5f22dfa1a35e6f131fc931117c734a0 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 13 Jan 2022 13:07:21 +0100 Subject: [PATCH 66/75] pin ipython to <8 --- setup.cfg | 2 ++ 1 file changed, 2 insertions(+) diff --git a/setup.cfg b/setup.cfg index 612eab8c..997ee68b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -78,6 +78,8 @@ rtd = bokeh coconut~=1.4.3 ipykernel~=5.5 + # ipython v8 is only available for Python 3.8+, and it changes exception text + ipython<8 ipywidgets jupytext~=1.11.2 matplotlib From 5cc68ef2b6280f12f2b2ccfcf1619e6021b3fb12 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 13 Jan 2022 13:08:15 +0100 Subject: [PATCH 67/75] pin ipython to <8 for testing --- setup.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 997ee68b..213ddee9 100644 --- a/setup.cfg +++ b/setup.cfg @@ -78,8 +78,6 @@ rtd = bokeh coconut~=1.4.3 ipykernel~=5.5 - # ipython v8 is only available for Python 3.8+, and it changes exception text - ipython<8 ipywidgets jupytext~=1.11.2 matplotlib @@ -95,6 +93,8 @@ testing = coverage<5.0 beautifulsoup4 ipykernel~=5.5 + # ipython v8 is only available for Python 3.8+, and it changes exception text + ipython<8 ipywidgets jupytext~=1.11.2 # TODO: 3.4.0 has some warnings that need to be fixed in the tests. 
From 15fc96c875c52b4fcdfa6cb2671fbebd2ed8d5d4 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 13 Jan 2022 15:47:37 +0100 Subject: [PATCH 68/75] PoP inline variables --- docs/conf.py | 1 + docs/use/index.md | 1 + docs/use/inline_execution.md | 80 +++++++++++ myst_nb/configuration.py | 3 +- myst_nb/docutils_.py | 90 +++++++++--- myst_nb/execute.py | 231 +++++++++++++++++++++++++++++- myst_nb/{parse.py => md_parse.py} | 26 ++-- myst_nb/nb_glue/elements.py | 77 ++++++++++ myst_nb/sphinx_.py | 97 +++++++++---- tests/nb_fixtures/basic.txt | 87 +++++++---- tests/test_docutils.py | 5 +- 11 files changed, 606 insertions(+), 92 deletions(-) create mode 100644 docs/use/inline_execution.md rename myst_nb/{parse.py => md_parse.py} (88%) diff --git a/docs/conf.py b/docs/conf.py index b33b29ba..24ca72bf 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -78,6 +78,7 @@ "force", "auto", "cache", + "inline", "commonmark", "gfm", "myst", diff --git a/docs/use/index.md b/docs/use/index.md index a85d5e29..b526a042 100644 --- a/docs/use/index.md +++ b/docs/use/index.md @@ -9,6 +9,7 @@ They cover how to use Jupyter Notebooks with MyST markdown, as well as start myst execute +inline_execution hiding formatting_outputs glue diff --git a/docs/use/inline_execution.md b/docs/use/inline_execution.md new file mode 100644 index 00000000..f5cbedfe --- /dev/null +++ b/docs/use/inline_execution.md @@ -0,0 +1,80 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: '0.8' + jupytext_version: 1.4.1+dev +kernelspec: + display_name: Python 3 + language: python + name: python3 +mystnb: + execution_mode: inline +--- + +# Inline execution mode and Markdown variables + +This is a Proof of Concept notebook for inline variables. + +This notebook is executed using top-matter: + +```md + +--- +mystnb: + execution_mode: inline +--- +``` + +which turns on the experimental inline execution mode. + +Inline execution starts the Jupyter kernel, then executes code cells as they are visited during the conversion to docutils AST. + +When an `eval` role or directive is encountered, the name is run by the kernel and the result is inserted into the document. + +You can see here that the variable `a` that is inserted by the `eval` role will change based on the order of execution, relative to the code cells. + +```{code-cell} ipython3 +a=1 +``` + +First call to `` {eval}`a` `` gives us: {eval}`a` + +```{code-cell} ipython3 +a=2 +``` + +Second call to `` {eval}`a` `` gives us: {eval}`a` + +```{code-cell} ipython3 +from IPython.display import Image +image = Image("images/fun-fish.png") +``` + +You can evaluate any type of variable: + +````md +```{eval} image +``` +```` + +```{eval} image +``` + +```{code-cell} ipython3 +from IPython.display import Markdown +markdown = Markdown(""" +This can have **nested syntax**. +""") +``` + +````md +```{eval} markdown +``` +```` + +```{eval} markdown +``` + +This will work for any Jupyter kernel, independent of language! 
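+
+Note that, at least in this proof of concept, `eval` accepts only a plain variable
+name, not an arbitrary expression, so that evaluating it should not change the
+state of the kernel. To show the result of an expression, assign it to a variable
+in a code cell first, for example (`total` is just an illustrative name):
+
+```{code-cell} ipython3
+total = a + 40
+```
+
+Then `` {eval}`total` `` gives us: {eval}`total`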
diff --git a/myst_nb/configuration.py b/myst_nb/configuration.py index 30796d19..8d0ceae0 100644 --- a/myst_nb/configuration.py +++ b/myst_nb/configuration.py @@ -200,7 +200,7 @@ class NbParserConfig: # notebook execution options - execution_mode: Literal["off", "force", "auto", "cache"] = attr.ib( + execution_mode: Literal["off", "force", "auto", "cache", "inline"] = attr.ib( default="auto", validator=in_( [ @@ -208,6 +208,7 @@ class NbParserConfig: "auto", "force", "cache", + "inline", ] ), metadata={ diff --git a/myst_nb/docutils_.py b/myst_nb/docutils_.py index 2e171f12..b0cd64ad 100644 --- a/myst_nb/docutils_.py +++ b/myst_nb/docutils_.py @@ -22,9 +22,12 @@ from myst_nb import static from myst_nb.configuration import NbParserConfig -from myst_nb.execute import execute_notebook +from myst_nb.execute import NbClientRunner, PreExecutedNbRunner, execute_notebook from myst_nb.loggers import DEFAULT_LOG_TYPE, DocutilsDocLogger +from myst_nb.md_parse import nb_node_to_dict, notebook_to_tokens from myst_nb.nb_glue.elements import ( + EvalDirective, + EvalRole, PasteAnyDirective, PasteFigureDirective, PasteMarkdownDirective, @@ -33,7 +36,6 @@ PasteRoleAny, PasteTextRole, ) -from myst_nb.parse import nb_node_to_dict, notebook_to_tokens from myst_nb.preprocess import preprocess_notebook from myst_nb.read import ( NbReader, @@ -79,12 +81,14 @@ def parse(self, inputstring: str, document: nodes.document) -> None: ("glue:figure", PasteFigureDirective), ("glue:math", PasteMathDirective), ("glue:md", PasteMarkdownDirective), + ("eval", EvalDirective), ) new_roles = ( ("glue:", PasteRoleAny()), ("glue:any", PasteRoleAny()), ("glue:text", PasteTextRole()), ("glue:md", PasteMarkdownRole()), + ("eval", EvalRole()), ) for name, directive in new_directives: _directives[name] = directive @@ -167,7 +171,6 @@ def _parse(self, inputstring: str, document: nodes.document) -> None: # Setup the markdown parser mdit_parser = create_md_parser(nb_reader.md_config, DocutilsNbRenderer) mdit_parser.options["document"] = document - mdit_parser.options["notebook"] = notebook mdit_parser.options["nb_config"] = nb_config mdit_env: Dict[str, Any] = {} @@ -190,9 +193,17 @@ def _parse(self, inputstring: str, document: nodes.document) -> None: mdit_parser.renderer.md_options["nb_resources"] = resources # parse to tokens - mdit_tokens = notebook_to_tokens(notebook, mdit_parser, mdit_env, logger) + mdit_tokens = notebook_to_tokens(notebook, mdit_parser, mdit_env) # convert to docutils AST, which is added to the document - mdit_parser.renderer.render(mdit_tokens, mdit_parser.options, mdit_env) + runner_cls = ( + NbClientRunner + if nb_config.execution_mode == "inline" + else PreExecutedNbRunner + ) + with runner_cls(notebook, os.path.dirname(document_source)) as runner: + mdit_parser.options["_nb_runner"] = runner + mdit_parser.renderer.render(mdit_tokens, mdit_parser.options, mdit_env) + notebook = runner.get_final_notebook() if nb_config.output_folder: # write final (updated) notebook to output folder (utf8 is standard encoding) @@ -239,6 +250,40 @@ def nb_config(self) -> NbParserConfig: """Get the notebook element renderer.""" return self.md_options["nb_config"] + def get_nb_source_code_lexer(self) -> Optional[str]: + """Get the lexer name for code cell source.""" + runner = self.md_options["_nb_runner"] + lexer = runner.get_source_code_lexer() + if lexer is None: + # TODO allow user to set default lexer? 
+ self.create_warning( + "No source code lexer found for notebook", + wtype=DEFAULT_LOG_TYPE, + subtype="lexer", + append_to=self.current_node, + ) + return lexer + + def _create_code_outputs( + self, cell_index + ) -> Tuple[Optional[int], List[NotebookNode]]: + """Create the outputs for a code cell. + + IMPORTANT: this should only be called once per code cell, + since it may execute the code. + + :param source: The source code of the cell + :param cell_index: The index of the cell + :param metadata: The metadata of the cell + :returns: (execution count, list of outputs) + """ + runner = self.md_options["_nb_runner"] + return runner.execute_next_cell(cell_index) + + def get_nb_variable(self, name): + runner = self.md_options["_nb_runner"] + return runner.get_variable(name) + @property def nb_renderer(self) -> NbElementRenderer: """Get the notebook element renderer.""" @@ -321,16 +366,20 @@ def render_nb_cell_raw(self, token: SyntaxTreeNode) -> None: def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: """Render a notebook code cell.""" cell_index = token.meta["index"] - tags = token.meta["metadata"].get("tags", []) + metadata = token.meta["metadata"] + tags = metadata.get("tags", []) + + # this must be called per code cell + exec_count, outputs = self._create_code_outputs(cell_index) # TODO do we need this -/_ duplication of tag names, or can we deprecate one? remove_input = ( - self.get_cell_render_config(token.meta["metadata"], "remove_code_source") + self.get_cell_render_config(metadata, "remove_code_source") or ("remove_input" in tags) or ("remove-input" in tags) ) remove_output = ( - self.get_cell_render_config(token.meta["metadata"], "remove_code_outputs") + self.get_cell_render_config(metadata, "remove_code_outputs") or ("remove_output" in tags) or ("remove-output" in tags) ) @@ -347,8 +396,8 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: nb_element="cell_code", cell_index=cell_index, # TODO some way to use this to allow repr of count in outputs like HTML? 
- exec_count=token.meta["execution_count"], - cell_metadata=token.meta["metadata"], + exec_count=exec_count, + cell_metadata=metadata, classes=classes, ) self.add_line_and_source_path(cell_container, token) @@ -361,26 +410,22 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: ) self.add_line_and_source_path(cell_input, token) with self.current_node_context(cell_input, append=True): - self.render_nb_cell_code_source(token) + self._render_nb_cell_code_source(token) # render the execution output, if any - has_outputs = self.md_options["notebook"]["cells"][cell_index].get( - "outputs", [] - ) - if (not remove_output) and has_outputs: + if (not remove_output) and outputs: cell_output = nodes.container( nb_element="cell_code_output", classes=["cell_output"] ) self.add_line_and_source_path(cell_output, token) with self.current_node_context(cell_output, append=True): - self.render_nb_cell_code_outputs(token) + self._render_nb_cell_code_outputs(token, outputs) - def render_nb_cell_code_source(self, token: SyntaxTreeNode) -> None: + def _render_nb_cell_code_source(self, token: SyntaxTreeNode) -> None: """Render a notebook code cell's source.""" - lexer = token.meta.get("lexer", None) node = self.create_highlighted_code_block( token.content, - lexer, + self.get_nb_source_code_lexer(), number_lines=self.get_cell_render_config( token.meta["metadata"], "number_source_lines" ), @@ -390,14 +435,13 @@ def render_nb_cell_code_source(self, token: SyntaxTreeNode) -> None: self.add_line_and_source_path(node, token) self.current_node.append(node) - def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: + def _render_nb_cell_code_outputs( + self, token: SyntaxTreeNode, outputs: List[NotebookNode] + ) -> None: """Render a notebook code cell's outputs.""" cell_index = token.meta["index"] metadata = token.meta["metadata"] line = token_line(token) - outputs: List[NotebookNode] = self.md_options["notebook"]["cells"][ - cell_index - ].get("outputs", []) # render the outputs mime_priority = self.get_cell_render_config(metadata, "mime_priority") for output_index, output in enumerate(outputs): diff --git a/myst_nb/execute.py b/myst_nb/execute.py index b7dd89b4..44c30380 100644 --- a/myst_nb/execute.py +++ b/myst_nb/execute.py @@ -1,16 +1,27 @@ """Module for executing notebooks.""" +import asyncio from contextlib import nullcontext, suppress from datetime import datetime +from functools import lru_cache from logging import Logger import os from pathlib import Path, PurePosixPath +import re from tempfile import TemporaryDirectory -from typing import Optional, Tuple +from typing import List, Optional, Tuple from jupyter_cache import get_cache from jupyter_cache.base import NbBundleIn from jupyter_cache.cache.db import NbStageRecord from jupyter_cache.executors.utils import single_nb_execution +from nbclient.client import ( + CellControlSignal, + DeadKernelError, + NotebookClient, + ensure_async, + run_sync, +) +import nbformat from nbformat import NotebookNode from typing_extensions import TypedDict @@ -207,3 +218,221 @@ def execute_notebook( } return notebook, exec_metadata + + +class NotebookRunnerBase: + """A client for interacting with a notebook server. 
+ + The runner should be initialised with a notebook as a context manager, + and all code cells executed, then the final notebook returned:: + + with NotebookRunner(nb) as runner: + for i, cell in enumerate(runner.cells): + if cell.cell_type == "code": + exec_count, outputs = runner.execute_next_cell(i) + final_nb = runner.get_final_notebook() + """ + + def __init__(self, notebook: NotebookNode, cwd: Optional[str]): + """Initialise the client.""" + self._notebook = notebook + self._cwd = cwd + self._in_context = False + + @property + def notebook(self) -> NotebookNode: + """Return the input notebook.""" + if not self._in_context: + raise ValueError("not in context") + return self._notebook + + def __enter__(self): + """Open the client.""" + self._current_index = 0 + self._in_context = True + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Close the client.""" + self._in_context = False + + def get_source_code_lexer(self) -> Optional[str]: + """Return the lexer name for code cell sources, if available""" + raise NotImplementedError + + def get_next_code_cell( + self, expected_index: Optional[int] = None + ) -> Tuple[int, NotebookNode]: + """Return the next code cell (index, cell), if available. + + We check against an expected index, + to ensure that we are receiving the outputs for the cell we expected. + """ + for i, cell in enumerate(self.notebook.cells[self._current_index :]): + if cell.cell_type == "code": + assert (expected_index is None) or ( + self._current_index + i == expected_index + ), f"{i} != {expected_index}" + self._current_index += i + 1 + return cell + raise StopIteration("No more code cells") + + def get_final_notebook(self) -> NotebookNode: + """Return the final notebook.""" + try: + self.get_next_code_cell() + except StopIteration: + pass + else: + raise ValueError("Un-executed code cell(s)") + return self.notebook + + def execute_next_cell( + self, cell_index: int + ) -> Tuple[Optional[int], List[NotebookNode]]: + """Execute the next code cell. 
+ + :param cell_index: the index of the cell we expect to execute + :returns: (execution count, list of outputs) + """ + raise NotImplementedError + + def get_variable(self, name: str): + """Return the value of a variable, if available.""" + raise NotImplementedError + + +class PreExecutedNbRunner(NotebookRunnerBase): + """Works on pre-executed notebooks.""" + + @lru_cache(maxsize=1) + def get_source_code_lexer(self) -> Optional[str]: + metadata = self.notebook["metadata"] + # attempt to get language lexer name + langinfo = metadata.get("language_info") or {} + lexer = langinfo.get("pygments_lexer") or langinfo.get("name", None) + if lexer is None: + lexer = (metadata.get("kernelspec") or {}).get("language", None) + return lexer + + def execute_next_cell( + self, cell_index: int + ) -> Tuple[Optional[int], List[NotebookNode]]: + next_cell = self.get_next_code_cell(cell_index) + return next_cell.get("execution_count", None), next_cell.get("outputs", []) + + +class NbClientRunner(NotebookRunnerBase): + def __init__(self, notebook: NotebookNode, cwd: Optional[str]): + super().__init__(notebook, cwd) + resources = {"metadata": {"path": cwd}} if cwd else {} + self._client = ModifiedNotebookClient( + notebook, record_timing=False, resources=resources + ) + self._lexer = None + + def __enter__(self): + super().__enter__() + self._client.reset_execution_trackers() + if self._client.km is None: + self._client.km = self._client.create_kernel_manager() + + if not self._client.km.has_kernel: + self._client.start_new_kernel() + self._client.start_new_kernel_client() + msg_id = self._client.kc.kernel_info() + info_msg = self._client.wait_for_reply(msg_id) + if info_msg is not None: + if "language_info" in info_msg["content"]: + language_info = info_msg["content"]["language_info"] + self.notebook.metadata["language_info"] = language_info + lexer = language_info.get("pygments_lexer") or language_info.get("name", None) + if lexer is None: + lexer = (self.notebook.metadata.get("kernelspec") or {}).get( + "language", None + ) + self._lexer = lexer + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + try: + # TODO because we set the widget state at the end, + # it won't be output by the renderer at present + self._client.set_widgets_metadata() + except Exception: + pass + if self._client.owns_km: + self._client._cleanup_kernel() + return super().__exit__(exc_type, exc_val, exc_tb) + + def get_source_code_lexer(self) -> Optional[str]: + return self._lexer + + def execute_next_cell( + self, cell_index: int + ) -> Tuple[Optional[int], List[NotebookNode]]: + next_cell = self.get_next_code_cell(cell_index) + self._client.execute_cell( + next_cell, cell_index, execution_count=self._client.code_cells_executed + 1 + ) + return next_cell.get("execution_count", None), next_cell.get("outputs", []) + + def get_variable(self, name: str): + # this MUST NOT change the state of the jupyter kernel + # so we allow execution of variable names + if not re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", name): + raise ValueError(f"Invalid variable name: {name}") + return self._client.user_expression(name) + + +class ModifiedNotebookClient(NotebookClient): + async def async_user_expression(self, name: str) -> NotebookNode: + """ """ + assert self.kc is not None + self.log.debug(f"Executing user_expression: {name}") + parent_msg_id = await ensure_async( + self.kc.execute( + str(name), + store_history=False, + stop_on_error=False, + # user_expressions={"name": name}, + ) + ) + # We launched a code cell to execute + exec_timeout = 
10 + + cell = nbformat.v4.new_code_cell(source=f"{name}") + cell_index = -1 + self.clear_before_next_output = False + + task_poll_kernel_alive = asyncio.ensure_future(self._async_poll_kernel_alive()) + task_poll_output_msg = asyncio.ensure_future( + self._async_poll_output_msg(parent_msg_id, cell, cell_index) + ) + self.task_poll_for_reply = asyncio.ensure_future( + self._async_poll_for_reply( + parent_msg_id, + cell, + exec_timeout, + task_poll_output_msg, + task_poll_kernel_alive, + ) + ) + try: + await self.task_poll_for_reply + except asyncio.CancelledError: + # can only be cancelled by task_poll_kernel_alive when the kernel is dead + task_poll_output_msg.cancel() + raise DeadKernelError("Kernel died") + except Exception as e: + # Best effort to cancel request if it hasn't been resolved + try: + # Check if the task_poll_output is doing the raising for us + if not isinstance(e, CellControlSignal): + task_poll_output_msg.cancel() + finally: + raise + + return cell.outputs[0] + + user_expression = run_sync(async_user_expression) diff --git a/myst_nb/parse.py b/myst_nb/md_parse.py similarity index 88% rename from myst_nb/parse.py rename to myst_nb/md_parse.py index cf2be298..cc1e59d5 100644 --- a/myst_nb/parse.py +++ b/myst_nb/md_parse.py @@ -1,5 +1,4 @@ """Module for parsing notebooks to Markdown-it tokens.""" -import logging from typing import Any, Dict, List from markdown_it.main import MarkdownIt @@ -24,8 +23,12 @@ def notebook_to_tokens( notebook: NotebookNode, mdit_parser: MarkdownIt, mdit_env: Dict[str, Any], - logger: logging.Logger, ) -> List[Token]: + """Convert a notebook to a list of markdown-it tokens. + + This may be done before the notebook is executed, so we do not record outputs, + or language info. + """ # disable front-matter, since this is taken from the notebook mdit_parser.disable("front_matter", ignoreInvalid=True) # this stores global state, such as reference definitions @@ -34,16 +37,6 @@ def notebook_to_tokens( # (required to collect all reference definitions, before assessing references). 
metadata = nb_node_to_dict(notebook.metadata) - # attempt to get language lexer name - langinfo = metadata.get("language_info") or {} - lexer = langinfo.get("pygments_lexer") or langinfo.get("name", None) - if lexer is None: - lexer = (metadata.get("kernelspec") or {}).get("language", None) - if lexer is None: - logger.warning( - "No source code lexer found in notebook metadata", subtype="lexer" - ) - block_tokens = [ Token("nb_metadata", "", 0, meta=metadata, map=[0, 0]), ] @@ -54,9 +47,10 @@ def notebook_to_tokens( continue # skip cells tagged for removal - tags = nb_cell.metadata.get("tags", []) - if ("remove_cell" in tags) or ("remove-cell" in tags): - continue + # TODO this breaks inline execution + # tags = nb_cell.metadata.get("tags", []) + # if ("remove_cell" in tags) or ("remove-cell" in tags): + # continue # generate tokens tokens: List[Token] @@ -118,8 +112,6 @@ def notebook_to_tokens( content=nb_cell["source"], meta={ "index": cell_index, - "execution_count": nb_cell.get("execution_count", None), - "lexer": lexer, "metadata": nb_node_to_dict(nb_cell["metadata"]), }, map=[0, 0], diff --git a/myst_nb/nb_glue/elements.py b/myst_nb/nb_glue/elements.py index fd26f6d7..17484f7c 100644 --- a/myst_nb/nb_glue/elements.py +++ b/myst_nb/nb_glue/elements.py @@ -58,6 +58,7 @@ class RetrievedData: def retrieve_glue_data(document: nodes.document, key: str) -> RetrievedData: """Retrieve the glue data from a specific document.""" if "nb_renderer" not in document: + # TODO say this is is because it is not a myst-document return RetrievedData(False, warning="No 'nb_renderer' found on the document.") nb_renderer: NbElementRenderer = document["nb_renderer"] resources = nb_renderer.get_resources() @@ -202,6 +203,46 @@ def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: raise NotImplementedError +class EvalRole(_PasteRoleBase): + """Inline evaluation from the jupyter kernel.""" + + def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: + document = self.inliner.document + if "nb_renderer" not in document: + # TODO say this is because not a myst-document + return [], [ + warning( + "No 'nb_renderer' found on the document.", document, self.lineno + ) + ] + nb_renderer: NbElementRenderer = document["nb_renderer"] + try: + output = nb_renderer.renderer.get_nb_variable(self.text) + # TODO handle if output is stdout/stderr + data = output["data"] + metadata = output.get("metadata", {}) + except Exception as err: + return [], [ + warning(f"variable retrieval failed: {err}", document, self.lineno) + ] + if is_sphinx(document): + _nodes = render_output_sphinx( + nb_renderer, + data, + metadata, + document["source"], + self.lineno, + inline=True, + ) + else: + _nodes = render_output_docutils( + nb_renderer, data, metadata, document, self.lineno, inline=True + ) + if _nodes and isinstance(_nodes[0], nodes.system_message): + return [], _nodes + return _nodes, [] + + class PasteRoleAny(_PasteRoleBase): """A role for pasting inline code outputs from notebooks, using render priority to decide the output mime type. 
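Both the `eval` role above and the `eval` directive added in the next hunk are thin wrappers around the runner protocol defined earlier in `myst_nb/execute.py`. For orientation, here is a minimal usage sketch of that protocol (not part of the patch series); the notebook file name is an assumption, and `PreExecutedNbRunner` is the no-kernel implementation selected when `execution_mode` is not `inline`:

```python
# Sketch only: drive a pre-executed notebook the way the renderer does,
# with one execute_next_cell call per code cell, in document order.
import nbformat

from myst_nb.execute import PreExecutedNbRunner  # module path as added by this patch

nb = nbformat.read("example.ipynb", as_version=4)  # assumed to exist, already executed

with PreExecutedNbRunner(nb, cwd=None) as runner:
    print(runner.get_source_code_lexer())  # e.g. "ipython3", or None if unknown
    for index, cell in enumerate(nb.cells):
        if cell.cell_type != "code":
            continue
        # returns the stored execution count and outputs; NbClientRunner would
        # instead execute the cell on a live kernel at this point
        exec_count, outputs = runner.execute_next_cell(index)
        print(index, exec_count, len(outputs))
    final_nb = runner.get_final_notebook()  # raises if any code cell was skipped
```

The role and directive additionally call `runner.get_variable`, which the base class leaves unimplemented, so inline evaluation only works with the kernel-backed `NbClientRunner` (see the sketch at the end of this series).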
@@ -335,6 +376,42 @@ def run(self) -> List[nodes.Node]: return paste_nodes +class EvalDirective(_PasteDirectiveBase): + """Block evaluation from the jupyter kernel.""" + + def run(self) -> List[nodes.Node]: + document = self.state.document + if "nb_renderer" not in document: + # TODO say this is because not a myst-document + return [ + warning( + "No 'nb_renderer' found on the document.", document, self.lineno + ) + ] + nb_renderer: NbElementRenderer = document["nb_renderer"] + try: + output = nb_renderer.renderer.get_nb_variable(self.arguments[0]) + # TODO handle if output is stdout/stderr + data = output["data"] + metadata = output.get("metadata", {}) + except Exception as err: + return [warning(f"variable retrieval failed: {err}", document, self.lineno)] + if is_sphinx(document): + _nodes = render_output_sphinx( + nb_renderer, + data, + metadata, + document["source"], + self.lineno, + inline=False, + ) + else: + _nodes = render_output_docutils( + nb_renderer, data, metadata, document, self.lineno, inline=False + ) + return _nodes + + class PasteMarkdownDirective(_PasteDirectiveBase): """A directive for pasting markdown outputs from notebooks as MyST Markdown.""" diff --git a/myst_nb/sphinx_.py b/myst_nb/sphinx_.py index 551c5d95..cfb816ed 100644 --- a/myst_nb/sphinx_.py +++ b/myst_nb/sphinx_.py @@ -27,11 +27,17 @@ from myst_nb import __version__, static from myst_nb.configuration import NbParserConfig -from myst_nb.execute import ExecutionResult, execute_notebook +from myst_nb.execute import ( + ExecutionResult, + NbClientRunner, + PreExecutedNbRunner, + execute_notebook, +) from myst_nb.loggers import DEFAULT_LOG_TYPE, SphinxDocLogger +from myst_nb.md_parse import nb_node_to_dict, notebook_to_tokens from myst_nb.nb_glue import glue_dict_to_nb from myst_nb.nb_glue.domain import NbGlueDomain -from myst_nb.parse import nb_node_to_dict, notebook_to_tokens +from myst_nb.nb_glue.elements import EvalDirective, EvalRole from myst_nb.preprocess import preprocess_notebook from myst_nb.read import UnexpectedCellDirective, create_nb_reader from myst_nb.render import ( @@ -90,6 +96,10 @@ def sphinx_setup(app: Sphinx): app.add_directive("code-cell", UnexpectedCellDirective, override=True) app.add_directive("raw-cell", UnexpectedCellDirective, override=True) + # add eval role/directive + app.add_role("eval", EvalRole()) + app.add_directive("eval", EvalDirective) + # add directive for downloading an executed notebook app.add_role("nb-download", NbDownloadRole()) @@ -284,7 +294,6 @@ def parse(self, inputstring: str, document: nodes.document) -> None: # Setup the parser mdit_parser = create_md_parser(nb_reader.md_config, SphinxNbRenderer) mdit_parser.options["document"] = document - mdit_parser.options["notebook"] = notebook mdit_parser.options["nb_config"] = nb_config mdit_env: Dict[str, Any] = {} @@ -307,9 +316,17 @@ def parse(self, inputstring: str, document: nodes.document) -> None: mdit_parser.renderer.md_options["nb_resources"] = resources # parse to tokens - mdit_tokens = notebook_to_tokens(notebook, mdit_parser, mdit_env, logger) + mdit_tokens = notebook_to_tokens(notebook, mdit_parser, mdit_env) # convert to docutils AST, which is added to the document - mdit_parser.renderer.render(mdit_tokens, mdit_parser.options, mdit_env) + runner_cls = ( + NbClientRunner + if nb_config.execution_mode == "inline" + else PreExecutedNbRunner + ) + with runner_cls(notebook, cwd=os.path.dirname(document_path)) as runner: + mdit_parser.options["_nb_runner"] = runner + mdit_parser.renderer.render(mdit_tokens, 
mdit_parser.options, mdit_env) + notebook = runner.get_final_notebook() # write final (updated) notebook to output folder (utf8 is standard encoding) path = self.env.docname.split("/") @@ -349,6 +366,40 @@ def nb_config(self) -> NbParserConfig: """Get the notebook element renderer.""" return self.md_options["nb_config"] + def get_nb_source_code_lexer(self) -> Optional[str]: + """Get the lexer name for code cell source.""" + runner = self.md_options["_nb_runner"] + lexer = runner.get_source_code_lexer() + if lexer is None: + # TODO allow user to set default lexer? + self.create_warning( + "No source code lexer found for notebook", + wtype=DEFAULT_LOG_TYPE, + subtype="lexer", + append_to=self.current_node, + ) + return lexer + + def _create_code_outputs( + self, cell_index + ) -> Tuple[Optional[int], List[NotebookNode]]: + """Create the outputs for a code cell. + + IMPORTANT: this should only be called once per code cell, + since it may execute the code. + + :param source: The source code of the cell + :param cell_index: The index of the cell + :param metadata: The metadata of the cell + :returns: (execution count, list of outputs) + """ + runner = self.md_options["_nb_runner"] + return runner.execute_next_cell(cell_index) + + def get_nb_variable(self, name): + runner = self.md_options["_nb_runner"] + return runner.get_variable(name) + @property def nb_renderer(self) -> NbElementRenderer: """Get the notebook element renderer.""" @@ -427,16 +478,20 @@ def render_nb_cell_raw(self, token: SyntaxTreeNode) -> None: def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: """Render a notebook code cell.""" cell_index = token.meta["index"] - tags = token.meta["metadata"].get("tags", []) + metadata = token.meta["metadata"] + tags = metadata.get("tags", []) + + # this must be called per code cell + exec_count, outputs = self._create_code_outputs(cell_index) # TODO do we need this -/_ duplication of tag names, or can we deprecate one? remove_input = ( - self.get_cell_render_config(token.meta["metadata"], "remove_code_source") + self.get_cell_render_config(metadata, "remove_code_source") or ("remove_input" in tags) or ("remove-input" in tags) ) remove_output = ( - self.get_cell_render_config(token.meta["metadata"], "remove_code_outputs") + self.get_cell_render_config(metadata, "remove_code_outputs") or ("remove_output" in tags) or ("remove-output" in tags) ) @@ -453,8 +508,8 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: nb_element="cell_code", cell_index=cell_index, # TODO some way to use this to allow repr of count in outputs like HTML? 
- exec_count=token.meta["execution_count"], - cell_metadata=token.meta["metadata"], + exec_count=exec_count, + cell_metadata=metadata, classes=classes, ) self.add_line_and_source_path(cell_container, token) @@ -467,27 +522,22 @@ def render_nb_cell_code(self, token: SyntaxTreeNode) -> None: ) self.add_line_and_source_path(cell_input, token) with self.current_node_context(cell_input, append=True): - self.render_nb_cell_code_source(token) + self._render_nb_cell_code_source(token) # render the execution output, if any - has_outputs = self.md_options["notebook"]["cells"][cell_index].get( - "outputs", [] - ) - if (not remove_output) and has_outputs: + if (not remove_output) and outputs: cell_output = nodes.container( nb_element="cell_code_output", classes=["cell_output"] ) self.add_line_and_source_path(cell_output, token) with self.current_node_context(cell_output, append=True): - self.render_nb_cell_code_outputs(token) + self._render_nb_cell_code_outputs(token, outputs) - def render_nb_cell_code_source(self, token: SyntaxTreeNode) -> None: + def _render_nb_cell_code_source(self, token: SyntaxTreeNode) -> None: """Render a notebook code cell's source.""" - # cell_index = token.meta["index"] - lexer = token.meta.get("lexer", None) node = self.create_highlighted_code_block( token.content, - lexer, + self.get_nb_source_code_lexer(), number_lines=self.get_cell_render_config( token.meta["metadata"], "number_source_lines" ), @@ -497,14 +547,13 @@ def render_nb_cell_code_source(self, token: SyntaxTreeNode) -> None: self.add_line_and_source_path(node, token) self.current_node.append(node) - def render_nb_cell_code_outputs(self, token: SyntaxTreeNode) -> None: + def _render_nb_cell_code_outputs( + self, token: SyntaxTreeNode, outputs: List[NotebookNode] + ) -> None: """Render a notebook code cell's outputs.""" line = token_line(token, 0) cell_index = token.meta["index"] metadata = token.meta["metadata"] - outputs: List[NotebookNode] = self.md_options["notebook"]["cells"][ - cell_index - ].get("outputs", []) # render the outputs for output_index, output in enumerate(outputs): if output.output_type == "stream": diff --git a/tests/nb_fixtures/basic.txt b/tests/nb_fixtures/basic.txt index 2793a434..34b3a288 100644 --- a/tests/nb_fixtures/basic.txt +++ b/tests/nb_fixtures/basic.txt @@ -1,4 +1,4 @@ -Markdown Cell: +[Markdown_Cells] . cells: - cell_type: markdown @@ -6,12 +6,12 @@ cells: source: | # A Title . -<document ids="a-title" names="a\ title" source="<string>" title="A Title"> +<document ids="a-title" names="a\ title" nb_language_info="{'pygments_lexer': 'ipython'}" source="<string>" title="A Title"> <title> A Title . -Code Cell (no output): +[Code_Cell_no_output] . cells: - cell_type: code @@ -22,15 +22,28 @@ cells: print(a) outputs: [] . -<document source="<string>"> +<document nb_language_info="{'pygments_lexer': 'ipython'}" source="<string>"> <container cell_index="0" cell_metadata="{}" classes="cell" exec_count="True" nb_element="cell_code"> <container classes="cell_input" nb_element="cell_code_source"> - <literal_block classes="code" xml:space="preserve"> - a=1 - print(a) + <literal_block classes="code ipython" xml:space="preserve"> + <inline classes="n"> + a + <inline classes="o"> + = + <inline classes="mi"> + 1 + + <inline classes="nb"> + print + <inline classes="p"> + ( + <inline classes="n"> + a + <inline classes="p"> + ) . -Code Cell (with lexer): +[Code_Cell_with_lexer] . metadata: language_info: @@ -55,7 +68,7 @@ cells: 1 . -Code Cell (simple output): +[Code_Cell_simple_output] . 
cells: - cell_type: code @@ -69,18 +82,31 @@ cells: output_type: stream text: "1" . -<document source="<string>"> +<document nb_language_info="{'pygments_lexer': 'ipython'}" source="<string>"> <container cell_index="0" cell_metadata="{}" classes="cell" exec_count="1" nb_element="cell_code"> <container classes="cell_input" nb_element="cell_code_source"> - <literal_block classes="code" xml:space="preserve"> - a=1 - print(a) + <literal_block classes="code ipython" xml:space="preserve"> + <inline classes="n"> + a + <inline classes="o"> + = + <inline classes="mi"> + 1 + + <inline classes="nb"> + print + <inline classes="p"> + ( + <inline classes="n"> + a + <inline classes="p"> + ) <container classes="cell_output" nb_element="cell_code_output"> <literal_block classes="code myst-ansi output stream" xml:space="preserve"> 1 . -Raw Cell +[raw_cell] . cells: - cell_type: raw @@ -90,14 +116,14 @@ cells: <h1>A Title</h1> </div> . -<document source="<string>"> +<document nb_language_info="{'pygments_lexer': 'ipython'}" source="<string>"> <raw classes="output text_html" format="html" xml:space="preserve"> <div> <h1>A Title</h1> </div> . -Mixed Cells: +[mixed_cells] . cells: - cell_type: markdown @@ -116,19 +142,32 @@ cells: source: | b . -<document ids="a-title" names="a\ title" source="<string>" title="A Title"> +<document ids="a-title" names="a\ title" nb_language_info="{'pygments_lexer': 'ipython'}" source="<string>" title="A Title"> <title> A Title <container cell_index="1" cell_metadata="{}" classes="cell" exec_count="True" nb_element="cell_code"> <container classes="cell_input" nb_element="cell_code_source"> - <literal_block classes="code" xml:space="preserve"> - a=1 - print(a) + <literal_block classes="code ipython" xml:space="preserve"> + <inline classes="n"> + a + <inline classes="o"> + = + <inline classes="mi"> + 1 + + <inline classes="nb"> + print + <inline classes="p"> + ( + <inline classes="n"> + a + <inline classes="p"> + ) <paragraph> b . -Reference definitions defined in different cells: +[ref_defs] Reference definitions defined in different cells . cells: - cell_type: markdown @@ -144,7 +183,7 @@ cells: source: | [b]: after . -<document source="<string>"> +<document nb_language_info="{'pygments_lexer': 'ipython'}" source="<string>"> <paragraph> <reference refuri="before"> a @@ -153,7 +192,7 @@ cells: b . -Footnote definitions defined in different cells: +[foot_defs] Footnote definitions defined in different cells . cells: - cell_type: markdown @@ -169,7 +208,7 @@ cells: source: | [^b]: after . 
-<document source="<string>"> +<document nb_language_info="{'pygments_lexer': 'ipython'}" source="<string>"> <paragraph> <footnote_reference auto="1" ids="id1" refid="a"> 1 diff --git a/tests/test_docutils.py b/tests/test_docutils.py index 4941d069..05d8ebe2 100644 --- a/tests/test_docutils.py +++ b/tests/test_docutils.py @@ -17,7 +17,7 @@ def test_basic(file_params): """Test basic parsing.""" dct = yaml.safe_load(file_params.content) dct.update({"nbformat": 4, "nbformat_minor": 4}) - dct.setdefault("metadata", {}) + dct.setdefault("metadata", {"language_info": {"pygments_lexer": "ipython"}}) report_stream = StringIO() doctree = publish_doctree( json.dumps(dct), @@ -38,7 +38,8 @@ def test_basic(file_params): def test_reporting(file_params): """Test that warnings and errors are reported as expected.""" dct = yaml.safe_load(file_params.content) - dct.update({"metadata": {}, "nbformat": 4, "nbformat_minor": 4}) + dct.update({"nbformat": 4, "nbformat_minor": 4}) + dct.setdefault("metadata", {"language_info": {"pygments_lexer": "ipython"}}) report_stream = StringIO() publish_doctree( json.dumps(dct), From d1dcc613f4ab38c4cbe29c1345aa8c4db2c67007 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 13 Jan 2022 16:01:03 +0100 Subject: [PATCH 69/75] Update inline_execution.md --- docs/use/inline_execution.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/use/inline_execution.md b/docs/use/inline_execution.md index f5cbedfe..2c242e47 100644 --- a/docs/use/inline_execution.md +++ b/docs/use/inline_execution.md @@ -47,12 +47,16 @@ a=2 Second call to `` {eval}`a` `` gives us: {eval}`a` +```{note} +The evaluation works in any nested environment: {eval}`a` +``` + ```{code-cell} ipython3 from IPython.display import Image image = Image("images/fun-fish.png") ``` -You can evaluate any type of variable: +You can also evaluate any type of variable: ````md ```{eval} image From ae49323fd9df97496f72bd2938ec3cf7251488ad Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 13 Jan 2022 17:08:23 +0100 Subject: [PATCH 70/75] Handle errors --- docs/use/inline_execution.md | 4 +++ myst_nb/nb_glue/elements.py | 53 ++++++++++++++++++++++-------------- myst_nb/render.py | 7 ++++- 3 files changed, 42 insertions(+), 22 deletions(-) diff --git a/docs/use/inline_execution.md b/docs/use/inline_execution.md index 2c242e47..ac23f975 100644 --- a/docs/use/inline_execution.md +++ b/docs/use/inline_execution.md @@ -82,3 +82,7 @@ This can have **nested syntax**. ``` This will work for any Jupyter kernel, independent of language! 
+ +Incorrect variables will currently like `` {eval}`b` ``, will currently log warnings: + +> `/docs/use/inline_execution.md:88: WARNING: NameError: name 'b' is not defined [mystnb.eval]` diff --git a/myst_nb/nb_glue/elements.py b/myst_nb/nb_glue/elements.py index 17484f7c..2a7f8fa7 100644 --- a/myst_nb/nb_glue/elements.py +++ b/myst_nb/nb_glue/elements.py @@ -20,13 +20,15 @@ def is_sphinx(document) -> bool: return hasattr(document.settings, "env") -def warning(message: str, document: nodes.document, line: int) -> nodes.system_message: +def warning( + message: str, document: nodes.document, line: int, subtype="glue" +) -> nodes.system_message: """Create a warning.""" if is_sphinx(document): logger = SphinxDocLogger(document) else: logger = DocutilsDocLogger(document) - logger.warning(message, subtype="glue") + logger.warning(message, subtype=subtype, line=line) return nodes.system_message( message, type="WARNING", @@ -208,35 +210,34 @@ class EvalRole(_PasteRoleBase): def run(self) -> Tuple[List[nodes.Node], List[nodes.system_message]]: document = self.inliner.document + source, line = self.get_source_info() if "nb_renderer" not in document: # TODO say this is because not a myst-document return [], [ - warning( - "No 'nb_renderer' found on the document.", document, self.lineno - ) + warning("No 'nb_renderer' found on the document.", document, line) ] nb_renderer: NbElementRenderer = document["nb_renderer"] try: output = nb_renderer.renderer.get_nb_variable(self.text) - # TODO handle if output is stdout/stderr - data = output["data"] - metadata = output.get("metadata", {}) except Exception as err: - return [], [ - warning(f"variable retrieval failed: {err}", document, self.lineno) - ] + return [], [warning(f"variable retrieval failed: {err}", document, line)] + if output.get("output_type") == "error": + msg = f"{output.get('ename', '')}: {output.get('evalue', '')}" + return [], [warning(msg, document, line, subtype="eval")] + data = output.get("data", {}) + metadata = output.get("metadata", {}) if is_sphinx(document): _nodes = render_output_sphinx( nb_renderer, data, metadata, - document["source"], - self.lineno, + source, + line, inline=True, ) else: _nodes = render_output_docutils( - nb_renderer, data, metadata, document, self.lineno, inline=True + nb_renderer, data, metadata, document, line, inline=True ) if _nodes and isinstance(_nodes[0], nodes.system_message): return [], _nodes @@ -381,6 +382,7 @@ class EvalDirective(_PasteDirectiveBase): def run(self) -> List[nodes.Node]: document = self.state.document + source, line = self.get_source_info() if "nb_renderer" not in document: # TODO say this is because not a myst-document return [ @@ -391,23 +393,32 @@ def run(self) -> List[nodes.Node]: nb_renderer: NbElementRenderer = document["nb_renderer"] try: output = nb_renderer.renderer.get_nb_variable(self.arguments[0]) - # TODO handle if output is stdout/stderr - data = output["data"] - metadata = output.get("metadata", {}) except Exception as err: - return [warning(f"variable retrieval failed: {err}", document, self.lineno)] + return [ + warning( + f"variable retrieval failed: {err}", + document, + self.lineno, + subtype="eval", + ) + ] + if output.get("output_type") == "error": + msg = f"{output.get('ename', '')}: {output.get('evalue', '')}" + return [warning(msg, document, line, subtype="eval")] + data = output.get("data", {}) + metadata = output.get("metadata", {}) if is_sphinx(document): _nodes = render_output_sphinx( nb_renderer, data, metadata, - document["source"], - self.lineno, + 
source, + line, inline=False, ) else: _nodes = render_output_docutils( - nb_renderer, data, metadata, document, self.lineno, inline=False + nb_renderer, data, metadata, document, line, inline=False ) return _nodes diff --git a/myst_nb/render.py b/myst_nb/render.py index e0e705ab..cbbcd803 100644 --- a/myst_nb/render.py +++ b/myst_nb/render.py @@ -281,6 +281,7 @@ def render_error( cell_metadata: Dict[str, Any], cell_index: int, source_line: int, + inline: bool = False, ) -> List[nodes.Element]: """Render a notebook error output. @@ -296,7 +297,11 @@ def render_error( cell_metadata, "error_lexer", "render_error_lexer" ) node = self.renderer.create_highlighted_code_block( - traceback, lexer, source=self.source, line=source_line + traceback, + lexer, + source=self.source, + line=source_line, + node_cls=nodes.literal if inline else nodes.literal_block, ) node["classes"] += ["output", "traceback"] return [node] From fe297ec45b2716b929c4215f48b71d164ae2c333 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 13 Jan 2022 17:22:54 +0100 Subject: [PATCH 71/75] Update inline_execution.md --- docs/use/inline_execution.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/use/inline_execution.md b/docs/use/inline_execution.md index ac23f975..846cbf1f 100644 --- a/docs/use/inline_execution.md +++ b/docs/use/inline_execution.md @@ -31,9 +31,9 @@ which turns on the experimental inline execution mode. Inline execution starts the Jupyter kernel, then executes code cells as they are visited during the conversion to docutils AST. -When an `eval` role or directive is encountered, the name is run by the kernel and the result is inserted into the document. +When an `eval` role or directive is encountered, the name is evaluated by the kernel and the result is inserted into the document. -You can see here that the variable `a` that is inserted by the `eval` role will change based on the order of execution, relative to the code cells. +You can see here that the variable `a`, which is inserted by the `eval` role, will change based on the order of execution (relative to the code cells). ```{code-cell} ipython3 a=1 From 2bfd3dba854df3a5584d1146543b6a2d2fd2477c Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 13 Jan 2022 17:23:46 +0100 Subject: [PATCH 72/75] Update inline_execution.md --- docs/use/inline_execution.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/use/inline_execution.md b/docs/use/inline_execution.md index 846cbf1f..13b4e585 100644 --- a/docs/use/inline_execution.md +++ b/docs/use/inline_execution.md @@ -17,10 +17,9 @@ mystnb: This is a Proof of Concept notebook for inline variables. -This notebook is executed using top-matter: +This notebook's execution mode is set by using the top-matter: ```md - --- mystnb: execution_mode: inline From cf90d39177aeebe2d2fd5d76634c8b70717909e8 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 13 Jan 2022 17:25:52 +0100 Subject: [PATCH 73/75] Update inline_execution.md --- docs/use/inline_execution.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/use/inline_execution.md b/docs/use/inline_execution.md index 13b4e585..73d918b0 100644 --- a/docs/use/inline_execution.md +++ b/docs/use/inline_execution.md @@ -82,6 +82,6 @@ This can have **nested syntax**. This will work for any Jupyter kernel, independent of language! 
-Incorrect variables will currently like `` {eval}`b` ``, will currently log warnings: +Incorrect variables, like `` {eval}`b` ``, will currently log warnings: > `/docs/use/inline_execution.md:88: WARNING: NameError: name 'b' is not defined [mystnb.eval]` From 7fa858bd40be622fb4c88770e2dd98b839adaac8 Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 13 Jan 2022 17:35:34 +0100 Subject: [PATCH 74/75] Update inline_execution.md --- docs/use/inline_execution.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/use/inline_execution.md b/docs/use/inline_execution.md index 73d918b0..279fd451 100644 --- a/docs/use/inline_execution.md +++ b/docs/use/inline_execution.md @@ -16,6 +16,7 @@ mystnb: # Inline execution mode and Markdown variables This is a Proof of Concept notebook for inline variables. +These work for any Jupyter kernel, independent of programming language, and requires no cell metadata! This notebook's execution mode is set by using the top-matter: @@ -29,7 +30,6 @@ mystnb: which turns on the experimental inline execution mode. Inline execution starts the Jupyter kernel, then executes code cells as they are visited during the conversion to docutils AST. - When an `eval` role or directive is encountered, the name is evaluated by the kernel and the result is inserted into the document. You can see here that the variable `a`, which is inserted by the `eval` role, will change based on the order of execution (relative to the code cells). @@ -80,8 +80,6 @@ This can have **nested syntax**. ```{eval} markdown ``` -This will work for any Jupyter kernel, independent of language! - Incorrect variables, like `` {eval}`b` ``, will currently log warnings: > `/docs/use/inline_execution.md:88: WARNING: NameError: name 'b' is not defined [mystnb.eval]` From ccd3565d116411a9e5d89eb61ffda2124285e9bc Mon Sep 17 00:00:00 2001 From: Chris Sewell <chrisj_sewell@hotmail.com> Date: Thu, 13 Jan 2022 17:37:12 +0100 Subject: [PATCH 75/75] Update inline_execution.md --- docs/use/inline_execution.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/use/inline_execution.md b/docs/use/inline_execution.md index 279fd451..ecc639d8 100644 --- a/docs/use/inline_execution.md +++ b/docs/use/inline_execution.md @@ -29,7 +29,7 @@ mystnb: which turns on the experimental inline execution mode. -Inline execution starts the Jupyter kernel, then executes code cells as they are visited during the conversion to docutils AST. +Inline execution starts the Jupyter kernel then executes code cells, as they are visited, during the conversion to a docutils document. When an `eval` role or directive is encountered, the name is evaluated by the kernel and the result is inserted into the document. You can see here that the variable `a`, which is inserted by the `eval` role, will change based on the order of execution (relative to the code cells).
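To make the ordering behaviour described in these docs concrete, here is a minimal sketch (not part of the patch series) that drives the same machinery directly through the `NbClientRunner` added in `myst_nb/execute.py`. The notebook contents and kernelspec are illustrative assumptions, and a local `ipykernel` must be installed so a kernel can start:

```python
# Sketch only: inline execution evaluates {eval} variables against the live
# kernel, so the result depends on how many code cells have run so far.
import nbformat.v4 as nbf

from myst_nb.execute import NbClientRunner  # module path as added by this patch

nb = nbf.new_notebook(cells=[nbf.new_code_cell("a = 1"), nbf.new_code_cell("a = 2")])
nb.metadata["kernelspec"] = {"name": "python3", "display_name": "Python 3"}

with NbClientRunner(nb, cwd=None) as runner:
    runner.execute_next_cell(0)
    first = runner.get_variable("a")   # what {eval}`a` would see here: a == 1
    runner.execute_next_cell(1)
    second = runner.get_variable("a")  # after the second cell it sees a == 2
    final_nb = runner.get_final_notebook()
```

Note that `get_variable` only accepts a bare identifier (the regex guard in `NbClientRunner.get_variable`) and sends it with `store_history=False`, so evaluating a variable does not advance the execution count or otherwise disturb kernel state.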