diff --git a/.gitignore b/.gitignore index 262f8169..2ec237ec 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ tests/pdf/_build/ tests/pdf/_static/ tests/pdf/theme/ tests/no_inline_exercises/_build +tests/execute/_build/ tests/ipynb/*checkpoints *.pyc .DS_Store diff --git a/.travis.yml b/.travis.yml index 919a48a0..116cb682 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,18 +3,12 @@ sudo: false #Use new Container Infrastructure language: python python: - - 2.7 - 3.7 env: - - SPHINX_VERSION=1.7.9 - SPHINX_VERSION=1.8.5 - SPHINX_VERSION=2.1.2 - -matrix: - exclude: - - python: 2.7 - env: SPHINX_VERSION=2.1.2 + - SPHINX_VERSION=2.4.1 directories: - "/tmp/texlive" @@ -33,15 +27,13 @@ before_install: - travis_wait 45 bash ./util/setup_latex.sh - export PATH="/tmp/texlive/bin/x86_64-linux:$PATH" +addons: + apt_packages: + - pandoc install: - - if [ $TRAVIS_PYTHON_VERSION == 2.7 ]; then - pip install sphinx==$SPHINX_VERSION nbconvert nbformat dask distributed nbdime; - pip install sphinxcontrib-bibtex==0.4.2; - else - pip install sphinx==$SPHINX_VERSION nbconvert nbformat dask==2.5.2 distributed==2.5.2 nbdime; - pip install sphinxcontrib-bibtex; - fi + - pip install sphinx==$SPHINX_VERSION nbconvert nbformat dask==2.5.2 distributed==2.5.2 nbdime; + - pip install sphinxcontrib-bibtex; - python setup.py install script: diff --git a/docs/builders.rst b/docs/builders.rst index 11e03fd6..cb8ed36b 100644 --- a/docs/builders.rst +++ b/docs/builders.rst @@ -3,36 +3,38 @@ Builders ========= -This extension has the following Builders +.. contents:: Options + :depth: 1 + :local: -jupyter +execute ------- -This builder currently handles `jupyter`, `html`, and `coverage` output - -.. code-block:: bash - - @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDCOVERAGE)" $(FILES) $(SPHINXOPTS) $(O) +This builder extracts code-blocks from the `RST` files and manages the +execution of each block. 
It saves the results in a `json` type codetree +object for use by other builders that construct notebooks. +jupyter +------- -.. warning:: +This builder builds IPYNB notebooks. - If your project needs to build `jupyter` and `html` then configuration for `html` - and `coverage` is currently handled through Makefile overrides. - The project is working on separate builders for html and coverage +if `jupyter_execute_notebooks=True` is set in the `conf.py` file then +the `execute` builder is automatically called to run all code. -Example Configuration for `HTML` production -.. code-block:: bash +jupyterhtml +----------- - @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDWEBSITE)" $(FILES) $(SPHINXOPTS) $(O) -D jupyter_make_site=1 -D jupyter_generate_html=1 -D jupyter_download_nb=1 -D jupyter_execute_notebooks=1 -D jupyter_target_html=1 -D jupyter_download_nb_image_urlpath="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/py/_static/" -D jupyter_images_markdown=0 -D jupyter_html_template="python-html.tpl" -D jupyter_download_nb_urlpath="https://lectures.quantecon.org/" -D jupyter_coverage_dir=$(BUILDCOVERAGE) +This builder manages the construction of project websites. +if `jupyter_execute_notebooks=True` is set in the `conf.py` file then +the `execute` builder is automatically called to run all code. jupyterpdf ---------- -This builder handles production of `pdf` - -.. code-block:: bash +This builder handles construction of `pdf` versions of the RST documents. - @$(SPHINXBUILD) -M jupyterpdf "$(SOURCEDIR)" "$(BUILDCOVERAGE)" $(FILES) $(SPHINXOPTS) $(O) \ No newline at end of file +if `jupyter_execute_notebooks=True` is set in the `conf.py` file then +the `execute` builder is automatically called to run all code. 
diff --git a/docs/config-extension-exercise.rst b/docs/config-directive-exercise.rst similarity index 100% rename from docs/config-extension-exercise.rst rename to docs/config-directive-exercise.rst diff --git a/docs/config-execution.rst b/docs/config-execution.rst new file mode 100644 index 00000000..5e74ceaa --- /dev/null +++ b/docs/config-execution.rst @@ -0,0 +1,182 @@ +.. _config_execution: + +Execution Configuration +======================= + +.. contents:: Options + :depth: 1 + :local: + +Code execution is handled by a specialised `execute` builder. + +jupyter_file_dependencies +-------------------------- + +Specify support (dependencies) for notebook collection at the `file` or +the `directory` level. + +``conf.py`` usage: + +.. code-block:: python + + jupyter_dependencies = { + : ['file1', 'file2'], + {}/ : ['file1'] + } + +.. note:: + + to specify a support file at the root level of the source directory + the key should be `""` + +jupyter_notebook_dependencies +----------------------------- + +Dependency of notebooks on other notebooks for execution can +be added to the configuration file above in the form of a dictionary. +The key/value pairs will contain the names of the notebook files. + +``conf.py`` usage: + +.. code-block:: python + + # add your dependency lists here + jupyter_notebook_dependencies = { + 'python_advanced_features' : ['python_essentials','python_oop'], + 'discrete_dp' : ['dp_essentials'], + } + +jupyter_number_workers +------------------------- + +Specify the number cores to use with dask + +.. list-table:: + :header-rows: 1 + + * - Values + * - Integer (**default** = 1) + +``conf.py`` usage: + + jupyter_number_workers = 4 + + +jupyter_execution_threads_per_worker +------------------------------------ + +Specify the number of threads per worker for dask + +.. 
list-table:: + :header-rows: 1 + + * - Values + * - Integer (**default** = 1) + +``conf.py`` usage: + + jupyter_threads_per_worker = 1 + +jupyter_coverage_report_template +----------------------------------- + +Provide path to template coverage report file + +.. todo:: + + Document format for template + +``conf.py`` usage: + +.. code-block:: python + + jupyter_template_coverage_file_path = "theme/templates/.json" + +.. warning:: + + ``coverage`` will currently produce a ``json`` report + that can be used to support an execution status page. But adding + badges and a status page needs to be made into an option and + specified via the default theme. + See `#237 `__ + + +jupyter_options +--------------- + +An dict-type object that is used by dask to control execution + + +.. TODO:: + + @aakash is this used? + +jupyter_drop_tests +------------------ + +**Note:** Future Feature? + +Drop ``code-blocks` that include ``:class: test`` + +.. list-table:: + :header-rows: 1 + + * - Values + * - False (**default**) + * - True + +.. TODO:: + + This option should be added to `execute` builder so tests can + be switched on/off for execution testing + +jupyter_ignore_no_execute: +-------------------------- + +**Note:** Deprecate? + +.. list-table:: + :header-rows: 1 + + * - Values + * - False (**default**) + * - True + +When constructing notebooks this option can be enabled to ignore `:class: no-execute` +for `code-blocks`. This is useful for `html` writer for pages that are meant to fail +but shouldn't be included in `coverage` tests. + +``conf.py`` usage: + +.. code-block:: python + + jupyter_ignore_no_execute = True + +.. todo:: + + @aakash this is no longer required as all code-blocks are run correct? + +jupyter_ignore_skip_test +------------------------ + +**Note:** Deprecate? + +When constructing notebooks this option can be enabled to ignore `:class: skip-test` +for `code-blocks`. + +.. 
list-table:: + :header-rows: 1 + + * - Values + * - False (**default**) + * - True + +``conf.py`` usage: + +.. code-block:: python + + jupyter_ignore_skip_test = True + +.. todo:: + + @aakash is this required? \ No newline at end of file diff --git a/docs/config-extension-coverage.rst b/docs/config-extension-coverage.rst deleted file mode 100644 index edc418bb..00000000 --- a/docs/config-extension-coverage.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. _coverage_extension_coverage: - -Computing Coverage Statistics -============================= - -.. warning:: - - ``make coverage`` will currently produce a ``json`` report - that can be used to support an execution status page. But adding - badges and a status page needs to be made into an option and - specified via the default theme. - See `#237 `__ - -jupyter_make_coverage ---------------------- - -Enable coverage statistics to be computed - -.. list-table:: - :header-rows: 1 - - * - Values - * - False (**default**) - * - True - - -jupyter_template_coverage_file_path ------------------------------------ - -Provide path to template coverage file - -.. todo:: - - Document format for template - -``conf.py`` usage: - -.. code-block:: python - - jupyter_template_coverage_file_path = "theme/templates/.json" \ No newline at end of file diff --git a/docs/config-extension-execution.rst b/docs/config-extension-execution.rst deleted file mode 100644 index 203ad47d..00000000 --- a/docs/config-extension-execution.rst +++ /dev/null @@ -1,108 +0,0 @@ -.. _config_extension_execution: - -Executing Notebooks -=================== - -jupyter_execute_nb ------------------- - -Enables the execution of generated notebooks - -.. list-table:: - :header-rows: 1 - - * - Values - * - False (**default**) - * - True - -.. todo:: - - deprecate this option in favour of jupyter_execute_notebooks - -jupyter_execute_notebooks -------------------------- - -Enables the execution of generated notebooks - -.. 
list-table:: - :header-rows: 1 - - * - Values - * - False (**default**) - * - True - -``conf.py`` usage: - -.. code-block:: python - - jupyter_execute_notebooks = True - -jupyter_dependency_lists ------------------------- - -Dependency of notebooks on other notebooks for execution can also -be added to the configuration file above in the form of a dictionary. -The key/value pairs will contain the names of the notebook files. - -``conf.py`` usage: - -.. code-block:: python - - # add your dependency lists here - jupyter_dependency_lists = { - 'python_advanced_features' : ['python_essentials','python_oop'], - 'discrete_dp' : ['dp_essentials'], - } - - -jupyter_dependencies --------------------- - -Specify support (dependencies) for notebook collection at the `file` or -the `directory` level. - -``conf.py`` usage: - -.. code-block:: python - - jupyter_dependencies = { - : ['file1', 'file2'], - {}/ : ['file1'] - } - -.. note:: - - to specify a support file at the root level of the source directory - the key should be `""` - -jupyter_number_workers ----------------------- - -Specify the number cores to use with dask - -.. list-table:: - :header-rows: 1 - - * - Values - * - Integer (**default** = 1) - -``conf.py`` usage: - - jupyter_number_workers = 4 - - -jupyter_threads_per_worker --------------------------- - -Specify the number of threads per worker for dask - -.. list-table:: - :header-rows: 1 - - * - Values - * - Integer (**default** = 1) - -``conf.py`` usage: - - jupyter_threads_per_worker = 1 - diff --git a/docs/config-extension-notebooks.rst b/docs/config-extension-notebooks.rst deleted file mode 100644 index 0d099a55..00000000 --- a/docs/config-extension-notebooks.rst +++ /dev/null @@ -1,279 +0,0 @@ -.. _config_extension_notebooks: - -Constructing Jupyter Notebooks -============================== - -.. contents:: Options - :depth: 1 - :local: - -jupyter_conversion_mode ------------------------ - -Specifies which writer to use when constructing notebooks. - -.. 
list-table:: - :header-rows: 1 - - * - Option - - Description - * - "all" (**default**) - - compile complete notebooks which include ``markdown cells`` and ``code blocks`` - * - "code" - - compile notebooks that only contain the ``code blocks``. - -``conf.py`` usage: - -.. code-block:: python - - jupyter_conversion_mode = "all" - -jupyter_static_file_path -------------------------- - -Specify path to `_static` folder. - -``conf.py`` usage: - -.. code-block:: python - - jupyter_static_file_path = ["source/_static"] - - -jupyter_header_block ---------------------- - -Add a header block to every generated notebook by specifying an RST file - -``conf.py`` usage: - -.. code-block:: python - - jupyter_header_block = ["source/welcome.rst"] - -jupyter_default_lang --------------------- - -Specify default language for collection of RST files - -``conf.py`` usage: - -.. code-block:: python - - jupyter_default_lang = "python3" - -jupyter_lang_synonyms ---------------------- - -Specify any language synonyms. - -This will be used when parsing code blocks. For example, python and ipython -have slightly different highlighting directives but contain code that can both be executed on -the same kernel - -``conf.py`` usage: - -.. code-block:: python - - jupyter_lang_synonyms = ["pycon", "ipython"] - -jupyter_kernels ---------------- - -Specify kernel information for the jupyter notebook metadata. - -This is used by jupyter to connect the correct language kernel and is **required** in ``conf.py``. - -``conf.py`` usage: - -.. code-block:: python - - jupyter_kernels = { - "python3": { - "kernelspec": { - "display_name": "Python", - "language": "python3", - "name": "python3" - }, - "file_extension": ".py", - }, - } - -.. TODO:: - - See Issue `196 `__ - -jupyter_write_metadata ----------------------- - -write time and date information at the top of each notebook as notebook metadata - -.. 
note:: - - This option is slated to be deprecated - -jupyter_options ---------------- - -An dict-type object that is used by dask to control execution - - -.. TODO:: - - This option needs to be reviewed - -jupyter_drop_solutions ----------------------- - -Drop ``code-blocks`` that include ``:class: solution`` - -.. list-table:: - :header-rows: 1 - - * - Values - * - False (**default**) - * - True - -.. TODO:: - - This option needs to be reviewed - -jupyter_drop_tests ------------------- - -Drop ``code-blocks` that include ``:class: test`` - -.. list-table:: - :header-rows: 1 - - * - Values - * - False (**default**) - * - True - -.. TODO:: - - This option needs to be reviewed - -jupyter_ignore_no_execute: --------------------------- - -.. list-table:: - :header-rows: 1 - - * - Values - * - False (**default**) - * - True - -When constructing notebooks this option can be enabled to ignore `:class: no-execute` -for `code-blocks`. This is useful for `html` writer for pages that are meant to fail -but shouldn't be included in `coverage` tests. - -``conf.py`` usage: - -.. code-block:: python - - jupyter_ignore_no_execute = True - -jupyter_ignore_skip_test ------------------------- - -When constructing notebooks this option can be enabled to ignore `:class: skip-test` -for `code-blocks`. - -.. list-table:: - :header-rows: 1 - - * - Values - * - False (**default**) - * - True - -``conf.py`` usage: - -.. code-block:: python - - jupyter_ignore_skip_test = True - -jupyter_allow_html_only ------------------------ - -Enable this option to allow ``.. only:: html`` pass through to the notebooks. - -.. list-table:: - :header-rows: 1 - - * - Values - * - False (**default**) - * - True - -``conf.py`` usage: - -.. code-block:: python - - jupyter_allow_html_only = True - -jupyter_target_html -------------------- - -Enable this option to generate notebooks that favour the inclusion of ``html`` -in notebooks to support more advanced features. - -.. 
list-table:: - :header-rows: 1 - - * - Values - * - False (**default**) - * - True - -Supported Features: - -#. html based table support -#. image inclusion as ``html`` figures - -``conf.py`` usage: - -.. code-block:: python - - jupyter_target_html = True - - -jupyter_images_markdown ------------------------ - -Force the inclusion of images as native markdown - -.. list-table:: - :header-rows: 1 - - * - Values - * - False (**default**) - * - True - -.. note:: - - when this option is enabled the `:scale:` option is not supported - in RST. - -``conf.py`` usage: - -.. code-block:: python - - jupyter_images_markdown = True - - -jupyter_dependencies --------------------- - -Specify file or directory level dependencies - -``conf.py`` usage: - -.. code-block:: python - - jupyter_dependencies = { - : ['file1', 'file2'], - {}/ : ['file1'] - } - -this allows you to specify a companion data file for -a given ``RST`` document and it will get copied through sphinx -to the ``_build`` folder. \ No newline at end of file diff --git a/docs/config-extension.rst b/docs/config-extension.rst index 57fbe7aa..cd006f67 100644 --- a/docs/config-extension.rst +++ b/docs/config-extension.rst @@ -3,18 +3,27 @@ Extension Configuration and Options =================================== -The options are split into the different parts of the compilation pipeline -that are available in this extension: +.. note:: + + The extension has recently been re-written to include separate translators + for notebooks that have different end purposes. There is now IPYNB, HTML, + and PDF translators which allows the number of configuration options to be + substantially reduced. + + Execution is now also handled by a specialised execution builder with results + shared across IPYNB, HTML and PDF translators. + +Options are available for each builder and translator: .. 
toctree:: :maxdepth: 1 - config-extension-notebooks - config-extension-execution - config-extension-html - config-extension-coverage - config-extension-exercise - config-extension-pdf + config-general + config-ipynb-translator + config-html-translator + config-pdf-translator + config-execution + config-directive-exercise This extension also offers additional directives that can be used while writing your documents diff --git a/docs/config-general.rst b/docs/config-general.rst new file mode 100644 index 00000000..3b47570b --- /dev/null +++ b/docs/config-general.rst @@ -0,0 +1,87 @@ +.. _config_general: + +General Configuration +===================== + +.. contents:: Options + :depth: 1 + :local: + +jupyter_language +---------------- + +Specifies default language for Jupyter Kernel + +.. list-table:: + :header-rows: 1 + + * - Option + - Description + * - "python3" (**default**) + - Str, default(use the python3 kernel) + +``conf.py`` usage: + +.. code-block:: python + + jupyter_language = "julia" + +.. note:: + + Other languages can be displayed (i.e. julia). This option sets the + primary execution language for the collection of notebooks + + +jupyter_lang_synonyms +--------------------- + +Specify any language synonyms. + +This will be used when parsing code blocks. For example, python and ipython +have slightly different highlighting directives but contain code that can both be executed on +the same kernel + +``conf.py`` usage: + +.. code-block:: python + + jupyter_lang_synonyms = ["pycon", "ipython"] + +jupyter_execute_notebooks +------------------------- + +Enables the execution of generated notebooks by calling the +`execute` builder + +.. list-table:: + :header-rows: 1 + + * - Values + * - False (**default**) + * - True + +``conf.py`` usage: + +.. code-block:: python + + jupyter_execute_notebooks = True + +.. note:: + + `jupyter_dependencies `__ can be specified to support notebook + execution. 
+ +jupyter_static_file_path +------------------------- + +Specify path to `_static` folder. + +``conf.py`` usage: + +.. code-block:: python + + jupyter_static_file_path = ["source/_static"] + +.. todo:: + + will be deprecated once static assets feature is implemented. \ No newline at end of file diff --git a/docs/config-extension-html.rst b/docs/config-html-translator.rst similarity index 74% rename from docs/config-extension-html.rst rename to docs/config-html-translator.rst index ed3c26b2..2d62c801 100644 --- a/docs/config-extension-html.rst +++ b/docs/config-html-translator.rst @@ -1,45 +1,49 @@ -.. config_extension_html: +.. _config_html_translator: -Converting Notebooks to HTML -============================ +IPYNB(HTML) Notebook Translator +=============================== .. contents:: Options :depth: 1 :local: -jupyter_generate_html ---------------------- +Options available when constructing Jupyter notebooks that are +targeting HTML and website support -Enable sphinx to generate HTML versions of notebooks +jupyter_html_template +--------------------- -.. list-table:: - :header-rows: 1 +Specify path to nbconvert html template file - * - Values - * - False (**default**) - * - True +.. note:: + + Documentation on nbconvert templates can be found + `here `_ ``conf.py`` usage: .. code-block:: python - jupyter_generate_html = True + jupyter_html_template = "theme/template/.tpl" -jupyter_html_template +jupyter_template_path --------------------- -Specify path to nbconvert html template file +Specify path for templates -.. note:: +.. list-table:: + :header-rows: 1 - Documentation on nbconvert templates can be found - `here `_ + * - Value + * - "templates" (**default**) ``conf.py`` usage: .. code-block:: python - jupyter_html_template = "theme/template/.tpl" + jupyter_template_path = "templates" + +.. 
TODO: Should this be in general settings jupyter_make_site ----------------- @@ -60,7 +64,6 @@ This option: jupyter_make_site = True - jupyter_download_nb ------------------- @@ -72,7 +75,6 @@ Request Sphinx to generate a collection of download notebooks to support a websi jupyter_download_nb = True - jupyter_download_nb_images_urlpath ---------------------------------- @@ -98,7 +100,6 @@ Specify theme name The theme should be located in the path of `jupyter_theme_path`. The default path would be: ``theme//`` - jupyter_theme_path ------------------ @@ -116,33 +117,4 @@ Specify location for theme files jupyter_theme_path = "theme" -jupyter_template_path ---------------------- - -Specify path for templates - -.. list-table:: - :header-rows: 1 - - * - Value - * - "templates" (**default**) - -``conf.py`` usage: - -.. code-block:: python - - jupyter_template_path = "templates" - -jupyter_template_html ---------------------- - -Specify ``html`` template to be used by ``nbconvert`` - -``conf.py`` usage: - -.. code-block:: python - - jupyter_template_html = - -The template file should be located in the path of ``jupyter_template_path``. -The default path would be: ``templates/`` +.. TODO: should this be general settings? \ No newline at end of file diff --git a/docs/config-ipynb-translator.rst b/docs/config-ipynb-translator.rst new file mode 100644 index 00000000..229ab299 --- /dev/null +++ b/docs/config-ipynb-translator.rst @@ -0,0 +1,110 @@ +.. _config_ipynb_translator: + +IPYNB Notebook Translator +========================= + +.. contents:: Options + :depth: 1 + :local: + +Options available when constructing Jupyter notebooks + +jupyter_allow_html_only +----------------------- + +Enable this option to allow ``.. only:: html`` pass through to the notebooks. + +.. list-table:: + :header-rows: 1 + + * - Values + * - False (**default**) + * - True + +``conf.py`` usage: + +.. code-block:: python + + jupyter_allow_html_only = True + +.. 
note:: + + This is turned on by default in the HTMLTranslator + + +jupyter_images_html +------------------- + +Force the inclusion of images as html objects in the notebook + +.. list-table:: + :header-rows: 1 + + * - Values + * - False + * - True (**default**) + +.. note:: + + this is useful to support the full suite of attributes associated + with the image directive (i.e. scale). + +``conf.py`` usage: + +.. code-block:: python + + jupyter_images_html = True + +jupyter_section_blocks +----------------------- + +Writes RST sections to independent markdown blocks in IPYNB + +.. list-table:: + :header-rows: 1 + + * - Values + * - False + * - True (**default**) + +``conf.py`` usage: + +.. code-block:: python + + jupyter_section_blocks = False + +jupyter_drop_tests +------------------ + +Drop ``code-blocks`` that include ``:class: test`` +Allows notebooks to be constructed without including tests from the +source RST file + +.. list-table:: + :header-rows: 1 + + * - Values + * - False + * - True (**default**) + +jupyter_drop_solutions +---------------------- + +**Note:** Future Feature + +Drop ``code-blocks`` that include ``:class: solution`` + +.. list-table:: + :header-rows: 1 + + * - Values + * - False (**default**) + * - True + +.. TODO:: + + This option needs to be reviewed. A new implementation should be + considered where the option builds a set of notebooks with and + without solutions so special configuration isn't required. + + diff --git a/docs/config-extension-pdf.rst b/docs/config-pdf-translator.rst similarity index 95% rename from docs/config-extension-pdf.rst rename to docs/config-pdf-translator.rst index acec9b5e..0ec76cef 100644 --- a/docs/config-extension-pdf.rst +++ b/docs/config-pdf-translator.rst @@ -1,13 +1,12 @@ -.. _config_extension_pdf: +.. _config_pdf_translator: -Converting Notebooks to PDF -============================ +IPYNB(PDF) Notebook Translator +=============================== .. 
contents:: Options :depth: 1 :local: - jupyter_latex_template ----------------------- @@ -30,7 +29,6 @@ Add project logo to pdf document jupyter_pdf_logo = "theme/img/logo.png" - jupyter_bib_file ----------------- diff --git a/docs/config-project.rst b/docs/config-project.rst deleted file mode 100644 index 4a13b162..00000000 --- a/docs/config-project.rst +++ /dev/null @@ -1,40 +0,0 @@ - -.. _config_project: - -Managing Large Projects -======================= - -Large projects may require different build pathways due to the time required -for execution of embedded code. This can be done by modifying the ``Makefile`` -to accommodate multiple build pathways. - -You may, for example, wish to leave ``make jupyter`` simply building notebooks -while setting up an alternative ``make`` command to target a full ``website`` -build. - -In the ``Makefile`` you can add an alternative build target such as: - -.. code-block:: bash - - BUILDWEBSITE = _build/website - -and then you can modify options (set in the ``conf.py`` file) using the `-D` flag. - -.. code-block:: bash - - website: - @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDWEBSITE)" $(SPHINXOPTS) $(O) -D jupyter_make_site=1 -D jupyter_generate_html=1 -D jupyter_download_nb=1 -D jupyter_execute_notebooks=1 -D jupyter_target_html=1 -D jupyter_images_markdown=0 -D jupyter_html_template="theme/templates/lectures-nbconvert.tpl" -D jupyter_download_nb_urlpath="https://lectures.quantecon.org/" - -this will setup a new folder ``_build/website`` for the new build pathway to -store resultant files from the options selected. See :doc:`builders` for further details. - -.. note:: - - This method also preserves the ``sphinx`` cache mechanism for each build pathway. - -.. warning:: - - Issue `#199 `_ will - alter this approach to include all `configuration` settings in the ``conf.py`` file - and then the different pipelines can be switched off in the Makefile which will - be less error prone. 
\ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index 66d31e39..46d7332b 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -3,6 +3,7 @@ This sphinx extension can be used to build a collection of `Jupyter `__ notebooks for Sphinx Projects. +It can also be used to manage websites and build pdf files. .. note:: @@ -23,10 +24,9 @@ the task of version control for large projects. installation config-sphinx config-extension + builders examples config-example - config-project - builders Credits @@ -36,11 +36,10 @@ This project is supported by `QuantEcon `__ Many thanks to the lead developers of this project. - * `@AakashGfude `__ * `@mmcky `__ -Contributors +Past Contributors * `FelipeMaldonado `__ * `@myuuuuun `__ @@ -49,7 +48,9 @@ Contributors Projects using Extension ------------------------ -1. `QuantEcon Lectures `__ +1. `QuantEcon Python Lectures `__ +1. `QuantEcon Julia Lectures `__ +1. `QuantEcon Datascience Lectures `__ If you find this extension useful please let us know at contact@quantecon.org @@ -57,7 +58,7 @@ contact@quantecon.org LICENSE ======= -Copyright © 2019 QuantEcon Development Team: BSD-3 All rights reserved. +Copyright © 2020 QuantEcon Development Team: BSD-3 All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/setup.py b/setup.py index f6fa2791..0294d5b0 100644 --- a/setup.py +++ b/setup.py @@ -8,20 +8,21 @@ This package contains a `Sphinx `_ extension for compiling RST to Jupyter notebooks. -It contains two primary builders: +It contains four primary builders: -1. jupyter -2. jupyterpdf +1. JupyterBuilder +2. JupyterCodeBuilder +3. JupyterHTMLBuilder +4. JupyterPDFBuilder -The default behaviour of the `jupyter` builder is to provide notebooks that are readable -with an emphasis on adding markdown into the notebooks. 
However, it can also be configured -to generate websites (such as `python.quantecon.org `_) -and run coverage tests, which uses `ipynb` as an intermediate format. +The default behavior of `JupyterBuilder` is to provide notebooks that are readable +with an emphasis on adding markdown into the notebooks. -The `jupyterpdf` builder is focused on building `pdf` files (via an intermediate ipynb format). +`JupyterHTMLBuilder` is useful for targeting the construction of websites. -This project is maintained and supported by `QuantEcon `_. +`JupyterPDFBuilder` is useful for building PDF files. +This project is maintained and supported by `QuantEcon `_. Status ------ @@ -58,7 +59,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', - 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Framework :: Sphinx :: Extension', 'Topic :: Documentation', @@ -67,6 +67,6 @@ platforms='any', packages=find_packages(), include_package_data=True, - install_requires=['docutils', 'nbformat', 'sphinx', 'dask<=2.5.2', 'distributed<=2.5.2', 'ipython', 'nbconvert', 'jupyter_client'], + install_requires=['docutils', 'nbformat', 'sphinx', 'dask<=2.5.2', 'distributed<=2.5.2', 'ipython', 'nbconvert', 'jupyter_client', 'munch'], namespace_packages=['sphinxcontrib'], ) diff --git a/sphinxcontrib/jupyter/__init__.py b/sphinxcontrib/jupyter/__init__.py index eb1fdb65..e6285397 100644 --- a/sphinxcontrib/jupyter/__init__.py +++ b/sphinxcontrib/jupyter/__init__.py @@ -6,7 +6,9 @@ pass from .builders.jupyter import JupyterBuilder -from .builders.jupyterpdf import JupyterPDFBuilder +from .builders.jupyter_pdf import JupyterPDFBuilder +from .builders.jupyter_code import JupyterCodeBuilder +from .builders.jupyter_html import JupyterHTMLBuilder from .directive.jupyter import jupyter_node from .directive.jupyter import Jupyter as JupyterDirective from .directive.jupyter import JupyterDependency @@ -29,72 +31,62 @@ def 
depart_exercise_node(self, node): def setup(app): - execute_nb_obj = { - "no-text": True, - "timeout": 600, - "text_reports": True, - "coverage": False, - } - #Add Sphinx Version to ENV Configuration app.add_config_value('SPHINX_VERSION', SPHINX_VERSION, 'env') # Jupyter Builder and Options - app.add_builder(JupyterPDFBuilder) app.add_builder(JupyterBuilder) - app.add_config_value("jupyter_kernels", None, "jupyter") - app.add_config_value("jupyter_conversion_mode", "all", "jupyter") - app.add_config_value("jupyter_write_metadata", True, "jupyter") - app.add_config_value("jupyter_static_file_path", [], "jupyter") - app.add_config_value("jupyter_header_block", None, "jupyter") - app.add_config_value("jupyter_options", None, "jupyter") - app.add_config_value("jupyter_default_lang", "python3", "jupyter") - app.add_config_value("jupyter_lang_synonyms", [], "jupyter") - app.add_config_value("jupyter_drop_solutions", True, "jupyter") - app.add_config_value("jupyter_drop_tests", True, "jupyter") - app.add_config_value("jupyter_ignore_no_execute", False, "jupyter") - app.add_config_value("jupyter_ignore_skip_test", False, "jupyter") - app.add_config_value("jupyter_execute_nb", execute_nb_obj, "jupyter") - app.add_config_value("jupyter_template_coverage_file_path", None, "jupyter") - app.add_config_value("jupyter_generate_html", False, "jupyter") + app.add_builder(JupyterCodeBuilder) + app.add_builder(JupyterHTMLBuilder) + app.add_builder(JupyterPDFBuilder) + app.add_config_value("jupyter_language", "python3", "jupyter") + app.add_config_value("jupyter_language_synonyms", [], "jupyter") + + #-IPYNB-# + app.add_config_value("jupyter_images_html", True, "jupyter") + app.add_config_value("jupyter_section_blocks", True, "jupyter") + + #-HTML-# + app.add_config_value("jupyter_static_file_path", [], "jupyter") #TODO: future deprecation app.add_config_value("jupyter_html_template", None, "jupyter") - app.add_config_value("jupyter_execute_notebooks", False, "jupyter") - 
app.add_config_value("jupyter_make_site", False, "jupyter") - app.add_config_value("jupyter_dependency_lists", {}, "jupyter") + app.add_config_value("jupyter_html_theme", "theme", "jupyter") + app.add_config_value("jupyter_allow_html_only", False, "") + app.add_config_value("jupyter_download_nb_urlpath", None, "jupyter") + app.add_config_value("jupyter_download_nb_image_urlpath", None, "jupyter") + app.add_config_value("jupyter_images_markdown", False, "jupyter") #TODO: remove + + #-EXECUTE-# + app.add_config_value("jupyter_execute", True, "jupyter") + app.add_config_value("jupyter_execute_allow_errors", True, "jupyter") + app.add_config_value("jupyter_coverage_template", None, "jupyter") app.add_config_value("jupyter_threads_per_worker", 1, "jupyter") app.add_config_value("jupyter_number_workers", 1, "jupyter") - app.add_config_value("jupyter_make_coverage", False, "jupyter") - app.add_config_value("jupyter_target_pdf", False, "jupyter") - app.add_config_value("jupyter_coverage_dir", None, "jupyter") - app.add_config_value("jupyter_theme", None, "jupyter") - app.add_config_value("jupyter_theme_path", "theme", "jupyter") - app.add_config_value("jupyter_template_path", "templates", "jupyter") - app.add_config_value("jupyter_dependencies", None, "jupyter") - app.add_config_value("jupyter_download_nb_execute", None, "jupyter") - - # Jupyter pdf options - app.add_config_value("jupyter_latex_template", None, "jupyter") - app.add_config_value("jupyter_latex_template_book", None, "jupyter") + app.add_config_value("jupyter_dependencies", None, "jupyter") #TODO: rename + app.add_config_value("jupyter_dependency_lists", {}, "jupyter") #TODO: rename + + #-PDF-# + app.add_config_value("jupyter_template_latex", None, "jupyter") + app.add_config_value("jupyter_template_latexbook", None, "jupyter") app.add_config_value("jupyter_pdf_logo", None, "jupyter") app.add_config_value("jupyter_bib_file", None, "jupyter") app.add_config_value("jupyter_pdf_author", None, "jupyter") - 
app.add_config_value("jupyter_pdf_showcontentdepth", 2, "jupyter") app.add_config_value("jupyter_pdf_urlpath", None, "jupyter") - app.add_config_value("jupyter_pdf_excludepatterns", [], "jupyter") + app.add_config_value("jupyter_pdf_excludepatterns", [], "jupyter") app.add_config_value("jupyter_pdf_book", False, "jupyter") app.add_config_value("jupyter_pdf_book_index", None, "jupyter") app.add_config_value("jupyter_pdf_book_title", None, "jupyter") app.add_config_value("jupyter_pdf_book_name", None, "jupyter") - - + #TODO: REVIEW + app.add_config_value("jupyter_solution_notebook", True, "jupyter") + app.add_config_value("jupyter_drop_tests", True, "jupyter") #TODO: class hide - # Jupyter Directive + # Jupyter Directive-# app.add_node(jupyter_node, html=(_noop, _noop), latex=(_noop, _noop)) app.add_directive("jupyter", JupyterDirective) app.add_directive("jupyter-dependency", JupyterDependency) - # Exercise directive + #-Exercise Directive-# if SPHINX_VERSION[0] >= 2: app.add_config_value('exercise_include_exercises', True, 'html') app.add_config_value('exercise_inline_exercises', False, 'html') @@ -110,14 +102,8 @@ def setup(app): app.connect('doctree-resolved', exercise.process_exercise_nodes) app.connect('env-purge-doc', exercise.purge_exercises) - # jupyter setup + #-Transforms-# app.add_transform(JupyterOnlyTransform) - app.add_config_value("jupyter_allow_html_only", False, "jupyter") - app.add_config_value("jupyter_target_html", False, "jupyter") - app.add_config_value("jupyter_download_nb", False, "jupyter") - app.add_config_value("jupyter_download_nb_urlpath", None, "jupyter") - app.add_config_value("jupyter_download_nb_image_urlpath", None, "jupyter") - app.add_config_value("jupyter_images_markdown", False, "jupyter") return { "version": VERSION, diff --git a/sphinxcontrib/jupyter/builders/jupyter.py b/sphinxcontrib/jupyter/builders/jupyter.py index 79b9ed14..199ebb35 100644 --- a/sphinxcontrib/jupyter/builders/jupyter.py +++ 
b/sphinxcontrib/jupyter/builders/jupyter.py @@ -10,24 +10,25 @@ from sphinx.util.fileutil import copy_asset from ..writers.execute_nb import ExecuteNotebookWriter from ..writers.make_site import MakeSiteWriter -from ..writers.convert import convertToHtmlWriter +from ..writers.convert import ConvertToHTMLWriter from dask.distributed import Client, progress from sphinx.util import logging -import pdb import time -from ..writers.utils import copy_dependencies +import json +from hashlib import md5 +from .utils import copy_dependencies, combine_executed_files, check_codetree_validity, run_build +from sphinx.cmd.make_mode import run_make_mode +from sphinx.cmd.build import build_main +import os class JupyterBuilder(Builder): - """ - Builds Jupyter Notebook - """ + name = "jupyter" - format = "ipynb" + docformat = "ipynb" out_suffix = ".ipynb" allow_parallel = True _writer_class = JupyterWriter - _make_site_class = MakeSiteWriter dask_log = dict() futuresInfo = dict() futures = [] @@ -36,74 +37,10 @@ class JupyterBuilder(Builder): logger = logging.getLogger(__name__) def init(self): - ### initializing required classes - self._execute_notebook_class = ExecuteNotebookWriter(self) - self._make_site_class = MakeSiteWriter(self) - self.executedir = self.outdir + '/executed' - self.reportdir = self.outdir + '/reports/' - self.errordir = self.outdir + "/reports/{}" - self.downloadsdir = self.outdir + "/_downloads" - self.downloadsExecutedir = self.downloadsdir + "/executed" - self.client = None - - # Check default language is defined in the jupyter kernels - def_lng = self.config["jupyter_default_lang"] - if def_lng not in self.config["jupyter_kernels"]: - self.logger.warning( - "Default language defined in conf.py ({}) is not " - "defined in the jupyter_kernels in conf.py. 
" - "Set default language to python3" - .format(def_lng)) - self.config["jupyter_default_lang"] = "python3" - # If the user has overridden anything on the command line, set these things which have been overridden. - instructions = [] - overrides = self.config['jupyter_options'] - if overrides: - instructions = overrides.split(",") - - for instruction in instructions: - if instruction: - if instruction == 'code_only': - self.config["jupyter_conversion_mode"] = "code" - else: - # Fail on unrecognised command. - self.logger.warning("Unrecognise command line parameter " + instruction + ", ignoring.") - - #threads per worker for dask distributed processing - if "jupyter_threads_per_worker" in self.config: - self.threads_per_worker = self.config["jupyter_threads_per_worker"] - - #number of workers for dask distributed processing - if "jupyter_number_workers" in self.config: - self.n_workers = self.config["jupyter_number_workers"] - - # start a dask client to process the notebooks efficiently. - # processes = False. This is sometimes preferable if you want to avoid inter-worker communication and your computations release the GIL. This is common when primarily using NumPy or Dask Array. 
- - if (self.config["jupyter_execute_notebooks"]): - self.client = Client(processes=False, threads_per_worker = self.threads_per_worker, n_workers = self.n_workers) - self.execution_vars = { - 'target': 'website', - 'dependency_lists': self.config["jupyter_dependency_lists"], - 'executed_notebooks': [], - 'delayed_notebooks': dict(), - 'futures': [], - 'delayed_futures': [], - 'destination': self.executedir - } - - if (self.config["jupyter_download_nb_execute"]): - if self.client is None: - self.client = Client(processes=False, threads_per_worker = self.threads_per_worker, n_workers = self.n_workers) - self.download_execution_vars = { - 'target': 'downloads', - 'dependency_lists': self.config["jupyter_dependency_lists"], - 'executed_notebooks': [], - 'delayed_notebooks': dict(), - 'futures': [], - 'delayed_futures': [], - 'destination': self.downloadsExecutedir - } + """ + A Sphinx Builder for Jupyter Notebooks + """ + self.executedir = self.confdir + '/_build/execute' def get_outdated_docs(self): for docname in self.env.found_docs: @@ -128,95 +65,38 @@ def get_target_uri(self, docname, typ=None): def prepare_writing(self, docnames): self.writer = self._writer_class(self) - - ## copies the dependencies to the notebook folder + # Copies the dependencies to the notebook folder copy_dependencies(self) - - if (self.config["jupyter_execute_notebooks"]): - ## copies the dependencies to the executed folder - copy_dependencies(self, self.executedir) - - if (self.config["jupyter_download_nb_execute"]): - copy_dependencies(self, self.downloadsExecutedir) def write_doc(self, docname, doctree): # work around multiple string % tuple issues in docutils; # replace tuples in attribute values with lists doctree = doctree.deepcopy() destination = docutils.io.StringOutput(encoding="utf-8") - ### print an output for downloading notebooks as well with proper links if variable is set - if "jupyter_download_nb" in self.config and self.config["jupyter_download_nb"]: - - outfilename = 
os.path.join(self.downloadsdir, os_path(docname) + self.out_suffix) - ensuredir(os.path.dirname(outfilename)) - self.writer._set_ref_urlpath(self.config["jupyter_download_nb_urlpath"]) - self.writer._set_jupyter_download_nb_image_urlpath((self.config["jupyter_download_nb_image_urlpath"])) - self.writer.write(doctree, destination) - - # get a NotebookNode object from a string - nb = nbformat.reads(self.writer.output, as_version=4) - nb = self.update_Metadata(nb) - try: - with codecs.open(outfilename, "w", "utf-8") as f: - self.writer.output = nbformat.writes(nb, version=4) - f.write(self.writer.output) - except (IOError, OSError) as err: - self.warn("error writing file %s: %s" % (outfilename, err)) - - ### executing downloaded notebooks - if (self.config['jupyter_download_nb_execute']): - strDocname = str(docname) - if strDocname in self.download_execution_vars['dependency_lists'].keys(): - self.download_execution_vars['delayed_notebooks'].update({strDocname: nb}) - else: - self._execute_notebook_class.execute_notebook(self, nb, docname, self.download_execution_vars, self.download_execution_vars['futures']) - - ### output notebooks for executing - self.writer._set_ref_urlpath(None) - self.writer._set_jupyter_download_nb_image_urlpath(None) self.writer.write(doctree, destination) - - # get a NotebookNode object from a string + # Get a NotebookNode object from a string nb = nbformat.reads(self.writer.output, as_version=4) - nb = self.update_Metadata(nb) - ### execute the notebook - if (self.config["jupyter_execute_notebooks"]): - strDocname = str(docname) - if strDocname in self.execution_vars['dependency_lists'].keys(): - self.execution_vars['delayed_notebooks'].update({strDocname: nb}) - else: - self._execute_notebook_class.execute_notebook(self, nb, docname, self.execution_vars, self.execution_vars['futures']) - else: - #do not execute - if (self.config['jupyter_generate_html']): - language_info = nb.metadata.kernelspec.language - self._convert_class = 
convertToHtmlWriter(self) - self._convert_class.convert(nb, docname, language_info, self.outdir) - - ### mkdir if the directory does not exist + if self.config["jupyter_execute"]: + # Combine the executed code with output of this builder + update = check_codetree_validity(self, nb, docname) + if update: + run_build('execute') + nb = combine_executed_files(self.executedir, nb, docname) + outfilename = os.path.join(self.outdir, os_path(docname) + self.out_suffix) ensuredir(os.path.dirname(outfilename)) - + #Write Document try: - with codecs.open(outfilename, "w", "utf-8") as f: - self.writer.output = nbformat.writes(nb, version=4) - f.write(self.writer.output) + with open(outfilename, "wt", encoding="UTF-8") as f: + nbformat.write(nb, f) except (IOError, OSError) as err: self.logger.warning("error writing file %s: %s" % (outfilename, err)) - def update_Metadata(self, nb): - nb.metadata.date = time.time() - return nb - def copy_static_files(self): # copy all static files self.logger.info(bold("copying static files... "), nonl=True) ensuredir(os.path.join(self.outdir, '_static')) - if (self.config["jupyter_execute_notebooks"]): - self.logger.info(bold("copying static files to executed folder... 
\n"), nonl=True) - ensuredir(os.path.join(self.executed_notebook_dir, '_static')) - # excluded = Matcher(self.config.exclude_patterns + ["**/.*"]) for static_path in self.config["jupyter_static_file_path"]: @@ -227,39 +107,7 @@ def copy_static_files(self): .format(entry)) else: copy_asset(entry, os.path.join(self.outdir, "_static")) - if (self.config["jupyter_execute_notebooks"]): - copy_asset(entry, os.path.join(self.executed_notebook_dir, "_static")) self.logger.info("done") - def finish(self): - - self.finish_tasks.add_task(self.copy_static_files) - - if self.config["jupyter_execute_notebooks"]: - self.save_executed_and_generate_coverage(self.execution_vars,'website', self.config['jupyter_make_coverage']) - - if self.config["jupyter_download_nb_execute"]: - self.save_executed_and_generate_coverage(self.download_execution_vars, 'downloads') - - if "jupyter_make_site" in self.config and self.config['jupyter_make_site']: - self._make_site_class.build_website(self) - - def save_executed_and_generate_coverage(self, params, target, coverage = False): - - # watch progress of the execution of futures - self.logger.info(bold("Starting notebook execution for %s and html conversion(if set in config)..."), target) - #progress(self.futures) - - # save executed notebook - error_results = self._execute_notebook_class.save_executed_notebook(self, params) - - ##generate coverage if config value set - if coverage: - ## produces a JSON file of dask execution - self._execute_notebook_class.produce_dask_processing_report(self, params) - - ## generate the JSON code execution reports file - error_results = self._execute_notebook_class.produce_code_execution_report(self, error_results, params) - - self._execute_notebook_class.create_coverage_report(self, error_results, params) + self.finish_tasks.add_task(self.copy_static_files) \ No newline at end of file diff --git a/sphinxcontrib/jupyter/builders/jupyter_code.py b/sphinxcontrib/jupyter/builders/jupyter_code.py new file mode 100644 
index 00000000..e0808388 --- /dev/null +++ b/sphinxcontrib/jupyter/builders/jupyter_code.py @@ -0,0 +1,127 @@ +import codecs +import os.path +import docutils.io + +import nbformat +from ..writers.jupyter import JupyterWriter +from sphinx.builders import Builder +from ..writers.execute_nb import ExecuteNotebookWriter +from dask.distributed import Client +from sphinx.util import logging +from sphinx.util.console import bold +import time +from .utils import copy_dependencies, create_hash, normalize_cell, check_codetree_validity +import json +from collections import OrderedDict + +logger = logging.getLogger(__name__) + +class JupyterCodeBuilder(Builder): + + #Builder Settings + name="execute" + docformat = "json" + out_suffix = ".codetree" + allow_parallel = True + nbversion = 4 + #Dask Configuration + threads_per_worker = 1 + n_workers = 1 + #-Sphinx Writer + _writer_class = JupyterWriter + + def init(self): + """ + Code Execution Builder + + This builder runs all code-blocks in RST files to compile + a set of `codetree` objects that include executed outputs. + + The results are saved in `_build/execute` by default + + Notes + ----- + 1. Used by jupyter, jupyterhtml, and jupyterpdf to extract + executed outputs. + """ + self.executenb = ExecuteNotebookWriter(self) + self.executedir = self.outdir + self.codetreedir = self.outdir + "/execute/" + self.reportdir = self.outdir + '/reports/' + self.errordir = self.outdir + "/reports/{}" + self.client = None + + #threads per worker for dask distributed processing + if "jupyter_threads_per_worker" in self.config: + self.threads_per_worker = self.config["jupyter_threads_per_worker"] + + #number of workers for dask distributed processing + if "jupyter_number_workers" in self.config: + self.n_workers = self.config["jupyter_number_workers"] + + # start a dask client to process the notebooks efficiently. + # processes = False. 
This is sometimes preferable if you want to avoid + # inter-worker communication and your computations release the GIL. + # This is common when primarily using NumPy or Dask Array. + + self.client = Client(processes=False, threads_per_worker = self.threads_per_worker, n_workers = self.n_workers) + self.execution_vars = { + 'dependency_lists': self.config["jupyter_dependency_lists"], + 'executed_notebooks': [], + 'delayed_notebooks': dict(), + 'futures': [], + 'delayed_futures': [], + 'destination': self.executedir + } + + def get_target_uri(self, docname: str, typ: str = None): #TODO: @aakash is this different to method in sphinx.builder? + return docname + + def get_outdated_docs(self): #TODO: @aakash is this different to method in sphinx.builder? + return '' + + + def prepare_writing(self, docnames): #TODO: @aakash is this different to method in sphinx.builder? + self.writer = self._writer_class(self) + + def write_doc(self, docname, doctree): + doctree = doctree.deepcopy() + destination = docutils.io.StringOutput(encoding="utf-8") + self.writer.write(doctree, destination) + nb = nbformat.reads(self.writer.output, as_version=self.nbversion) + #Codetree and Execution + update = check_codetree_validity(self, nb, docname) + if not update: + return + # Execute the notebook + strDocname = str(docname) + if strDocname in self.execution_vars['dependency_lists'].keys(): + self.execution_vars['delayed_notebooks'].update({strDocname: nb}) + else: + self.executenb.execute_notebook(self, nb, docname, self.execution_vars, self.execution_vars['futures']) + + def create_codetree(self, nb): + codetree = OrderedDict() + for cell in nb.cells: + cell = normalize_cell(cell) + cell = create_hash(cell) + codetree = self.create_codetree_entry(codetree, cell) + #Build codetree file + filename = self.executedir + "/" + nb.metadata.filename_with_path + self.out_suffix + with open(filename, "wt", encoding="UTF-8") as json_file: + json.dump(codetree, json_file) + + def 
create_codetree_entry(self, codetree, cell): + codetree[cell.metadata.hashcode] = dict() + key = codetree[cell.metadata.hashcode] + if hasattr(cell, 'source'): key['source']= cell.source + if hasattr(cell, 'outputs'): key['outputs'] = cell.outputs + if hasattr(cell, 'metadata'): key['metadata'] = cell.metadata + return codetree + + def finish(self): + logger.info(bold("Starting notebook execution")) + error_results = self.executenb.save_executed_notebook(self, self.execution_vars) + self.executenb.produce_dask_processing_report(self, self.execution_vars) + error_results = self.executenb.produce_code_execution_report(self, error_results, self.execution_vars) + self.executenb.create_coverage_report(self, error_results, self.execution_vars) \ No newline at end of file diff --git a/sphinxcontrib/jupyter/builders/jupyter_html.py b/sphinxcontrib/jupyter/builders/jupyter_html.py new file mode 100644 index 00000000..78625077 --- /dev/null +++ b/sphinxcontrib/jupyter/builders/jupyter_html.py @@ -0,0 +1,165 @@ +import codecs +import os.path +import docutils.io + +import nbformat +from sphinx.util.osutil import ensuredir, os_path +from ..writers.jupyter import JupyterWriter +from sphinx.builders import Builder +from sphinx.util.console import bold, darkgreen, brown +from sphinx.util.fileutil import copy_asset +from ..writers.execute_nb import ExecuteNotebookWriter +from ..writers.make_site import MakeSiteWriter +from ..writers.convert import ConvertToHTMLWriter +from dask.distributed import Client, progress +from sphinx.util import logging +from docutils import nodes +from docutils.nodes import Node +import time +from .utils import copy_dependencies, combine_executed_files, check_codetree_validity, run_build +from ..writers.utils import get_subdirectory_and_filename +from hashlib import md5 + +logger = logging.getLogger(__name__) + +class JupyterHTMLBuilder(Builder): + + name="jupyterhtml" + docformat = "ipynb" + out_suffix = ".html" + + allow_parallel = True + _writer_class 
= JupyterWriter + + def init(self): + """ + Builds IPYNB(HTML) notebooks and constructs web sites + """ + self.executedir = self.confdir + '/_build/execute' + self.downloadsdir = self.outdir + "/_downloads" + self.downloadsExecutedir = self.downloadsdir + "/executed" + self._convert_class = ConvertToHTMLWriter(self) + self._make_site_class = MakeSiteWriter(self) + + def get_target_uri(self, docname: str, typ: str = None): + return docname + + def get_outdated_docs(self): + for docname in self.env.found_docs: + if docname not in self.env.all_docs: + yield docname + continue + targetname = self.env.doc2path(docname, self.outdir + "/html", + self.out_suffix) + try: + targetmtime = os.path.getmtime(targetname) + except OSError: + targetmtime = 0 + try: + srcmtime = os.path.getmtime(self.env.doc2path(docname)) + # checks if the source file edited time is later than the html build time + if srcmtime > targetmtime: + yield docname + except EnvironmentError: + pass + + def prepare_writing(self, docnames): + self.writer = self._writer_class(self) + + def write_doc(self, docname, doctree): + # work around multiple string % tuple issues in docutils; + # replace tuples in attribute values with lists + doctree = doctree.deepcopy() + destination = docutils.io.StringOutput(encoding="utf-8") + + nb, outfilename = self.process_doctree_to_notebook(doctree, destination, docname, True) + # Download Notebooks + + nb = self.add_download_metadata(nb) ## add metadata for the downloaded notebooks + self.save_notebook(outfilename, nb) + + # Notebooks for producing HTML + nb, outfilename = self.process_doctree_to_notebook(doctree, destination, docname, False) + #self.save_notebook(outfilename, nb) + # Convert IPYNB to HTML + language_info = nb.metadata.kernelspec.language + self._convert_class.convert(nb, docname, language_info, nb['metadata']['path']) + + def process_doctree_to_notebook(self, doctree, destination, docname, download=False): + ref_urlpath = None + 
jupyter_download_nb_image_urlpath = None + outdir = self.outdir + outfilename = "" + + if download: + ref_urlpath = self.config["jupyter_download_nb_urlpath"] + jupyter_download_nb_image_urlpath = self.config["jupyter_download_nb_image_urlpath"] + outdir = self.downloadsdir + self.writer._set_ref_urlpath(ref_urlpath) + self.writer._set_jupyter_download_nb_image_urlpath(jupyter_download_nb_image_urlpath) + + # Combine the executed code with output of this builder + self.writer.write(doctree, destination) + + nb = nbformat.reads(self.writer.output, as_version=4) + + os.chdir(self.confdir) + if self.config["jupyter_execute"]: + ### check for codetree else, create it + update = check_codetree_validity(self, nb, docname) + + if update and self.config["jupyter_execute"]: + run_build('execute') + + nb = combine_executed_files(self.executedir, nb, docname) + + ## adding the site metadata here + nb = self.add_site_metadata(nb, docname) + + if download: + outfilename = os.path.join(outdir, os_path(docname) + ".ipynb") + ensuredir(os.path.dirname(outfilename)) + + return nb, outfilename + + def save_notebook(self, outfilename, nb): + try: + with codecs.open(outfilename, "w", "utf-8") as f: + self.writer.output = nbformat.writes(nb, version=4) + f.write(self.writer.output) + except (IOError, OSError) as err: + self.logger.warning("error writing file %s: %s" % (outfilename, err)) + + def copy_static_files(self): + logger.info(bold("copying static files... 
"), nonl=True) + ensuredir(os.path.join(self.outdir, '_static')) + + # excluded = Matcher(self.config.exclude_patterns + ["**/.*"]) + for static_path in self.config["jupyter_static_file_path"]: + entry = os.path.join(self.confdir, static_path) + if not os.path.exists(entry): + logger.warning( + "jupyter_static_path entry {} does not exist" + .format(entry)) + else: + copy_asset(entry, os.path.join(self.outdir, "_static")) + logger.info("done") + + def add_site_metadata(self, nb, docname): + """ + Site metadata is used when converting IPYNB to HTML (nbconvert) + """ + subdirectory, filename = get_subdirectory_and_filename(docname) + nb['metadata']['path'] = subdirectory + nb['metadata']['filename'] = filename + nb['metadata']['filename_with_path'] = docname + return nb + + def add_download_metadata(self, nb): + nb['metadata']['download_nb_path'] = self.config['jupyter_download_nb_urlpath'] + return nb + + def finish(self): + self.finish_tasks.add_task(self.copy_static_files) + #Construct complete website + self._make_site_class.build_website(self) diff --git a/sphinxcontrib/jupyter/builders/jupyter_pdf.py b/sphinxcontrib/jupyter/builders/jupyter_pdf.py new file mode 100644 index 00000000..4f448e66 --- /dev/null +++ b/sphinxcontrib/jupyter/builders/jupyter_pdf.py @@ -0,0 +1,195 @@ +import codecs +import os.path +import docutils.io + +import nbformat +import json +from sphinx.util.osutil import ensuredir, os_path +from ..writers.jupyter import JupyterWriter +from sphinx.builders import Builder +from sphinx.util.console import bold +from sphinx.util.fileutil import copy_asset +from ..writers.make_pdf import MakePDFWriter +from sphinx.util import logging +import shutil +from distutils.spawn import find_executable +import time +from .utils import combine_executed_files, check_codetree_validity, run_build +from ..writers.utils import get_subdirectory_and_filename + +logger = logging.getLogger(__name__) + +class JupyterPDFBuilder(Builder): + + name="jupyterpdf" + docformat 
= "ipynb" + out_suffix = ".pdf" + allow_parallel = True + _writer_class = JupyterWriter + + def init(self): + """ + Builds IPYNB(PDF) notebooks + """ + self.executedir = self.confdir + '/_build/execute' + self.texdir = self.outdir + "/latex" + self.texbookdir = self.outdir + "/texbook" + self.pdfdir = self.outdir + "/pdf" + + for path in [self.pdfdir, self.texdir]: + ensuredir(path) + + if not find_executable('xelatex'): + logger.warning( + "Cannot find xelatex executable for pdf compilation" + ) + exit(1) + + # TODO: We should write a separate function/class to check configs + if self.config["jupyter_pdf_book"] and ("jupyter_pdf_book_index" not in self.config or not self.config["jupyter_pdf_book_index"]): + logger.warning( + "You have switched on the book conversion option but not specified an index/contents file for book pdf" + ) + exit(1) + + #PDF Writer Object + self.pdf = MakePDFWriter(self) + + def get_outdated_docs(self): + for docname in self.env.found_docs: + if docname in self.config['jupyter_pdf_excludepatterns']: + continue + if docname not in self.env.all_docs: + yield docname + continue + targetname = self.env.doc2path(docname, self.outdir + "/pdf", + self.out_suffix) + try: + targetmtime = os.path.getmtime(targetname) + except OSError: + targetmtime = 0 + try: + srcmtime = os.path.getmtime(self.env.doc2path(docname)) + # checks if the source file edited time is later than the pdf build time + if srcmtime > targetmtime: + yield docname + except EnvironmentError: + pass + + def get_target_uri(self, docname, typ=None): + return docname + + def prepare_writing(self, docnames): + self.writer = self._writer_class(self) + + self.copy_static_files() + + def write_doc(self, docname, doctree): + # work around multiple string % tuple issues in docutils; + # replace tuples in attribute values with lists + doctree = doctree.deepcopy() + destination = docutils.io.StringOutput(encoding="utf-8") + + ### output notebooks for executing for single pdfs, the urlpath
should be set to website url + self.writer._set_ref_urlpath(self.config["jupyter_pdf_urlpath"]) + self.writer._set_jupyter_download_nb_image_urlpath(None) + self.writer.write(doctree, destination) + + ## get a NotebookNode object from a string + nb = nbformat.reads(self.writer.output, as_version=4) + + os.chdir(self.confdir) + if self.config["jupyter_execute"]: + ### check for codetree else, create it + update = check_codetree_validity(self, nb, docname) + + if update: + run_build('execute') + + ## combine the executed code with output of this builder + nb = combine_executed_files(self.executedir, nb, docname) + + ## adding latex metadata + nb = self.add_latex_metadata(nb, docname) + + ### mkdir if the directory does not exist + outfilename = os.path.join(self.texdir, os_path(docname) + ".ipynb") + ensuredir(os.path.dirname(outfilename)) + + try: + with codecs.open(outfilename, "w", "utf-8") as f: + self.writer.output = nbformat.writes(nb, version=4) + f.write(self.writer.output) + except (IOError, OSError) as err: + logger.warning("error writing file %s: %s" % (outfilename, err)) + + self.pdf.convert_to_latex(self, docname, nb['metadata']['latex_metadata']) + self.pdf.move_pdf(self) + + def copy_static_files(self): + # copy all static files + logger.info(bold("copying static files... 
"), nonl=True) + ensuredir(os.path.join(self.texdir, '_static')) + + + # excluded = Matcher(self.config.exclude_patterns + ["**/.*"]) + for static_path in self.config["jupyter_static_file_path"]: + entry = os.path.join(self.confdir, static_path) + if not os.path.exists(entry): + logger.warning( + "jupyter_static_path entry {} does not exist" + .format(entry)) + else: + copy_asset(entry, os.path.join(self.texdir, "_static")) + self.copy_static_folder_to_subfolders(self.texdir, True) + + # Copying static folder to subfolders - TODO: will remove this later + def copy_static_folder_to_subfolders(self, sourcedir, skiptopdir): + dirs = os.listdir(sourcedir) + sourcefolder = sourcedir + "/_static" + for folder in dirs: + if skiptopdir and "." in folder: + continue + if "_static" not in folder: + destination = sourcedir + "/" + folder + "/_static" + if os.path.exists(sourcefolder) and not os.path.exists(destination): #ensure source exists and copy to destination to ensure latest version + shutil.copytree(sourcefolder , destination) + + def finish(self): + #self.finish_tasks.add_task(self.copy_static_files) + + ### making book pdf + if self.config["jupyter_pdf_book"]: + self.pdf.process_tex_for_book(self) + + def add_latex_metadata(self, nb, docname=""): + ## initialize latex metadata + if 'latex_metadata' not in nb['metadata']: + nb['metadata']['latex_metadata'] = {} + + ## check for relative paths + subdirectory, filename = get_subdirectory_and_filename(docname) + + path = '' + if subdirectory != '': + path = "../" + slashes = subdirectory.count('/') + for i in range(slashes): + path += "../" + + ## add check for logo here as well + if nb.metadata.title: + nb.metadata.latex_metadata.title = nb.metadata.title + if "jupyter_pdf_logo" in self.config and self.config['jupyter_pdf_logo']: + nb.metadata.latex_metadata.logo = path + self.config['jupyter_pdf_logo'] + + if self.config["jupyter_bib_file"]: + nb.metadata.latex_metadata.bib = path + self.config["jupyter_bib_file"] + + 
if self.config["jupyter_pdf_author"]: + nb.metadata.latex_metadata.author = self.config["jupyter_pdf_author"] + + if self.config["jupyter_pdf_book_index"] is not None and (filename and self.config["jupyter_pdf_book_index"] in filename): + nb.metadata.latex_metadata.jupyter_pdf_book_title = self.config["jupyter_pdf_book_title"] + + return nb \ No newline at end of file diff --git a/sphinxcontrib/jupyter/builders/jupyterpdf.py b/sphinxcontrib/jupyter/builders/jupyterpdf.py deleted file mode 100644 index 6467e1d9..00000000 --- a/sphinxcontrib/jupyter/builders/jupyterpdf.py +++ /dev/null @@ -1,233 +0,0 @@ -import codecs -import os.path -import docutils.io - -import nbformat -import json -from sphinx.util.osutil import ensuredir, os_path -from ..writers.jupyter import JupyterWriter -from sphinx.builders import Builder -from sphinx.util.console import bold, darkgreen, brown -from sphinx.util.fileutil import copy_asset -from dask.distributed import Client, progress -from ..writers.execute_nb import ExecuteNotebookWriter -from ..writers.make_pdf import MakePDFWriter -from sphinx.util import logging -import pdb -import shutil -from distutils.spawn import find_executable -import time - -class JupyterPDFBuilder(Builder): - """ - Builds pdf notebooks - """ - name="jupyterpdf" - format = "ipynb" - out_suffix = ".ipynb" - allow_parallel = True - - _writer_class = JupyterWriter - dask_log = dict() - futuresInfo = dict() - futures = [] - threads_per_worker = 1 - n_workers = 1 - logger = logging.getLogger(__name__) - - def init(self): - if not find_executable('xelatex'): - self.logger.warning( - "Cannot find xelatex executable for pdf compilation" - ) - exit(1) - - ### we should write a separate function/class to check configs - if self.config["jupyter_pdf_book"] and ("jupyter_pdf_book_index" not in self.config or not self.config["jupyter_pdf_book_index"]): - self.logger.warning( - "You have switched on the book conversion option but not specified an index/contents file for book 
pdf" - ) - exit(1) - ### initializing required classes - self._execute_notebook_class = ExecuteNotebookWriter(self) - self._pdf_class = MakePDFWriter(self) - self.executedir = self.outdir + '/executed' - self.reportdir = self.outdir + '/reports/' - self.errordir = self.outdir + "/reports/{}" - self.texbookdir = self.outdir + '/texbook' - self.client = None - - # Check default language is defined in the jupyter kernels - def_lng = self.config["jupyter_default_lang"] - if def_lng not in self.config["jupyter_kernels"]: - self.logger.warning( - "Default language defined in conf.py ({}) is not " - "defined in the jupyter_kernels in conf.py. " - "Set default language to python3" - .format(def_lng)) - self.config["jupyter_default_lang"] = "python3" - # If the user has overridden anything on the command line, set these things which have been overridden. - instructions = [] - overrides = self.config['jupyter_options'] - if overrides: - instructions = overrides.split(",") - - for instruction in instructions: - if instruction: - if instruction == 'code_only': - self.config["jupyter_conversion_mode"] = "code" - else: - # Fail on unrecognised command. - self.logger.warning("Unrecognise command line parameter " + instruction + ", ignoring.") - - #threads per worker for dask distributed processing - if "jupyter_threads_per_worker" in self.config: - self.threads_per_worker = self.config["jupyter_threads_per_worker"] - - #number of workers for dask distributed processing - if "jupyter_number_workers" in self.config: - self.n_workers = self.config["jupyter_number_workers"] - - ## check if flags are unset. 
Give a warning - if ("jupyter_execute_notebooks" in self.config and self.config['jupyter_execute_notebooks'] is False) or "jupyter_execute_notebooks" not in self.config: - self.config['jupyter_execute_notebooks'] = True - self.logger.info("execution of notebooks is mandatory for pdf conversion, so setting it on for pdf builder.") - - if ("jupyter_target_pdf" in self.config and self.config['jupyter_target_pdf'] is False) or "jupyter_target_pdf" not in self.config: - self.config['jupyter_target_pdf'] = True - self.logger.info("target pdf flag is mandatory for pdf conversion, so setting it on for pdf builder.") - - # start a dask client to process the notebooks efficiently. - # processes = False. This is sometimes preferable if you want to avoid inter-worker communication and your computations release the GIL. This is common when primarily using NumPy or Dask Array. - - #### forced execution of notebook - self.client = Client(processes=False, threads_per_worker = self.threads_per_worker, n_workers = self.n_workers) - self.execution_vars = { - 'target': 'website', - 'dependency_lists': self.config["jupyter_dependency_lists"], - 'executed_notebooks': [], - 'delayed_notebooks': dict(), - 'futures': [], - 'delayed_futures': [], - 'destination': self.executedir - } - - def get_outdated_docs(self): - for docname in self.env.found_docs: - if docname not in self.env.all_docs: - yield docname - continue - targetname = self.env.doc2path(docname, self.outdir, - self.out_suffix) - try: - targetmtime = os.path.getmtime(targetname) - except OSError: - targetmtime = 0 - try: - srcmtime = os.path.getmtime(self.env.doc2path(docname)) - if srcmtime > targetmtime: - yield docname - except EnvironmentError: - pass - - def get_target_uri(self, docname, typ=None): - return docname - - def prepare_writing(self, docnames): - self.writer = self._writer_class(self) - - def write_doc(self, docname, doctree): - # work around multiple string % tuple issues in docutils; - # replace tuples in 
attribute values with lists - doctree = doctree.deepcopy() - destination = docutils.io.StringOutput(encoding="utf-8") - - ### output notebooks for executing for single pdfs, the urlpath should be set to website url - self.writer._set_ref_urlpath(self.config["jupyter_pdf_urlpath"]) - self.writer._set_jupyter_download_nb_image_urlpath(None) - self.writer.write(doctree, destination) - - # get a NotebookNode object from a string - nb = nbformat.reads(self.writer.output, as_version=4) - nb = self.update_Metadata(nb) - - ### execute the notebook - keep it forcefully on - strDocname = str(docname) - if strDocname in self.execution_vars['dependency_lists'].keys(): - self.execution_vars['delayed_notebooks'].update({strDocname: nb}) - else: - self._execute_notebook_class.execute_notebook(self, nb, docname, self.execution_vars, self.execution_vars['futures']) - - ### mkdir if the directory does not exist - outfilename = os.path.join(self.outdir, os_path(docname) + self.out_suffix) - ensuredir(os.path.dirname(outfilename)) - - try: - with codecs.open(outfilename, "w", "utf-8") as f: - self.writer.output = nbformat.writes(nb, version=4) - f.write(self.writer.output) - except (IOError, OSError) as err: - self.logger.warning("error writing file %s: %s" % (outfilename, err)) - - def update_Metadata(self, nb): - nb.metadata.date = time.time() - return nb - - def copy_static_files(self): - # copy all static files - self.logger.info(bold("copying static files... "), nonl=True) - ensuredir(os.path.join(self.outdir, '_static')) - if (self.config["jupyter_execute_notebooks"]): - self.logger.info(bold("copying static files to executed folder... 
\n"), nonl=True) - ensuredir(os.path.join(self.executed_notebook_dir, '_static')) - - - # excluded = Matcher(self.config.exclude_patterns + ["**/.*"]) - for static_path in self.config["jupyter_static_file_path"]: - entry = os.path.join(self.confdir, static_path) - if not os.path.exists(entry): - self.logger.warning( - "jupyter_static_path entry {} does not exist" - .format(entry)) - else: - copy_asset(entry, os.path.join(self.outdir, "_static")) - if (self.config["jupyter_execute_notebooks"]): - copy_asset(entry, os.path.join(self.executed_notebook_dir, "_static")) - self.logger.info("done") - self.copy_static_folder_to_subfolders(self.executedir, True) - - ## copying static folder to subfolders - will remove this later - def copy_static_folder_to_subfolders(self, sourcedir, skiptopdir): - dirs = os.listdir(sourcedir) - sourcefolder = sourcedir + "/_static" - for folder in dirs: - if skiptopdir and "." in folder: - continue - if "_static" not in folder: - destination = sourcedir + "/" + folder + "/_static" - if os.path.exists(sourcefolder) and not os.path.exists(destination): #ensure source exists and copy to destination to ensure latest version - shutil.copytree(sourcefolder , destination) - - def add_bib_to_latex(self, nb, bool): - # get a NotebookNode object from a string - if 'latex_metadata' not in nb.metadata: - nb.metadata['latex_metadata'] = {} - - nb.metadata['latex_metadata']['bib_include'] = bool - - def finish(self): - self.finish_tasks.add_task(self.copy_static_files) - - #if (self.config["jupyter_execute_notebooks"]): - # watch progress of the execution of futures - self.logger.info(bold("Starting notebook execution and html conversion(if set in config)...")) - #progress(self.futures) - - # save executed notebook - error_results = self._execute_notebook_class.save_executed_notebook(self, self.execution_vars) - - ### making book pdf - #self.copy_static_folder_to_subfolders(self.texbookdir, False) - if "jupyter_target_pdf" in self.config and 
self.config["jupyter_target_pdf"] and self.config["jupyter_pdf_book"]: - self._pdf_class.process_tex_for_book(self) - diff --git a/sphinxcontrib/jupyter/builders/utils.py b/sphinxcontrib/jupyter/builders/utils.py new file mode 100644 index 00000000..623a9e7f --- /dev/null +++ b/sphinxcontrib/jupyter/builders/utils.py @@ -0,0 +1,110 @@ +""" +Utility Functions to support Builders +""" +import os +import json +from hashlib import md5 +from sphinx.util.osutil import ensuredir +from shutil import copy +from munch import munchify +import subprocess +import sys + + +def normalize_cell(cell): + cell.source = cell.source.strip().replace('\n','') + return cell + +def create_hash(cell): + hashcode = md5(cell.source.encode()).hexdigest() + cell.metadata.hashcode = hashcode + return cell + +#TODO: @aakash Does this need logger messages for failure cases? +def combine_executed_files(executedir, nb, docname): + codetreeFile = executedir + "/" + docname + ".codetree" + execution_count = 0 + count = 0 + if os.path.exists(codetreeFile): + with open(codetreeFile, "r", encoding="UTF-8") as f: + codetree = json.load(f) + + for cell in nb.cells: + if cell['cell_type'] == "code": + execution_count += 1 + cellcopy = normalize_cell(cell.copy()) + hashcode = md5(cellcopy.source.encode()).hexdigest() + if hashcode in codetree: + output = codetree[hashcode]['outputs'] + cell['execution_count'] = execution_count + cell['outputs'] = munchify(output) + if 'hide-output' in cell['metadata']: + cell['outputs'] = [] + + return nb + +def check_codetree_validity(builder, nb, docname): + """ + Check the validity of a codetree for each code block + This checks the md5 hash to see if the codetree data needs to be + updated + """ + if os.path.exists(builder.executedir): + codetreeFile = builder.executedir + "/" + docname + ".codetree" + if os.path.exists(codetreeFile): + with open(codetreeFile, "r", encoding="UTF-8") as f: + codetree = json.load(f) + for cell in nb.cells: + if cell['cell_type'] == "code": 
+ cellcopy = normalize_cell(cell.copy()) + cellcopy = create_hash(cellcopy) + if cellcopy.metadata.hashcode not in codetree.keys(): + return True + else: + return True + else: + return True + + return False + +def run_build(target): + if sys.platform == 'win32': + makecmd = os.environ.get('MAKE', 'make.bat') + else: + makecmd = 'make' + try: + return subprocess.call([makecmd, target]) + except OSError: + print('Error: Failed to run: %s' % makecmd) + return 1 + +def copy_dependencies(builderSelf, outdir = None): + """ + Copies the dependencies of source files or folders specified in the config to their respective output directories + """ + if outdir is None: + outdir = builderSelf.outdir + else: + outdir = outdir + srcdir = builderSelf.srcdir + if 'jupyter_dependencies' in builderSelf.config and builderSelf.config['jupyter_dependencies'] is not None: + depenencyObj = builderSelf.config['jupyter_dependencies'] + for key, deps in depenencyObj.items(): + full_src_path = srcdir + "/" + key + if full_src_path.find('.') == -1: + ## handling the case of key being a directory + full_dest_path = outdir + "/" + key + ensuredir(full_dest_path) + for dep in deps: + copy(full_src_path + "/" + dep, full_dest_path,follow_symlinks=True) + elif os.path.isfile(full_src_path): + ## handling the case of key being a file + # removing the filename to get the directory path + index = key.rfind('/') + if index!=0 and index != -1: + key = key[0:index] + + full_src_path = srcdir + "/" + key + full_dest_path = outdir + "/" + key + for dep in deps: + copy(full_src_path + "/" + dep, full_dest_path,follow_symlinks=True) \ No newline at end of file diff --git a/sphinxcontrib/jupyter/writers/accumulators.py b/sphinxcontrib/jupyter/writers/accumulators.py new file mode 100644 index 00000000..7aaf9a12 --- /dev/null +++ b/sphinxcontrib/jupyter/writers/accumulators.py @@ -0,0 +1,195 @@ +""" +Provides accumulator objects to assist with building Syntax +""" + + +from docutils import nodes + +class List: 
+ + indentation = " "*2 + marker = "*" + markers = dict() + item_no = 0 + + def __init__(self, level, markers, item_no=0): + """ + List Object + + Parameters + ---------- + level : int + Specify Level for List (base level=0) + markers : dict + A dictionary of markers with keys as levels and values as the current marker in that level + item_no : stores at what count the current item came in the list, if we consider the list as a queue of items + + Example + ------- + from markdown import List + a = List(level=0) + a.add_item("first") + a.add_item("second") + a.to_markdown() + """ + self.items = [] + self.level = level + self.markers = markers + self.item_no = item_no + + def __repr__(self): + return self.to_markdown() + + def add_item(self, item): + """ + Add Item to List + + Parameters + ---------- + item : str or List + add an element or a list object + """ + marker = self.markers[self.level] + itemtuple = (marker, self.item_no, self.level, item) + if len(self.items) > 0: + last_item = self.items.pop() + ### checking if the new item is a child of the same list item + if self.item_no == last_item[1] and last_item[2] == self.level: + last_item_text = last_item[3] + item_text = item + if not isinstance(last_item_text, str): + last_item_text = last_item_text.astext() + if not isinstance(item_text, str): + item_text = item_text.astext() + content = last_item_text + item_text + itemtuple = (marker, self.item_no, self.level, content) + else: + self.items.append(last_item) + self.items.append(itemtuple) + + def build_syntax(self, item): + indent = self.indentation * item[2] + marker = item[0] + if isinstance(item[0], int): + marker = str(item[0]) + "." 
+ + content = "" + for children in item[3]: + if isinstance(children, str) or isinstance(children, int): + content += children + else: + content += children.astext() + return indent, marker, content + + def to_markdown(self): + """ + converts the list items to markdown + """ + markdown = [] + for item in self.items: + indent, marker, content = self.build_syntax(item) + markdown.append("{}{} {}".format(indent, marker, content)) + + ## need a new line at the end + markdown.append("\n") + return "\n".join(markdown) + + def to_latex(self): + """ + converts the list items to a latex string + """ + latex = [] + for item in self.items: + indent, marker, content = self.build_syntax(item) + latex.append("{}".format(content)) + + latex.append("\n") + return "\n".join(latex) + + def increment_level(self): + self.level += 1 + + def decrement_level(self): + self.level -= 1 + + def get_marker(self): + return self.markers + + def set_marker(self, node): + """ + sets the updated marker for the current level in the self.markers dictionary + + Parameters + ---------- + node : the node object under whose visit/depart method this function was called + """ + if isinstance(node.parent, nodes.enumerated_list) or isinstance(node.parent.parent, nodes.enumerated_list): + if self.level in self.markers: + count = self.markers[self.level] + self.markers[self.level] = count + 1 + else: + self.markers[self.level] = 1 + else: + self.markers[self.level] = "*" + self.item_no += 1 + + def itemlist(self): + return self.items + + def getlevel(self): + return self.level + + def get_item_no(self): + return self.item_no + +#-Table Builder-# + +class TableBuilder: + + align = "center" + def __init__(self, node): + self.table = [] + self.current_line = 0 + self.lines = [] + self.row = "" + self.column_widths = [] + if 'align' in node: + self.align = node['align'] + + def __repr__(self): + return self.to_markdown() + + def start_row(self): + self.row = "|" + + def add_item(self, text): + self.row += text + 
"|" + + def end_row(self): + self.row += "\n" + self.lines.append(self.row) + self.row = "" + + def add_title(self, node): + self.lines.append("### {}\n".format(node.astext())) + + def add_column_width(self, colwidth): + self.column_widths.append(colwidth) + + def generate_alignment_line(self, line_length, alignment): + left = ":" if alignment != "right" else "-" + right = ":" if alignment != "left" else "-" + return left + "-" * (line_length - 2) + right + + def add_header_line(self, header_line): + for col_width in self.column_widths: + header_line += self.generate_alignment_line( + col_width, self.align) + header_line += "|" + self.lines.append(header_line + "\n") + + def to_markdown(self): + """ + converts the table items to markdown + """ + return "".join(self.lines) diff --git a/sphinxcontrib/jupyter/writers/convert.py b/sphinxcontrib/jupyter/writers/convert.py index bc4e043a..11541702 100644 --- a/sphinxcontrib/jupyter/writers/convert.py +++ b/sphinxcontrib/jupyter/writers/convert.py @@ -4,28 +4,19 @@ from io import open from sphinx.util.osutil import ensuredir -class convertToHtmlWriter(): - +class ConvertToHTMLWriter(): """ Convert IPYNB to HTML using nbconvert and QuantEcon Template """ - def __init__(self, builderSelf): + def __init__(self, builder): - self.htmldir = builderSelf.outdir + "/html" #html directory + self.htmldir = builder.outdir + "/html" #html directory for path in [self.htmldir]: ensuredir(path) self.html_exporter = HTMLExporter() - - templateFolder = builderSelf.config['jupyter_template_path'] - - if os.path.exists(templateFolder): - pass - else: - builderSelf.logger.warning("template directory not found") - exit() - self.html_exporter.template_file = templateFolder + "/" + builderSelf.config["jupyter_html_template"] + self.html_exporter.template_file = builder.config["jupyter_html_template"] def convert(self, nb, filename, language, base_path, path=None): @@ -47,9 +38,4 @@ def convert(self, nb, filename, language, base_path, path=None): 
html, resources = self.html_exporter.from_notebook_node(nb) f.write(html) - nb['cells'] = nb['cells'][1:] #skip first code-cell as preamble - - # #Write Executed Notebook as File - # if (nb['metadata']['download_nb'] == True): - # with open(download_nb, "wt", encoding="UTF-8") as f: - # nbformat.write(nb, f) + nb['cells'] = nb['cells'][1:] #skip first code-cell as preamble, TODO: remove this diff --git a/sphinxcontrib/jupyter/writers/execute_nb.py b/sphinxcontrib/jupyter/writers/execute_nb.py index abfbbf73..2b57b72e 100644 --- a/sphinxcontrib/jupyter/writers/execute_nb.py +++ b/sphinxcontrib/jupyter/writers/execute_nb.py @@ -5,34 +5,32 @@ import time import json from nbconvert.preprocessors import ExecutePreprocessor -from ..writers.convert import convertToHtmlWriter from sphinx.util import logging from dask.distributed import as_completed from io import open +from hashlib import md5 +from sphinx.util.console import bold, red import sys +from .utils import get_subdirectory_and_filename + +logger = logging.getLogger(__name__) class ExecuteNotebookWriter(): """ Executes jupyter notebook written in python or julia """ - logger = logging.getLogger(__name__) startFlag = 0 - def __init__(self, builderSelf): + + dask_log = dict() + futuresInfo = dict() + def __init__(self, builder): pass - def execute_notebook(self, builderSelf, nb, filename, params, futures): - execute_nb_config = builderSelf.config["jupyter_execute_nb"] - coverage = builderSelf.config["jupyter_make_coverage"] - timeout = execute_nb_config["timeout"] - filename = filename - subdirectory = '' + def execute_notebook(self, builder, nb, filename, params, futures): full_path = filename # check if there are subdirectories - index = filename.rfind('/') - if index > 0: - subdirectory = filename[0:index] - filename = filename[index + 1:] + subdirectory, filename = get_subdirectory_and_filename(filename) language = nb.metadata.kernelspec.language if (language.lower().find('python') != -1): @@ -40,156 +38,116 @@ 
def execute_notebook(self, builderSelf, nb, filename, params, futures): elif (language.lower().find('julia') != -1): language = 'julia' - ## adding latex metadata - if builderSelf.config["jupyter_target_pdf"]: - nb = self.add_latex_metadata(builderSelf, nb, subdirectory, filename) - # - Parse Directories and execute them - # - if coverage: - self.execution_cases(builderSelf, params['destination'], False, subdirectory, language, futures, nb, filename, full_path) - else: - self.execution_cases(builderSelf, params['destination'], True, subdirectory, language, futures, nb, filename, full_path) + self.execution_cases(builder, params['destination'], True, subdirectory, language, futures, nb, filename, full_path) - def add_latex_metadata(self, builder, nb, subdirectory, filename=""): - - ## initialize latex metadata - if 'latex_metadata' not in nb['metadata']: - nb['metadata']['latex_metadata'] = {} - - ## check for relative paths - path = '' - if subdirectory != '': - path = "../" - slashes = subdirectory.count('/') - for i in range(slashes): - path += "../" - - ## add check for logo here as well - if nb.metadata.title: - nb.metadata.latex_metadata.title = nb.metadata.title - if "jupyter_pdf_logo" in builder.config and builder.config['jupyter_pdf_logo']: - nb.metadata.latex_metadata.logo = path + builder.config['jupyter_pdf_logo'] - - if builder.config["jupyter_bib_file"]: - nb.metadata.latex_metadata.bib = path + builder.config["jupyter_bib_file"] - - if builder.config["jupyter_pdf_author"]: - nb.metadata.latex_metadata.author = builder.config["jupyter_pdf_author"] - - if builder.config["jupyter_pdf_book_index"] is not None and (filename and builder.config["jupyter_pdf_book_index"] in filename): - nb.metadata.latex_metadata.jupyter_pdf_book_title = builder.config["jupyter_pdf_book_title"] - - # nb_string = json.dumps(nb_obj, indent=2, sort_keys=True) - return nb - - def execution_cases(self, builderSelf, directory, allow_errors, subdirectory, language, futures, nb, 
filename, full_path): + def execution_cases(self, builder, directory, allow_errors, subdirectory, language, futures, nb, filename, full_path): ## function to handle the cases of execution for coverage reports or html conversion pipeline if subdirectory != '': - builderSelf.executed_notebook_dir = directory + "/" + subdirectory + builder.executed_notebook_dir = directory + "/" + subdirectory else: - builderSelf.executed_notebook_dir = directory + builder.executed_notebook_dir = directory ## ensure that executed notebook directory - ensuredir(builderSelf.executed_notebook_dir) + ensuredir(builder.executed_notebook_dir) ## specifying kernels if language == 'python': - if (sys.version_info > (3, 0)): - # Python 3 code in this block - ep = ExecutePreprocessor(timeout=-1, allow_errors=allow_errors, kernel_name='python3') - else: - # Python 2 code in this block - ep = ExecutePreprocessor(timeout=-1, allow_errors=allow_errors, kernel_name='python2') + ep = ExecutePreprocessor(timeout=-1, allow_errors=allow_errors, kernel_name='python3') elif language == 'julia': ep = ExecutePreprocessor(timeout=-1, allow_errors=allow_errors) ### calling this function before starting work to ensure it starts recording if (self.startFlag == 0): self.startFlag = 1 - builderSelf.client.get_task_stream() + builder.client.get_task_stream() - future = builderSelf.client.submit(ep.preprocess, nb, {"metadata": {"path": builderSelf.executed_notebook_dir, "filename": filename, "filename_with_path": full_path}}) + future = builder.client.submit(ep.preprocess, nb, {"metadata": {"path": builder.executed_notebook_dir, "filename": filename, "filename_with_path": full_path}}) ### dictionary to store info for errors in future future_dict = { "filename": full_path, "filename_with_path": full_path, "language_info": nb['metadata']['kernelspec']} - builderSelf.futuresInfo[future.key] = future_dict + self.futuresInfo[future.key] = future_dict futures.append(future) - def task_execution_time(self, builderSelf): + 
def task_execution_time(self, builder): ## calculates execution time of each task in client using get task stream - task_Info_latest = builderSelf.client.get_task_stream()[-1] + task_Info_latest = builder.client.get_task_stream()[-1] time_tuple = task_Info_latest['startstops'][0] computing_time = time_tuple[2] - time_tuple[1] return computing_time - def check_execution_completion(self, builderSelf, future, nb, error_results, count, total_count, futures_name, params): + def halt_execution(self, builder, futures, traceback, filename): + builder.client.cancel(futures) + logger.info(bold(red("Error encountered in {}".format(filename)))) + logger.info(traceback) + logger.info(bold("Execution halted because an error was encountered, if you do not want to halt on error, set jupyter_execute_allow_errors in config to True")) + sys.exit() + + def check_execution_completion(self, builder, future, nb, error_results, count, total_count, futures_name, params): error_result = [] - builderSelf.dask_log['futures'].append(str(future)) + self.dask_log['futures'].append(str(future)) status = 'pass' + executed_nb = None # computing time for each task - computing_time = self.task_execution_time(builderSelf) + computing_time = self.task_execution_time(builder) - # store the exceptions in an error result array - if future.status == 'error': - status = 'fail' - for key,val in builderSelf.futuresInfo.items(): - if key == future.key: - filename_with_path = val['filename_with_path'] - filename = val['filename'] - language_info = val['language_info'] - error_result.append(future.exception()) + ## getting necessary variables from the notebook + passed_metadata = nb[1]['metadata'] + filename = passed_metadata['filename'] + filename_with_path = passed_metadata['filename_with_path'] + executed_nb = nb[0] + language_info = executed_nb['metadata']['kernelspec'] + executed_nb['metadata']['filename_with_path'] = filename_with_path - else: - passed_metadata = nb[1]['metadata'] - filename = 
passed_metadata['filename'] - filename_with_path = passed_metadata['filename_with_path'] - executed_nb = nb[0] - language_info = executed_nb['metadata']['kernelspec'] - executed_nb['metadata']['filename_with_path'] = filename_with_path - executed_nb['metadata']['download_nb'] = builderSelf.config['jupyter_download_nb'] - if (builderSelf.config['jupyter_download_nb']): - executed_nb['metadata']['download_nb_path'] = builderSelf.config['jupyter_download_nb_urlpath'] - if (futures_name.startswith('delayed') != -1): - # adding in executed notebooks list - params['executed_notebooks'].append(filename) + # store the exceptions in an error result array + for cell in nb[0].cells: + if 'outputs' in cell and len(cell['outputs']) and cell['outputs'][0]['output_type'] == 'error': + status = 'fail' + for key,val in self.futuresInfo.items(): + if key == future.key: + filename_with_path = val['filename_with_path'] + filename = val['filename'] + language_info = val['language_info'] + traceback = cell['outputs'][0]['traceback'] + error_result.append(cell['outputs'][0]) + if 'jupyter_execute_allow_errors' in builder.config and builder.config['jupyter_execute_allow_errors'] is False: + self.halt_execution(builder, params['futures'], traceback, filename) + + if (futures_name.startswith('delayed') != -1): + # adding in executed notebooks list + params['executed_notebooks'].append(filename) + key_to_delete = False + for nb, arr in params['dependency_lists'].items(): + executed = 0 + for elem in arr: + if elem in params['executed_notebooks']: + executed += 1 + if (executed == len(arr)): + key_to_delete = nb + notebook = params['delayed_notebooks'].get(nb) + builder.executenb.execute_notebook(builder, notebook, nb, params, params['delayed_futures']) + if (key_to_delete): + del params['dependency_lists'][str(key_to_delete)] key_to_delete = False - for nb, arr in params['dependency_lists'].items(): - executed = 0 - for elem in arr: - if elem in params['executed_notebooks']: - executed += 1 
- if (executed == len(arr)): - key_to_delete = nb - notebook = params['delayed_notebooks'].get(nb) - builderSelf._execute_notebook_class.execute_notebook(builderSelf, notebook, nb, params, params['delayed_futures']) - if (key_to_delete): - del params['dependency_lists'][str(key_to_delete)] - key_to_delete = False - notebook_name = "{}.ipynb".format(filename) - executed_notebook_path = os.path.join(passed_metadata['path'], notebook_name) - - #Parse Executed notebook to remove hide-output blocks - for cell in executed_nb['cells']: - if cell['cell_type'] == "code": - if cell['metadata']['hide-output']: - cell['outputs'] = [] - #Write Executed Notebook as File - with open(executed_notebook_path, "wt", encoding="UTF-8") as f: - nbformat.write(executed_nb, f) - - ## generate html if needed - if (builderSelf.config['jupyter_generate_html'] and params['target'] == 'website'): - builderSelf._convert_class.convert(executed_nb, filename, language_info, params['destination'], passed_metadata['path']) - - ## generate pdfs if set to true - if (builderSelf.config['jupyter_target_pdf']): - builderSelf._pdf_class.convert_to_latex(builderSelf, filename_with_path, executed_nb['metadata']['latex_metadata']) - builderSelf._pdf_class.move_pdf(builderSelf) - + + #Parse Executed notebook to remove hide-output blocks + for cell in executed_nb['cells']: + if cell['cell_type'] == "code": + if 'hide-output' in cell['metadata']: + cell['outputs'] = [] + # #Write Executed Notebook as File + # with open(executed_notebook_path, "wt", encoding="UTF-8") as f: + # nbformat.write(executed_nb, f) + + #### processing the notebook and saving it in codetree + if executed_nb: + builder.create_codetree(executed_nb) + #builder.create_codetree_files(nb_with_hash) + print('({}/{}) {} -- {} -- {:.2f}s'.format(count, total_count, filename, status, computing_time)) @@ -203,15 +161,11 @@ def check_execution_completion(self, builderSelf, future, nb, error_results, cou error_results.append(results) return filename 
- def save_executed_notebook(self, builderSelf, params): + def save_executed_notebook(self, builder, params): error_results = [] - builderSelf.dask_log['scheduler_info'] = builderSelf.client.scheduler_info() - builderSelf.dask_log['futures'] = [] - - ## create an instance of the class id config set - if (builderSelf.config['jupyter_generate_html'] and params['target'] == 'website'): - builderSelf._convert_class = convertToHtmlWriter(builderSelf) + self.dask_log['scheduler_info'] = builder.client.scheduler_info() + self.dask_log['futures'] = [] # this for loop gathers results in the background total_count = len(params['futures']) @@ -219,23 +173,23 @@ def save_executed_notebook(self, builderSelf, params): update_count_delayed = 1 for future, nb in as_completed(params['futures'], with_results=True, raise_errors=False): count += 1 - builderSelf._execute_notebook_class.check_execution_completion(builderSelf, future, nb, error_results, count, total_count, 'futures', params) + builder.executenb.check_execution_completion(builder, future, nb, error_results, count, total_count, 'futures', params) for future, nb in as_completed(params['delayed_futures'], with_results=True, raise_errors=False): count += 1 if update_count_delayed == 1: update_count_delayed = 0 total_count += len(params['delayed_futures']) - builderSelf._execute_notebook_class.check_execution_completion(builderSelf, future, nb, error_results, count, total_count, 'delayed_futures', params) + builder.executenb.check_execution_completion(builder, future, nb, error_results, count, total_count, 'delayed_futures', params) return error_results - def produce_code_execution_report(self, builderSelf, error_results, params, fln = "code-execution-results.json"): + def produce_code_execution_report(self, builder, error_results, params, fln = "code-execution-results.json"): """ Updates the JSON file that contains the results of the execution of each notebook. 
""" - ensuredir(builderSelf.reportdir) - json_filename = builderSelf.reportdir + fln + ensuredir(builder.reportdir) + json_filename = builder.reportdir + fln if os.path.isfile(json_filename): with open(json_filename, encoding="UTF-8") as json_file: @@ -291,53 +245,41 @@ def produce_code_execution_report(self, builderSelf, error_results, params, fln json_data['run_time'] = time.strftime("%d-%m-%Y %H:%M:%S") try: - if (sys.version_info > (3, 0)): - with open(json_filename, "w") as json_file: - json.dump(json_data, json_file) - else: - with open(json_filename, "w") as json_file: - x = json.dumps(json_data, ensure_ascii=False) - if isinstance(x,str): - x = unicode(x, 'UTF-8') - json_file.write(x) + with open(json_filename, "w") as json_file: + json.dump(json_data, json_file) except IOError: - self.logger.warning("Unable to save lecture status JSON file. Does the {} directory exist?".format(builderSelf.reportdir)) + logger.warning("Unable to save lecture status JSON file. Does the {} directory exist?".format(builder.reportdir)) - def produce_dask_processing_report(self, builderSelf, params, fln= "dask-reports.json"): + return error_results + + def produce_dask_processing_report(self, builder, params, fln= "dask-reports.json"): """ produces a report of dask execution """ - ensuredir(builderSelf.reportdir) - json_filename = builderSelf.reportdir + fln + ensuredir(builder.reportdir) + json_filename = builder.reportdir + fln try: - if (sys.version_info > (3, 0)): - with open(json_filename, "w") as json_file: - json.dump(builderSelf.dask_log, json_file) - else: - with open(json_filename, "w") as json_file: - x = json.dumps(builderSelf.dask_log, ensure_ascii=False) - if isinstance(x,str): - x = unicode(x, 'UTF-8') - json_file.write(x) + with open(json_filename, "w") as json_file: + json.dump(self.dask_log, json_file) except IOError: - self.logger.warning("Unable to save dask reports JSON file. 
Does the {} directory exist?".format(builderSelf.reportdir)) + logger.warning("Unable to save dask reports JSON file. Does the {} directory exist?".format(builder.reportdir)) - def create_coverage_report(self, builderSelf, error_results, params): + def create_coverage_report(self, builder, error_results, params): """ Creates a coverage report of the errors in notebook """ errors = [] - error_results = [] + error_files = [] errors_by_language = dict() - produce_text_reports = builderSelf.config["jupyter_execute_nb"]["text_reports"] - + produce_text_reports = True + #Parse Error Set for full_error_set in error_results: error_result = full_error_set['errors'] filename = full_error_set['filename'] current_language = full_error_set['language'] - language_name = current_language['extension'] + language_name = current_language['name'] if error_result: if language_name not in errors_by_language: errors_by_language[language_name] = dict() @@ -351,8 +293,7 @@ def create_coverage_report(self, builderSelf, error_results, params): errors_by_language[language_name]['files'][filename] = error_result # Create the error report from the HTML template, if it exists. 
- templateFolder = builderSelf.config['jupyter_template_path'] - error_report_template_file = templateFolder + "/" + builderSelf.config["jupyter_template_coverage_file_path"] + error_report_template_file = builder.config["jupyter_coverage_template"] error_report_template = [] if not os.path.isfile(error_report_template_file): @@ -367,7 +308,7 @@ def create_coverage_report(self, builderSelf, error_results, params): language_display_name = errors_by_language[lang_ext]['display_name'] language = errors_by_language[lang_ext]['language'] errors_by_file = errors_by_language[lang_ext]['files'] - error_dir = builderSelf.errordir.format(lang_ext) + error_dir = builder.errordir.format(lang_ext) if produce_text_reports: # set specific language output file @@ -390,11 +331,11 @@ def create_coverage_report(self, builderSelf, error_results, params): results_file = open("{}/{}_overview.txt".format(error_dir, lang_ext), 'w') results_file.write(language_display_name + " execution errors occurred in the notebooks below:\n") - self.logger.error(language_display_name + " execution errors occurred in the notebooks below") + logger.error(language_display_name + " execution errors occurred in the notebooks below") error_number = 1 for filename in errors_by_file: - self.logger.error(filename) + logger.error(filename) number_of_errors = str(len(errors_by_file[filename])) if produce_text_reports: @@ -410,27 +351,27 @@ def create_coverage_report(self, builderSelf, error_results, params): for error in errors_by_file[filename]: # Some errors don't provide a traceback. Make sure that some useful information is provided # to the report - if nothing else, the type of error that was caught. 
- traceback = getattr(error, "traceback", None) - if traceback is None: - error.traceback = str(error) + print(error, "what is this?") + if error: + traceback = ' '.join(error.traceback) - last_line = error.traceback.splitlines()[-1] - if last_line not in error_files_dict: - error_files_dict[last_line] = [] - error_files_dict[last_line].append(error_number) + last_line = traceback[-1] + if last_line not in error_files_dict: + error_files_dict[last_line] = [] + error_files_dict[last_line].append(error_number) - if last_line not in error_count_dict: - error_count_dict[last_line] = 0 - error_count_dict[last_line] += 1 + if last_line not in error_count_dict: + error_count_dict[last_line] = 0 + error_count_dict[last_line] += 1 - notebook_looper += "
{}
\n".format(error.traceback) + notebook_looper += "
{}
\n".format(traceback) - error_number += 1 + error_number += 1 - if produce_text_reports: - error_file.write(error.traceback + "\n") + if produce_text_reports: + error_file.write(traceback + "\n") if produce_text_reports: error_file.close() @@ -474,4 +415,3 @@ def create_coverage_report(self, builderSelf, error_results, params): error_output_file.write(line) - diff --git a/sphinxcontrib/jupyter/writers/html.py b/sphinxcontrib/jupyter/writers/html.py new file mode 100644 index 00000000..c3ef7447 --- /dev/null +++ b/sphinxcontrib/jupyter/writers/html.py @@ -0,0 +1,36 @@ + +from .markdown import MarkdownSyntax + +class HTMLSyntax(MarkdownSyntax): + + def __init__(self): + """ + Provides syntax for IPYNB(HTML) notebooks + + HTML notebooks still make use of Markdown Syntax + but require alternative implementations for some + `visit` and `depart` methods to support additional + features. For example, images included as html have + access to more scale properties. + """ + pass + + def visit_image(self, uri, attrs): + """ + Construct HTML Image + """ + image = '\n\n" #Add double space for html + return image \ No newline at end of file diff --git a/sphinxcontrib/jupyter/writers/jupyter.py b/sphinxcontrib/jupyter/writers/jupyter.py index 92e578ad..5a347beb 100644 --- a/sphinxcontrib/jupyter/writers/jupyter.py +++ b/sphinxcontrib/jupyter/writers/jupyter.py @@ -1,27 +1,38 @@ -import docutils.writers -import nbformat +from docutils.writers import Writer + +from .translate_code import JupyterCodeBlockTranslator +from .translate_ipynb import JupyterIPYNBTranslator +from .translate_html import JupyterHTMLTranslator +from .translate_pdf import JupyterPDFTranslator + +class JupyterWriter(Writer): + + builder_translator = { + #Code Translators + "execute" : JupyterCodeBlockTranslator, + #RST + Code Translators + 'jupyter' : JupyterIPYNBTranslator, + 'jupyterhtml' : JupyterHTMLTranslator, + 'jupyterpdf' : JupyterPDFTranslator, + } -from .translate_code import JupyterCodeTranslator 
-from .translate_all import JupyterTranslator - - -class JupyterWriter(docutils.writers.Writer): def __init__(self, builder): - docutils.writers.Writer.__init__(self) - - self.output = None + super().__init__() #init docutils.writers.Writer self.builder = builder - self.translator_class = self._identify_translator(builder) + self.output = None + try: + self.translator = self.builder_translator[builder.name] + except: + msg = "Builder ({}) does not have a valid translator".format(builder.name) + raise InvalidTranslator(msg) def translate(self): - self.document.settings.newlines = \ - self.document.settings.indents = \ - self.builder.env.config.xml_pretty - - visitor = self.translator_class(self.builder, self.document) + self.document.settings.newlines = True #TODO: Is this required? + self.document.settings.indents = True #TODO: Is this required? - self.document.walkabout(visitor) - self.output = nbformat.writes(visitor.output) + visitor = self.translator(self.document, self.builder) + self.document.walkabout(visitor) #TODO: What is this doing? + self.output = visitor.output.notebook_as_string #writers/notebook -> JupyterNotebook def _set_ref_urlpath(self, urlpath=None): """ @@ -35,35 +46,5 @@ def _set_jupyter_download_nb_image_urlpath(self, urlpath=None): """ self.builder.jupyter_download_nb_image_urlpath = urlpath - def _identify_translator(self, builder): - """ - Determine which translator class to apply to this translation. The choices are 'code' and 'all'; all converts - the entire sphinx RST file to a Jupyter notebook, whereas 'code' only translates the code cells, and - skips over all other content. - - Typically, you would use 'code' when you're testing your code blocks, not for final publication of your - notebooks. - - The default translator to use is set in conf.py, but this value can be overridden on the command line. - - :param builder: The builder object provided by the Sphinx run-time - :return: The translator class object to instantiate. 
- """ - code_only = False - if "jupyter_conversion_mode" not in builder.config \ - or builder.config["jupyter_conversion_mode"] is None: - self.builder( - "jupyter_conversion_mode is not given in conf.py. " - "Set conversion_mode as default(code)") - code_only = True - else: - if builder.config["jupyter_conversion_mode"] == "code": - code_only = True - elif builder.config["jupyter_conversion_mode"] != "all": - builder.warn( - "Invalid jupyter_conversion_mode is given({}). " - "Set conversion_mode as default(code)" - .format(builder.config["jupyter_conversion_mode"])) - code_only = True - - return JupyterCodeTranslator if code_only else JupyterTranslator +class InvalidTranslator(Exception): + pass diff --git a/sphinxcontrib/jupyter/writers/make_pdf.py b/sphinxcontrib/jupyter/writers/make_pdf.py index 8bdbc9ca..30e29154 100644 --- a/sphinxcontrib/jupyter/writers/make_pdf.py +++ b/sphinxcontrib/jupyter/writers/make_pdf.py @@ -15,7 +15,7 @@ from sphinx.util import logging from nbconvert.preprocessors import LatexPreprocessor from distutils.dir_util import copy_tree -from .utils import python27_glob, get_list_of_files +from .utils import get_list_of_files, get_subdirectory_and_filename class MakePDFWriter(): """ @@ -23,13 +23,6 @@ class MakePDFWriter(): """ logger = logging.getLogger(__name__) def __init__(self, builder): - self.pdfdir = builder.outdir + "/pdf" #pdf directory - self.texdir = builder.outdir + "/executed" #latex directory - self.texbookdir = builder.outdir + "/texbook" # tex files for book pdf - - for path in [self.pdfdir, self.texdir]: - ensuredir(path) - self.pdf_exporter = PDFExporter() self.tex_exporter = LatexExporter() self.index_book = builder.config['jupyter_pdf_book_index'] @@ -37,23 +30,20 @@ def __init__(self, builder): def move_pdf(self, builder): dir_lists = [] move_files = True - for root, dirs, files in os.walk(self.texdir, topdown=True): + for root, dirs, files in os.walk(builder.texdir, topdown=True): if move_files: for f in files: if 
".pdf" in f: source = root + "/" + f - self.check_remove_destination_file(self.pdfdir, f) - shutil.move(source, self.pdfdir) + self.check_remove_destination_file(builder.pdfdir, f) + shutil.move(source, builder.pdfdir) move_files = False for name in dirs: presentdir = os.path.join(root, name) source = root + "/" + name - subdirectory = source.replace(self.texdir, "") - destination = self.pdfdir + subdirectory - if sys.version_info[0] < 3: - pdfs = python27_glob(presentdir +"/", "*.pdf") - else: - pdfs = glob.glob(presentdir + "/*.pdf", recursive=True) + subdirectory = source.replace(builder.texdir, "") + destination = builder.pdfdir + subdirectory + pdfs = glob.glob(presentdir + "/*.pdf", recursive=True) if subdirectory in dir_lists: continue if len(pdfs): @@ -77,26 +67,25 @@ def convert_to_latex(self, builder, filename, latex_metadata): """ relative_path = '' tex_data = '' - tex_build_path = self.texdir + relative_path - pdf_build_path = self.pdfdir + relative_path - template_folder = builder.config['jupyter_template_path'] + tex_build_path = builder.texdir + relative_path + pdf_build_path = builder.pdfdir + relative_path ensuredir(tex_build_path) ensuredir(pdf_build_path) ## setting the working directory - os.chdir(self.texdir) + os.chdir(builder.texdir) ## copies all theme folder images to static folder if os.path.exists(builder.confdir + "/theme/static/img"): - copy_tree(builder.confdir + "/theme/static/img", self.texdir + "/_static/img/", preserve_symlinks=1) + copy_tree(builder.confdir + "/theme/static/img", builder.texdir + "/_static/img/", preserve_symlinks=1) else: self.logger.warning("Image folder not present inside the theme folder") - fl_ipynb = self.texdir + "/" + "{}.ipynb".format(filename) - fl_tex = self.texdir + "/" + "{}.tex".format(filename) - fl_tex_template = builder.confdir + "/" + template_folder + "/" + builder.config['jupyter_latex_template'] + fl_ipynb = builder.texdir + "/" + "{}.ipynb".format(filename) + fl_tex = builder.texdir + "/" + 
"{}.tex".format(filename) + fl_tex_template = builder.confdir + "/" + builder.config['jupyter_template_latex'] ## do not convert excluded patterns to latex excluded_files = [x in filename for x in builder.config['jupyter_pdf_excludepatterns']] @@ -104,20 +93,13 @@ def convert_to_latex(self, builder, filename, latex_metadata): if not True in excluded_files: ## --output-dir - forms a directory in the same path as fl_ipynb - need a way to specify properly? ### converting to pdf using xelatex subprocess - if sys.version_info[0] < 3: - subprocess.call(["jupyter", "nbconvert","--to","latex","--template",fl_tex_template,"from", fl_ipynb]) - else: - subprocess.run(["jupyter", "nbconvert","--to","latex","--template",fl_tex_template,"from", fl_ipynb]) + subprocess.run(["jupyter", "nbconvert","--to","latex","--template",fl_tex_template,"from", fl_ipynb]) ### check if subdirectory - subdirectory = "" - index = filename.rfind('/') - if index > 0: - subdirectory = filename[0:index] - filename = filename[index + 1:] + subdirectory, filename = get_subdirectory_and_filename(filename) ### set working directory for xelatex processing - os.chdir(self.texdir + "/" + subdirectory) + os.chdir(builder.texdir + "/" + subdirectory) try: self.subprocess_xelatex(fl_tex, filename) @@ -152,15 +134,10 @@ def subprocess_bibtex(self, filename): def nbconvert_index(self, builder): ## converts index ipynb file of book to tex with the help of the specified template - fl_ipynb = self.texbookdir + "/" + self.index_book + ".ipynb" - template_folder = builder.config['jupyter_template_path'] - fl_tex_template = builder.confdir + "/" + template_folder + "/" + builder.config['jupyter_latex_template_book'] + fl_ipynb = builder.texbookdir + "/" + self.index_book + ".ipynb" + fl_tex_template = builder.confdir + "/" + builder.config['jupyter_template_latexbook'] - - if sys.version_info[0] < 3: - subprocess.call(["jupyter", "nbconvert","--to","latex","--template",fl_tex_template,"from", fl_ipynb]) - else: - 
subprocess.run(["jupyter", "nbconvert","--to","latex","--template",fl_tex_template,"from", fl_ipynb]) + subprocess.run(["jupyter", "nbconvert","--to","latex","--template",fl_tex_template,"from", fl_ipynb]) def create_pdf_from_latex(self, fl_tex, filename): ## parses the latex file to create pdf @@ -243,20 +220,20 @@ def make_changes_tex(self, data, fullpath): alteredarr.append(line) return '\n'.join(alteredarr) - def copy_tex_for_book(self): + def copy_tex_for_book(self, builder): ## make a separate directory for tex files relevant to book - if os.path.exists(self.texbookdir): - shutil.rmtree(self.texbookdir) + if os.path.exists(builder.texbookdir): + shutil.rmtree(builder.texbookdir) - shutil.copytree(self.texdir, self.texbookdir) + shutil.copytree(builder.texdir, builder.texbookdir) def process_tex_for_book(self, builder): ## does all the preprocessing of latex files before calling them from the index file ## of the book and converting them to pdf. ## converts the index ipynb of the book and converts it to pdf via latex - self.copy_tex_for_book() + self.copy_tex_for_book(builder) - files = get_list_of_files(self.texbookdir) + files = get_list_of_files(builder.texbookdir) for filename in files: if ".tex" in filename: with open(filename, 'r', encoding="utf8") as f: @@ -269,9 +246,9 @@ def process_tex_for_book(self, builder): f.close() output.close() - os.chdir(self.texbookdir) + os.chdir(builder.texbookdir) self.nbconvert_index(builder) - fl_tex = self.texbookdir + "/" + self.index_book + ".tex" + fl_tex = builder.texbookdir + "/" + self.index_book + ".tex" filename = self.index_book ## checking if an explicit output filename is specified in the config file diff --git a/sphinxcontrib/jupyter/writers/make_site.py b/sphinxcontrib/jupyter/writers/make_site.py index 9013a8c0..12162f15 100644 --- a/sphinxcontrib/jupyter/writers/make_site.py +++ b/sphinxcontrib/jupyter/writers/make_site.py @@ -9,8 +9,8 @@ class MakeSiteWriter(): Makes website for each package """ logger 
= logging.getLogger(__name__) - def __init__(self, builderSelf): - builddir = builderSelf.outdir + def __init__(self, builder): + builddir = builder.outdir ## removing the /jupyter from path to get the top directory index = builddir.rfind('/jupyter') @@ -18,33 +18,28 @@ def __init__(self, builderSelf): builddir = builddir[0:index] ## defining directories - self.websitedir = builddir + "/jupyter_html/" + self.websitedir = builddir + "/website/" self.downloadipynbdir = self.websitedir + "/_downloads/ipynb/" - def build_website(self, builderSelf): + def build_website(self, builder): if os.path.exists(self.websitedir): shutil.rmtree(self.websitedir) - builderSelf.themePath = builderSelf.config['jupyter_theme_path'] - themeFolder = builderSelf.config['jupyter_theme'] - - if themeFolder is not None: - builderSelf.themePath = builderSelf.themePath + "/" + themeFolder + builder.themePath = builder.config['jupyter_html_theme'] - if os.path.exists(builderSelf.themePath): + if os.path.exists(builder.themePath): pass else: self.logger.warning("theme directory not found") exit() - htmlFolder = builderSelf.themePath + "/html/" - staticFolder = builderSelf.themePath + "/static" + staticFolder = builder.themePath + "/static" ## copies the html and downloads folder - shutil.copytree(builderSelf.outdir + "/html/", self.websitedir, symlinks=True) + shutil.copytree(builder.outdir + "/html/", self.websitedir, symlinks=True) ## copies all the static files - shutil.copytree(builderSelf.outdir + "/_static/", self.websitedir + "_static/", symlinks=True) + shutil.copytree(builder.outdir + "/_static/", self.websitedir + "_static/", symlinks=True) ## copies all theme files to _static folder if os.path.exists(staticFolder): @@ -52,35 +47,23 @@ def build_website(self, builderSelf): else: self.logger.warning("static folder not present in the themes directory") - ## copies the helper html files - if os.path.exists(htmlFolder): - copy_tree(htmlFolder, self.websitedir, preserve_symlinks=1) - else: 
- self.logger.warning("html folder not present in the themes directory") - - - if "jupyter_coverage_dir" in builderSelf.config and builderSelf.config["jupyter_coverage_dir"]: - if os.path.exists(builderSelf.config['jupyter_coverage_dir']): - self.coveragedir = builderSelf.config['jupyter_coverage_dir'] - ## copies the report of execution results - if os.path.exists(self.coveragedir + "/jupyter/reports/code-execution-results.json"): - shutil.copy2(self.coveragedir + "/jupyter/reports/code-execution-results.json", self.websitedir + "_static/") + if os.path.exists(builder.executedir): + self.coveragereport = builder.executedir + "/reports/code-execution-results.json" + ## copies the report of execution results + if os.path.exists(self.coveragereport): + shutil.copy2(self.coveragereport, self.websitedir + "_static/") else: - self.logger.error("coverage directory not found. Please ensure to run coverage build before running website build") + self.logger.error("coverage report not found. Please ensure to run make execute for creating website reports") else: - self.logger.error(" coverage directory nbot specified. Please specify coverage directory for creating website reports ") + self.logger.error("Notebooks are not executed. 
Please run make execute for creating website reports") ## copies the downloads folder - if "jupyter_download_nb" in builderSelf.config and builderSelf.config["jupyter_download_nb"]: - if builderSelf.config["jupyter_download_nb_execute"]: - sourceDownloads = builderSelf.outdir + "/_downloads/executed" - else: - sourceDownloads = builderSelf.outdir + "/_downloads" - if os.path.exists(sourceDownloads): - shutil.copytree(sourceDownloads, self.downloadipynbdir, symlinks=True) - else: - self.logger.warning("Downloads folder not created during build") + sourceDownloads = builder.outdir + "/_downloads" + if os.path.exists(sourceDownloads): + shutil.copytree(sourceDownloads, self.downloadipynbdir, symlinks=True) + else: + self.logger.warning("Downloads folder not created during build") diff --git a/sphinxcontrib/jupyter/writers/markdown.py b/sphinxcontrib/jupyter/writers/markdown.py new file mode 100644 index 00000000..97917e1d --- /dev/null +++ b/sphinxcontrib/jupyter/writers/markdown.py @@ -0,0 +1,119 @@ +""" +Contains Markdown Syntax and Object Accumulators +""" + + +class MarkdownSyntax: + """ + Provides Markdown Syntax + + visit_{}: + methods contain the begining of the markdown syntax + depart_{}: + methods contain the end of the markdown syntax and may not be needed + + Reference + --------- + [1] https://commonmark.org/help/ + [2] https://spec.commonmark.org/0.29/ + """ + + def __init__(self): + pass + + def visit_attribution(self): + return "> " + + def visit_block_quote(self): + return "> " + + def visit_bold(self): + return "**" + + def depart_bold(self): + return "**" + + def visit_citation(self, id_text): + return "\n".format(id_text) + + def visit_code_block(self, language): + return "``` {}".format(language) + + def depart_code_block(self): + return "```" + + def visit_definition(self): + return "
" #TODO: Is there a MD equivalent? + + def depart_definition(self): + return "
" #TODO: Is there a MD equivalent? + + def visit_definition_list(self): + return "
" + + def depart_definition_list(self): + return "
" + + def visit_horizontal_rule(self): + return "---" + + def visit_image(self, uri): + return "![{0}]({0})".format(uri) + + def visit_inline_code(self): + return "`" + + def depart_inline_code(self): + return "`" + + def visit_italic(self): + return "*" + + def depart_italic(self): + return "*" + + def visit_heading(self, depth): + return "#"*depth + + def visit_label(self): + return "\[" + + def depart_label(self): + return "\]" + + def visit_link(self, text, link): + return "[text](link)" + + #List(Start) + #Note: Not required as implemented as an Accumulator Object List() + + def visit_literal(self): + return "`" + + def depart_literal(self): + return "`" + + def visit_literal_block(self, language=None): + if language is None: + return "```" + else: + return "```{}".format(language) + + def depart_literal_block(self): + return "```" + + def visit_math(self, text): + return "$ {} $".format(text) + + def visit_math_block(self, text, label=None): + if label: + return "$$\n{0} {1}$$".format(text, label) + else: + return "$$\n{0}\n$$".format(text) + + def visit_note(self): + return ">**Note**\n>\n>" + + def visit_title(self, level): + return "#" * level + diff --git a/sphinxcontrib/jupyter/writers/notebook.py b/sphinxcontrib/jupyter/writers/notebook.py new file mode 100644 index 00000000..b0a92221 --- /dev/null +++ b/sphinxcontrib/jupyter/writers/notebook.py @@ -0,0 +1,226 @@ +""" +An Object Representing a Jupyter Notebook + +Example +------- + +from notebook import JupyterNotebook +nb = JupyterNotebook() +nb.add_code_cell("import numpy as np", metadata={'collapse' : True}) +nb.add_markdown_cell("Hi") +nb.add_raw_cell("--Bye--") +nb.add_kernelspec("python3") +nb.write("test.ipynb") + +""" + + +import nbformat +import nbformat.v4 as nbf +from jupyter_client.kernelspec import get_kernel_spec, find_kernel_specs + +class JupyterNotebook: + + def __init__(self, language="python"): + """ + A simple object that represents a Jupyter notebook + """ + self.nb = 
nbf.new_notebook() + self.add_kernelspec(language) + + @property + def notebook(self): + return self.nb + + @property + def notebook_as_string(self): + return self.writes() + + def add_cell(self, source, cell_type, metadata=None, **kwargs): + if cell_type == "markdown": + self.add_markdown_cell(source, metadata, **kwargs) + elif cell_type == "code": + self.add_code_cell(source, metadata, **kwargs) + elif cell_type == "raw": + self.add_raw_cell(source, metadata, **kwargs) + else: + raise InvalidJupyterCell("{} is not a valid Jupyter Cell type".format(cell_type)) + + def add_code_cell(self, source, metadata=None, **kwargs): + """ + Append a Code Cell to the Notebook + + Parameters + ---------- + source : str + metadata : dict, optional(default=None) + Add metadata to the cell + """ + code_cell = nbf.new_code_cell(source, **kwargs) + if metadata: + code_cell = self.add_metadata(code_cell, metadata) + self.nb["cells"].append(code_cell) + + def add_markdown_cell(self, formatted_text, metadata=None, **kwargs): + """ + Append a Markdown Cell to the Notebook + + Parameters + ---------- + formatted_text : str + """ + markdown_cell = nbf.new_markdown_cell(formatted_text, **kwargs) + if metadata: + markdown_cell = self.add_metadata(markdown_cell, metadata) + self.nb["cells"].append(markdown_cell) + + def add_raw_cell(self, source, metadata=None, **kwargs): + """ + Append a Raw Cell to the Notebook + + Parameters + ---------- + source : str + """ + raw_cell = nbf.new_raw_cell(source, **kwargs) + if metadata: + raw_cell = self.add_metadata(raw_cell, metadata) + self.nb["cells"].append(raw_cell) + + def add_kernelspec(self, language): + """ + https://jupyter-client.readthedocs.io/en/stable/api/kernelspec.html + """ + try: + self.kernelspec = get_kernel_spec(language) + except: + msg = "Requested Jupyter Kernel for language: {language} is not found" + raise JupyterKernelNotFound(msg) + kernelspec = { + "display_name": self.kernelspec.display_name, + "language": 
self.kernelspec.language, + "name" : language + } + self.nb.metadata.kernelspec = kernelspec + + def write(self, fl): + """ + https://nbformat.readthedocs.io/en/latest/api.html#nbformat.write + """ + nbformat.write(self.nb, fl) + + def writes(self): + """ + https://nbformat.readthedocs.io/en/latest/api.html#nbformat.writes + """ + return nbformat.writes(self.nb) + + def add_metadata(self, cell, metadata): + """ Attach Metadata to a cell """ + for k,v in metadata.items(): + cell.metadata[k] = v + return cell + + def add_metadata_notebook(self, metadata): + """ Attach Metadata to a notebook """ + for k,v in metadata.items(): + self.nb.metadata[k] = v + + def get_metadata(self, key, default_value={}): + if not key in self.nb.metadata: + self.nb.metadata[key] = default_value + return self.nb.metadata[key] + + + +#Custom Exceptions + +class JupyterKernelNotFound(Exception): + pass + +class InvalidJupyterCell(Exception): + pass + + + +#-Experimental Ideas-# + +import textwrap + +class JupyterCell: + """ + Example + ------- + from notebook import JupyterCell + c = JupyterCell() + c.add_text("Hi there") + c.add_highlighted_markdown("import numpy as np", "python") + c.content + """ + + def __init__(self): + """ + [Test] Concept Class -> May not be used + Add inline and blocks + """ + self.content = [] + self.type = None + + def __repr__(self): + print("{type}:\n{content}".format(self.type, self.content)) + + #Direct Entry + + def add_text(self, text): + self.content.append(text) + + #Accumulated Elements + + @property + def start_block(self): + """ Accumulator for constructing blocks """ + self.block = [] + + @property + def finish_block(self): + del self.block + + def add_highlighted_markdown(self, language): + template = textwrap.dedent( + """``` {language} + {code} + ``` + """ + ) + self.element.append(template.format(language, self.element)) + + def add_bold(self, text): + template = "**{text}**" + self.content.append(template) + + +class BlockAccumulator: + + def 
__init__(self): + self.__block = [] + self.__element = None + + @property + def block(self): + return "".join(self.__block) + + def add(self, val): + self.__block.append(val) + + @property + def element(self): + return self.__element + + @element.setter + def element(self, val): + self.__element = val + + @property + def add_element(self): + self.__block.append(self.element) + self.__element = None \ No newline at end of file diff --git a/sphinxcontrib/jupyter/writers/pdf.py b/sphinxcontrib/jupyter/writers/pdf.py new file mode 100644 index 00000000..ba4ef601 --- /dev/null +++ b/sphinxcontrib/jupyter/writers/pdf.py @@ -0,0 +1,31 @@ +from .markdown import MarkdownSyntax + +class PDFSyntax(MarkdownSyntax): + def __init__(self): + """ + Provides syntax for IPYNB(PDF) notebooks + + PDF notebooks still make use of Markdown Syntax + but require alternative implementations for some + `visit` and `depart` methods to support additional + features. + """ + pass + + def visit_math(self, text): + return "${}$".format(text) + + def visit_reference(self, node): + if "refuri" in node and "http" in node["refuri"]: + return "[" + elif "refid" in node: + if 'equation-' in node['refid']: + return "\eqref{" + elif self.topic: + return + else: + return "\hyperlink{" + elif "refuri" in node and 'references#' not in node["refuri"]: + return "[" + else: + return "\hyperlink{" \ No newline at end of file diff --git a/sphinxcontrib/jupyter/writers/translate_all.py b/sphinxcontrib/jupyter/writers/translate_all.py index ff710f17..7023dc19 100644 --- a/sphinxcontrib/jupyter/writers/translate_all.py +++ b/sphinxcontrib/jupyter/writers/translate_all.py @@ -540,7 +540,7 @@ def visit_reference(self, node): def depart_reference(self, node): subdirectory = False - + ## removing zreferences from the index file if self.in_book_index and node.attributes['refuri'] == 'zreferences': return diff --git a/sphinxcontrib/jupyter/writers/translate_code.py b/sphinxcontrib/jupyter/writers/translate_code.py index 
b3dd2633..5983d39e 100644 --- a/sphinxcontrib/jupyter/writers/translate_code.py +++ b/sphinxcontrib/jupyter/writers/translate_code.py @@ -1,276 +1,98 @@ -import docutils.nodes -import re -import nbformat.v4 -import os.path -import datetime -from .utils import LanguageTranslator, JupyterOutputCellGenerators, get_source_file_name +""" +Translators for working with Code Blocks +""" +from docutils import nodes +from docutils.nodes import SparseNodeVisitor +from sphinx.util.docutils import SphinxTranslator +from sphinx.util import logging -class JupyterCodeTranslator(docutils.nodes.GenericNodeVisitor): +from .notebook import JupyterNotebook - URI_SPACE_REPLACE_FROM = re.compile(r"\s") - URI_SPACE_REPLACE_TO = "-" +logger = logging.getLogger(__name__) - def __init__(self, builder, document): - docutils.nodes.NodeVisitor.__init__(self, document) - - self.lang = None - self.nodelang = None - self.visit_first_title = True - - self.langTranslator = LanguageTranslator(builder.config["templates_path"]) - - # Reporter - self.warn = self.document.reporter.warning - self.error = self.document.reporter.error - - # Settings - self.settings = document.settings +class SphinxSparseTranslator(SparseNodeVisitor): + def __init__(self, document, builder): + super().__init__(document) self.builder = builder - self.source_file_name = get_source_file_name( - self.settings._source, - self.settings.env.srcdir) - self.default_lang = builder.config["jupyter_default_lang"] - - # Create output notebook - self.output = nbformat.v4.new_notebook() - - # Variables defined in conf.py - self.jupyter_static_file_path = builder.config["jupyter_static_file_path"] - self.jupyter_kernels = builder.config["jupyter_kernels"] - self.jupyter_write_metadata = builder.config["jupyter_write_metadata"] - self.jupyter_drop_solutions = builder.config["jupyter_drop_solutions"] - self.jupyter_drop_tests = builder.config["jupyter_drop_tests"] - self.jupyter_ignore_no_execute = builder.config["jupyter_ignore_no_execute"] 
- self.jupyter_ignore_skip_test = builder.config["jupyter_ignore_skip_test"] - self.jupyter_lang_synonyms = builder.config["jupyter_lang_synonyms"] - self.jupyter_target_html = builder.config["jupyter_target_html"] - self.jupyter_download_nb_image_urlpath = builder.jupyter_download_nb_image_urlpath - self.jupyter_images_markdown = builder.config["jupyter_images_markdown"] - self.jupyter_target_pdf = builder.config["jupyter_target_pdf"] - self.jupyter_pdf_showcontentdepth = builder.config["jupyter_pdf_showcontentdepth"] - self.jupyter_pdf_book = builder.config["jupyter_pdf_book"] - self.book_index = builder.config["jupyter_pdf_book_index"] - if hasattr(builder, 'add_bib_to_latex'): - self.add_bib_to_latex = builder.add_bib_to_latex - - # set the value of the cell metadata["slideshow"] to slide as the default option - self.slide = "slide" - self.metadata_slide = False #value by default for all the notebooks, we change it for those we want - - - # Header Block - template_paths = builder.config["templates_path"] - header_block_filename = builder.config["jupyter_header_block"] - - full_path_to_header_block = None - for template_path in template_paths: - if header_block_filename: - if os.path.isfile(template_path + "/" + header_block_filename): - full_path_to_header_block = os.path.normpath( template_path + "/" + header_block_filename) - - if full_path_to_header_block: - with open(full_path_to_header_block) as input_file: - lines = input_file.readlines() - - line_text = "".join(lines) - formatted_line_text = self.strip_blank_lines_in_end_of_block( - line_text) - nb_header_block = nbformat.v4.new_markdown_cell( - formatted_line_text) - - # Add the header block to the output stream straight away - self.output["cells"].append(nb_header_block) - - # Write metadata - if self.jupyter_write_metadata: - meta_text = \ - "Notebook created: {:%Y-%m-%d %H:%M:%S} \n"\ - "Generated from: {} " - - metadata = meta_text.format( - datetime.datetime.now(), - self.source_file_name) - - 
self.output["cells"].append( - nbformat.v4.new_markdown_cell(metadata)) - - # Variables used in visit/depart - self.in_code_block = False # if False, it means in markdown_cell - self.output_cell_type = None - self.code_lines = [] - - # generic visit and depart methods - # -------------------------------- - simple_nodes = ( - docutils.nodes.TextElement, - docutils.nodes.image, - docutils.nodes.colspec, - docutils.nodes.transition) # empty elements - - def default_visit(self, node): - pass - - def default_departure(self, node): - pass + self.config = builder.config + self.settings = document.settings - # specific visit and depart methods - # --------------------------------- +class JupyterCodeBlockTranslator(SphinxSparseTranslator): + + #Configuration (Literal Block) + literal_block = dict() + literal_block['in'] = False + literal_block['no-execute'] = False - # ========= - # Sections - # ========= - def visit_document(self, node): - """at start + def __init__(self, document, builder): """ - # we need to give the translator a default language! - # the translator needs to know what language the document is written in - # before depart_document is called. - self.lang = self.default_lang - - def depart_document(self, node): - """at end + A sparse translator for extracting code-blocks from RST documents + and generating a Jupyter Notebook """ - if not self.lang: - self.warn( - "Highlighting language is not given in .rst file. " - "Set kernel as default(python3)") - self.lang = self.default_lang - - # metadata for slides, this activates the option where each cell can be a slide - if self.metadata_slide: - self.output.metadata.celltoolbar = "Slideshow" - - - # Update metadata - if self.jupyter_kernels is not None: - try: - self.output.metadata.kernelspec = \ - self.jupyter_kernels[self.lang]["kernelspec"] - self.output.metadata["filename"] = self.source_file_name.split("/")[-1] - self.output.metadata["title"] = self.title - except: - self.warn( - "Invalid jupyter kernels. 
" - "jupyter_kernels: {}, lang: {}" - .format(self.jupyter_kernels, self.lang)) - - def visit_highlightlang(self, node): - lang = node.attributes["lang"].strip() - if lang in self.jupyter_kernels: - self.lang = lang - else: - self.warn( - "Highlighting language({}) is not defined " - "in jupyter_kernels in conf.py. " - "Set kernel as default({})" - .format(lang, self.default_lang)) - self.lang = self.default_lang + super().__init__(document, builder) #add document, builder, config and settings to object + #-Jupyter Settings-# + self.language = self.config["jupyter_language"] #self.language = self.config['highlight_language'] (https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-highlight_language) + self.language_synonyms = self.config['jupyter_language_synonyms'] - # ================= - # Inline elements - # ================= - def visit_Text(self, node): - text = node.astext() - if self.in_code_block: - self.code_lines.append(text) + def visit_document(self, node): + self.output = JupyterNotebook(language=self.language) + #Collector List for Current Cell + self.new_cell() - def depart_Text(self, node): + def depart_document(self, node): pass - def visit_title(self, node): - #TODO: add support for docutils .. 
title:: - if self.visit_first_title: - self.title = node.astext() - self.visit_first_title = False - - # ================ - # code blocks - # ================ def visit_literal_block(self, node): - _parse_class = JupyterOutputCellGenerators.GetGeneratorFromClasses(self, node) - self.output_cell_type = _parse_class["type"] - self.solution = _parse_class["solution"] - self.test = _parse_class["test"] + """ + Parse Literal Blocks (Code Blocks) + """ + #Start new cell and add add current cell to notebook + self.literal_block['in'] = True + self.new_cell(cell_type = "code") + self.cell_metadata = dict() - try: + #-Determine Language of Code Block-# + if "language" in node.attributes: self.nodelang = node.attributes["language"].strip() - except KeyError: - self.nodelang = self.lang + else: + self.cell_type = "markdown" if self.nodelang == 'default': - self.nodelang = self.lang - - # Translate the language name across from the Sphinx to the Jupyter namespace - self.nodelang = self.langTranslator.translate(self.nodelang) + self.nodelang = self.language #use notebook programming language - self.in_code_block = True - self.code_lines = [] - - # If the cell being processed contains code written in a language other than the one that - # was specified as the default language, do not create a code block for it - turn it into - # markup instead. 
- if self.nodelang != self.langTranslator.translate(self.lang): - if self.nodelang in self.jupyter_lang_synonyms: - pass + #Check for no-execute status + if "classes" in node.attributes: + if "no-execute" in node.attributes["classes"]: + self.literal_block['no-execute'] = True else: - self.output_cell_type = JupyterOutputCellGenerators.MARKDOWN - + self.literal_block['no-execute'] = False + + if "hide-output" in node.attributes["classes"]: + self.cell_metadata["hide-output"] = True + + ## Check node language is the same as notebook language else make it markdown + if (self.nodelang != self.language and self.nodelang not in self.language_synonyms) or self.literal_block['no-execute']: + logger.warning("Found a code-block with different programming \ + language to the notebook language. Adding as markdown" + ) + raise nodes.SkipNode + def depart_literal_block(self, node): - if self.solution and self.jupyter_drop_solutions: - pass # Skip solutions if we say to. - elif self.test and self.jupyter_drop_tests: - pass # Skip tests if we say to. - else: # Don't skip otherwise. 
- line_text = "".join(self.code_lines) - formatted_line_text = self.strip_blank_lines_in_end_of_block(line_text) - new_code_cell = self.output_cell_type.Generate(formatted_line_text, self) - - # add slide metadata on each cell, value by default: slide - if self.metadata_slide: #value by default for all the notebooks, we change it for those we want - new_code_cell.metadata["slideshow"] = { 'slide_type': self.slide} - self.slide = "slide" - #Save Collapse Cell Option for HTML Parser - if "collapse" in node["classes"]: - new_code_cell["metadata"]["html-class"] = 'collapse' - #Save hide-output cell option for HTML Parser - if "hide-output" in node["classes"]: - new_code_cell["metadata"]["hide-output"] = True - else: - new_code_cell["metadata"]["hide-output"] = False - #Code Output - if self.output_cell_type is JupyterOutputCellGenerators.CODE_OUTPUT: - # Output blocks must be added to code cells to make any sense. - # This script assumes that any output blocks will immediately follow a code - # cell; a warning is raised if the cell immediately preceding this output - # block is not a code cell. - # - # It is assumed that code cells may only have one output block - any more than - # one will raise a warning and be ignored. - mostRecentCell = self.output["cells"][-1] - if mostRecentCell.cell_type != "code": - self.warn("Warning: Class: output block found after a " + - mostRecentCell.cell_type + " cell. Outputs may only come after code cells.") - elif mostRecentCell.outputs: - self.warn( - "Warning: Multiple class: output blocks found after a code cell. 
Each code cell may only be followed by either zero or one output blocks.") - else: - mostRecentCell.outputs.append(new_code_cell) - else: - self.output["cells"].append(new_code_cell) + source = "".join(self.cell) + self.output.add_cell(source, self.cell_type, self.cell_metadata) + self.new_cell() + self.literal_block['in'] = False - self.in_code_block = False + def visit_Text(self, node): + if self.literal_block['in']: + text = node.astext() + self.cell.append(text) - # =================== - # general methods - # =================== - @staticmethod - def strip_blank_lines_in_end_of_block(line_text): - lines = line_text.split("\n") + def depart_Text(self, node): + pass - for line in range(len(lines)): - if len(lines[-1].strip()) == 0: - lines = lines[:-1] - else: - break + #Utilities - return "\n".join(lines) + def new_cell(self, cell_type="markdown"): + self.cell = [] + self.cell_type = cell_type diff --git a/sphinxcontrib/jupyter/writers/translate_html.py b/sphinxcontrib/jupyter/writers/translate_html.py new file mode 100644 index 00000000..81c66b80 --- /dev/null +++ b/sphinxcontrib/jupyter/writers/translate_html.py @@ -0,0 +1,131 @@ +""" +Translator to Support IPYNB(HTML) and Website Support +""" + +from __future__ import unicode_literals +import re +import nbformat.v4 +from docutils import nodes, writers +from shutil import copyfile +import copy +import os + +from .translate_ipynb import JupyterIPYNBTranslator +from .utils import JupyterOutputCellGenerators +from .notebook import JupyterNotebook +from .html import HTMLSyntax + +class JupyterHTMLTranslator(JupyterIPYNBTranslator): + + + def __init__(self, document, builder): + """ + Jupyter Translator for HTML End Target Support + + This will generate IPYNB files emphasis on HTML that are + built to work with the `nbconvert` template to support website + construction + """ + super().__init__(document, builder) + # HTML Settings + self.html_ext = ".html" + self.syntax = HTMLSyntax() + self.urlpath = 
builder.urlpath + self.jupyter_download_nb_image_urlpath = builder.jupyter_download_nb_image_urlpath + self.jupyter_static_file_path = builder.config["jupyter_static_file_path"] + + #-Nodes-# + + def visit_image(self, node): + """ + Image Directive + Include Images as HTML including attributes that + are available from the directive + """ + uri = node.attributes["uri"] + self.images.append(uri) + if self.jupyter_download_nb_image_urlpath: + for file_path in self.jupyter_static_file_path: + if file_path in uri: + uri = uri.replace(file_path +"/", self.jupyter_download_nb_image_urlpath) + break #don't need to check other matches + attrs = node.attributes + self.cell.append(self.syntax.visit_image(uri, attrs)) + + def visit_label(self, node): + if self.footnote['in']: + ids = node.parent.attributes["ids"] + id_text = "" + for id_ in ids: + id_text += "{} ".format(id_) + else: + id_text = id_text[:-1] + + self.cell.append("

[{}] ".format(id_text, id_text, node.astext())) + raise nodes.SkipNode + + if self.citation['in']: + self.cell.append(self.syntax.visit_label()) + + #References(Start) + + def depart_reference(self, node): + subdirectory = False + + if self.topic: + # Jupyter Notebook uses the target text as its id + uri_text = node.astext().replace(" ","-") + uri_text = uri_text.replace("(", "%28").replace(")", "%29") + formatted_text = "](#{})".format(uri_text) + else: + # if refuri exists, then it includes id reference + if "refuri" in node.attributes: + refuri = node["refuri"] + # add default extension(.ipynb) + if "internal" in node.attributes and node.attributes["internal"] == True: + refuri = self.add_extension_to_inline_link(refuri, self.html_ext) + if self.urlpath is not None: + refuri = self.urlpath + refuri + else: + # in-page link + if "refid" in node: + refid = node["refid"] + self.inpage_reference = True + #markdown doesn't handle closing brackets very well so will replace with %28 and %29 + #ignore adjustment when targeting pdf as pandoc doesn't parse %28 correctly + refid = refid.replace("(", "%28") + refid = refid.replace(")", "%29") + #markdown target + refuri = "#{}".format(refid) + # error + else: + self.error("Invalid reference") + refuri = "" + + #TODO: review if both %28 replacements necessary in this function? 
+ # Propose delete above in-link refuri + #ignore adjustment when targeting pdf as pandoc doesn't parse %28 correctly + refuri = refuri.replace("(", "%28") #Special case to handle markdown issue with reading first ) + refuri = refuri.replace(")", "%29") + formatted_text = "]({})".format(refuri) + + if self.toctree: + formatted_text += "\n" + + ## if there is a list add to it, else add it to the cell directl + if self.List: + self.List.add_item(formatted_text) + else: + self.cell.append(formatted_text) + + + + def visit_footnote_reference(self, node): + self.footnote_reference['in'] = True + refid = node.attributes['refid'] + ids = node.astext() + self.footnote_reference['link'] = "[{}]".format(refid, refid, ids) + self.cell.append(self.footnote_reference['link']) + raise nodes.SkipNode + + #References(End) diff --git a/sphinxcontrib/jupyter/writers/translate_ipynb.py b/sphinxcontrib/jupyter/writers/translate_ipynb.py new file mode 100644 index 00000000..cf3c00b0 --- /dev/null +++ b/sphinxcontrib/jupyter/writers/translate_ipynb.py @@ -0,0 +1,982 @@ +""" +Translator for RST to IPYNB Conversion +""" + +from __future__ import unicode_literals +import re +import nbformat.v4 +from docutils import nodes, writers +from shutil import copyfile +import copy +import os +import time + +from sphinx.util import logging +from sphinx.util.docutils import SphinxTranslator + +from .translate_code import JupyterCodeBlockTranslator +from .utils import JupyterOutputCellGenerators, get_source_file_name +from .notebook import JupyterNotebook +from .markdown import MarkdownSyntax +from .html import HTMLSyntax +from .accumulators import List, TableBuilder + +logger = logging.getLogger(__name__) + +class JupyterIPYNBTranslator(SphinxTranslator): + + #Configuration (Attribution) + attribution = False + #Configuration (Block Quote) + block_quote = dict() + block_quote['in'] = False + block_quote['block_quote_type'] = "block-quote" #TODO: can this be removed? 
+ #Configuration(Caption) + caption = False + #Configuration(Citation) + citation = dict() + citation['in'] = False + #Configuration (Download) + download_reference = dict() + download_reference['in'] = False + #Configuration (Formatting) + sep_lines = " \n" #TODO: needed? + sep_paragraph = "\n\n" #TODO: needed? + section_level = 0 + #Configuration (Footnote) + footnote = dict() + footnote['in'] = False + footnote_reference = dict() + footnote_reference['in'] = False + #Configuration (File) + default_ext = ".ipynb" + #Configuration (Image) + image = dict() + #Configuration (List) + List = None + #Configuration (Literal Block) + literal_block = dict() + literal_block['in'] = False + literal_block['no-execute'] = False + literal_block['hide-output'] = False + #Configuration (Math) + math = dict() + math['in'] = False + math_block = dict() + math_block['in'] = False + math_block['math_block_label'] = None + #Configuration (Note) + note = False + #Configuration (References) + reference_text_start = 0 + inpage_reference = False + #Configuration (Rubric) + rubric = False + #Configuration (Static Assets) + images = [] + files = [] + #Configuration (Tables) + Table = None + #Configuration (Text) + text = None + #Configuration (Titles) + visit_first_title = True + title = "" + #Configuration (Toctree) + toctree = False + #Configuration (Topic) + topic = False + + #->Review Options + + remove_next_content = False #TODO: what is this? PDF + target = dict() #TODO: needed? + + # Slideshow option + metadata_slide = False #TODO: move to JupyterSlideTranslator + slide = "slide" #TODO: move to JupyterSlideTranslator + + cached_state = dict() #A dictionary to cache states to support nested blocks + URI_SPACE_REPLACE_FROM = re.compile(r"\s") + URI_SPACE_REPLACE_TO = "-" + + def __init__(self, document, builder): + """ + A Jupyter Notebook Translator + + This translator supports the construction of Jupyter notebooks + with an emphasis on readability. 
It uses markdown structures + wherever possible. + + Notebooks geared towards HTML or PDF are available: + 1. JupyterHTMLTranslator, + 2. JupyterPDFTranslator + + Notes + ----- + 1. A capitalised variable name (i.e. List, Table) are attributes that + use an accumulator object to assist with constructing the object + """ + super().__init__(document, builder) + #-Jupyter Settings-# + self.language = self.config["jupyter_language"] #self.language = self.config['highlight_language'] (https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-highlight_language) + self.language_synonyms = self.config['jupyter_language_synonyms'] + src_dir = self.settings.env.srcdir + self.source_file_name = self.settings._source.replace(src_dir+"/", "") + #-Syntax-# + self.syntax = MarkdownSyntax() + self.html_syntax = HTMLSyntax() #for HTML enabled options + + #-Document-# + + def visit_document(self, node): + self.output = JupyterNotebook(language=self.language) + self.new_cell() + + def depart_document(self, node): + self.output.add_metadata_notebook({"filename": self.source_file_name.split("/")[-1], "title": self.title}) + self.output.add_metadata_notebook({"date" : time.time()}) + self.cell_to_notebook() + #TODO: Should this be in the `builder` (otherwise helper function should be used) + if len(self.files) > 0: + for fl in self.files: + src_fl = os.path.join(self.builder.srcdir, fl) + out_fl = os.path.join(self.builder.outdir, os.path.basename(fl)) #copy file to same location as notebook (remove dir structure) + #Check if output directory exists + out_dir = os.path.dirname(out_fl) + if not os.path.exists(out_dir): + os.makedirs(out_dir) + print("Copying {} to {}".format(src_fl, out_fl)) + copyfile(src_fl, out_fl) + + #-Nodes-# + + def visit_attribution(self, node): + self.attribution = True + self.cell.append(self.syntax.visit_attribution()) + + def depart_attribution(self, node): + self.attribution = False + self.add_newline() + + def visit_block_quote(self, node): + 
self.block_quote['in'] = True + if "epigraph" in node.attributes["classes"]: + self.block_quote['block_quote_type'] = "epigraph" + if self.List: + self.add_newline() + return + self.cell.append(self.syntax.visit_block_quote()) + + def depart_block_quote(self, node): + if "epigraph" in node.attributes["classes"]: + self.block_quote['block_quote_type'] = "block-quote" + self.block_quote['in'] = False + self.add_newline() + + def visit_caption(self, node): + self.caption = True + + def depart_caption(self, node): + self.caption = False + if self.toctree: + self.cell.append("\n") + + def visit_citation(self, node): + self.citation['in'] = True + if "ids" in node.attributes: + id_text = "" + for id_ in node.attributes["ids"]: + id_text += "{} ".format(id_) + else: + id_text = id_text[:-1] + self.cell.append(self.syntax.visit_citation(id_text)) + + def depart_citation(self, node): + self.citation['in'] = False + + def visit_comment(self, node): + raise nodes.SkipNode + + def visit_compact_paragraph(self, node): + try: + if node.attributes['toctree']: + self.toctree = True + except: + pass #Should this execute visit_compact_paragragh in BaseTranslator? 
+ + def depart_compact_paragraph(self, node): + try: + if node.attributes['toctree']: + self.toctree = False + except: + pass + + def visit_compound(self, node): + pass + + def depart_compound(self, node): + pass + + def visit_definition(self, node): + self.cell.append(self.syntax.visit_definition()) + self.add_newline() + + def depart_definition(self, node): + self.cell.append(self.syntax.depart_definition()) + self.add_newline() + + def visit_definition_list(self, node): + self.add_newline() + self.cell.append(self.syntax.visit_definition_list()) + self.add_newline() + + def depart_definition_list(self, node): + self.add_newline() + self.cell.append(self.syntax.depart_definition_list()) + self.add_newparagraph() + + def visit_definition_list_item(self, node): + pass + + def depart_definition_list_item(self, node): + pass + + def visit_doctest_block(self, node): + pass + + def depart_doctest_block(self, node): + pass + + def visit_figure(self, node): + pass + + def depart_figure(self, node): + self.add_newline() + + def visit_field_body(self, node): + self.visit_definition(node) + + def depart_field_body(self, node): + self.depart_definition(node) + + def visit_field_list(self, node): + self.visit_definition_list(node) + + def depart_field_list(self, node): + self.depart_definition_list(node) + + def visit_footnote(self, node): + self.footnote['in'] = True + + def depart_footnote(self, node): + self.footnote['in'] = False + + def visit_image(self, node): + """ + Image Directive + + Notes + ----- + 1. Should this use .has_attrs()? + 2. the scale, height and width properties are not combined in this + implementation as is done in http://docutils.sourceforge.net/docs/ref/rst/directives.html#image + 3. HTML images are available in HTMLTranslator (TODO: Should this be an available option here?) 
+ """ + uri = node.attributes["uri"] + self.images.append(uri) + if self.config["jupyter_images_html"]: + attrs = node.attributes + syntax = self.html_syntax.visit_image(uri, attrs) + else: + syntax = self.syntax.visit_image(uri) + self.cell.append(syntax) + + def depart_image(self, node): + pass + + def visit_index(self, node): + pass + + def depart_index(self, node): + pass + + def visit_inline(self, node): + pass + + def depart_inline(self, node): + pass + + def visit_jupyter_node(self, node): + try: + if 'cell-break' in node.attributes: + self.add_markdown_cell() + if 'slide' in node.attributes: + self.metadata_slide = node['slide'] # this activates the slideshow metadata for the notebook + if 'slide-type' in node.attributes: + if "fragment" in node['slide-type']: + self.add_markdown_cell(slide_type=node['slide-type']) #start a new cell + self.slide = node['slide-type'] # replace the default value + except: + pass + #Parse jupyter_dependency directive (TODO: Should this be a separate node type?) 
+ try: + self.files.append(node['uri']) + except: + pass + + def depart_jupyter_node(self, node): + if 'cell-break' in node.attributes: + pass + if 'slide' in node.attributes: + pass + if 'slide-type' in node.attributes: + pass + + def visit_label(self, node): + """ + Notes: footnote requires `html` to create links within the + notebooks as there is no markdown equivalent + """ + if self.footnote['in']: + ids = node.parent.attributes["ids"] + id_text = "" + for id_ in ids: + id_text += "{} ".format(id_) + else: + id_text = id_text[:-1] + self.cell.append("\n**[{}]** ".format(id_text, node.astext())) #TODO: can this be harmonized with HTML + raise nodes.SkipNode + + if self.citation['in']: + self.cell.append(self.syntax.visit_label()) + + def depart_label(self, node): + if self.citation['in']: + self.cell.append(self.syntax.depart_label()) + self.add_space() + + def visit_line(self, node): + pass + + def depart_line(self, node): + pass + + def visit_line_block(self, node): + pass + + def depart_line_block(self, node): + pass + + #List(Start) + + def visit_bullet_list(self, node): + if not self.List: + self.List = List(level=0,markers=dict()) + self.List.increment_level() + + + def depart_bullet_list(self, node): + if self.List is not None: + self.List.decrement_level() + if self.List and self.List.level == 0: + markdown = self.List.to_markdown() + self.cell.append(markdown) + self.List = None + + def visit_enumerated_list(self, node): + if not self.List: + self.List = List(level=0,markers=dict()) + self.List.increment_level() + + def depart_enumerated_list(self, node): + if self.List is not None: + self.List.decrement_level() + + if self.List.level == 0: + markdown = self.List.to_markdown() + self.cell.append(markdown) + self.List = None + + def visit_list_item(self, node): + if self.List: + self.List.set_marker(node) + + #List(End) + + def visit_literal(self, node): + if self.download_reference['in']: + return #TODO: can we just raise Skipnode? 
+ + if self.List: + self.List.add_item(self.syntax.visit_literal()) + else: + self.cell.append(self.syntax.visit_literal()) + + def depart_literal(self, node): + if self.download_reference['in']: + return + if self.List: + self.List.add_item(self.syntax.depart_literal()) + else: + self.cell.append(self.syntax.depart_literal()) + + def visit_literal_block(self, node): + "Parse Literal Blocks (Code Blocks)" + self.literal_block['in'] = True + + # Check if inside a list. + # if in list append the contents till here to the cell, + # and make a new cell for code block + if self.List: + markdown = self.List.to_markdown() + self.cell.append(markdown) + self.cached_state['list_level'] = self.List.getlevel() + self.cached_state['list_marker'] = self.List.get_marker() + self.cached_state['list_item_no'] = self.List.get_item_no() + self.List = None + + #Start new cell and add add current cell to notebook + self.cell_to_notebook() + self.new_cell(cell_type = "code") + + #Check Code Language + if "language" in node.attributes: + self.nodelang = node.attributes["language"].strip() + else: + self.cell_type = "markdown" + if self.nodelang == 'default': + self.nodelang = self.language #use notebook language + + #Check for no-execute status + if "classes" in node.attributes and "no-execute" in node.attributes["classes"]: + self.literal_block['no-execute'] = True + else: + self.literal_block['no-execute'] = False + + #Check for hide-output status + if "classes" in node.attributes and "hide-output" in node.attributes["classes"]: + self.literal_block['hide-output'] = True + else: + self.literal_block['hide-output'] = False + + ## Check node language is the same as notebook language else make it markdown + if (self.nodelang != self.language and self.nodelang not in self.language_synonyms) or self.literal_block['no-execute']: + logger.warning("Found a code-block with different programming \ + language to the notebook language. 
Adding as markdown" + ) + self.cell.append(self.syntax.visit_literal_block(self.nodelang)) + self.add_newline() + self.cell_type = "markdown" + + + def depart_literal_block(self, node): + if (self.nodelang != self.language and self.nodelang not in self.language_synonyms) or self.literal_block['no-execute']: + self.cell.append(self.syntax.depart_literal_block()) + if self.cell_type == "code": + meta = { + 'no-execute' : self.literal_block['no-execute'], + 'hide-output' : self.literal_block['hide-output'] + } + self.cell_to_notebook(metadata=meta) + else: + self.cell_to_notebook() + #Initialise new cell + self.new_cell() + self.literal_block['in'] = False + + ## If this code block was inside a list, then resume the list again just in case there are more items in the list. + if "list_level" in self.cached_state: + self.List = List(self.cached_state["list_level"], self.cached_state["list_marker"], self.cached_state["list_item_no"]) + del self.cached_state["list_level"] + del self.cached_state["list_marker"] + del self.cached_state["list_item_no"] + + def visit_math(self, node): + """ + Inline Math + + Notes + ----- + With sphinx < 1.8, a math node has a 'latex' attribute, from which the + formula can be obtained and added to the text. + + With sphinx >= 1.8, a math node has no 'latex' attribute, which mean + that a flag has to be raised, so that the in visit_Text() we know that + we are dealing with a formula. + + TODO: + 1. Deprecate support for sphinx < 1.8 + """ + self.math['in'] = True + try: # sphinx < 1.8 + math_text = node.attributes["latex"].strip() + except KeyError: + # sphinx >= 1.8 + # the flag is raised, the function can be exited. + return #TODO: raise nodes.SkipNode? 
+ + formatted_text = self.syntax.visit_math(math_text) + + if self.Table: + self.Table.add_item(formatted_text) + else: + self.cell.append(formatted_text) + + def depart_math(self, node): + self.math['in'] = False + + def visit_math_block(self, node): + """ + Math from Directives + + Notes: + ------ + visit_math_block is called only with sphinx >= 1.8 + """ + self.math_block['in'] = True + #check for labelled math + if node["label"]: + #Use \tags in the embedded LaTeX environment + #Haven't included this in self.syntax.MardownSyntax as it should be general across HTML (mathjax), PDF (latex) + self.math_block['math_block_label'] = "\\tag{" + str(node["number"]) + "}\n" + + def depart_math_block(self, node): + self.math_block['in'] = False + + def visit_note(self, node): + self.note = True + self.cell.append(self.syntax.visit_note()) + + def depart_note(self, node): + self.note = False + + def visit_only(self, node): + pass + + def depart_only(self, node): + pass + + def visit_paragraph(self, node): + pass + + def depart_paragraph(self, node): + if self.List: + pass + else: + if self.List and self.List.getlevel() > 0: #TODO: is this ever reach given above if statement? + self.add_newline() + elif self.Table: + pass + elif self.block_quote['block_quote_type'] == "epigraph": + try: + attribution = node.parent.children[1] + self.cell.append("\n>\n") #Continue block for attribution + except: + self.add_newparagraph() + else: + self.add_newparagraph() + + def visit_problematic(self, node): + pass + + def depart_problematic(self, node): + pass + + def visit_raw(self, node): + pass + + def depart_raw(self, node): + self.add_newparagraph() + + def visit_rubric(self, node): + self.rubric = True + self.cell_to_notebook() + self.new_cell(cell_type="markdown") + if len(node.children) == 1 and node.children[0].astext() in ['Footnotes']: + self.cell.append('**{}**\n\n'.format(node.children[0].astext())) #TODO: add to MarkdownSyntax? 
+ raise nodes.SkipNode + + def depart_rubric(self, node): + self.cell_to_notebook() + self.new_cell(cell_type="markdown") + self.rubric = False + + def visit_section(self, node): + if self.config["jupyter_section_blocks"] and self.section_level > 0: + self.cell_to_notebook() + self.new_cell() + self.section_level += 1 + + def depart_section(self, node): + self.section_level -= 1 + + #Table(Start) + + def visit_colspec(self, node): + self.Table.add_column_width(node['colwidth']) + + def visit_entry(self, node): + pass + + def depart_entry(self, node): + pass + + def visit_row(self, node): + self.Table.start_row() + + def depart_row(self, node): + self.Table.end_row() + + def visit_table(self, node): + self.Table = TableBuilder(node) + + def depart_table(self, node): + markdown = self.Table.to_markdown() + self.cell.append(markdown) + self.Table = None + self.add_newline() + + def visit_thead(self, node): + """ Table Header """ + pass + + def depart_thead(self, node): + """ create the header line which contains the alignment for each column """ + self.Table.add_header_line("|") + + def visit_tgroup(self, node): + pass + + def depart_tgroup(self, node): + pass + + def visit_tbody(self, node): + pass + + def depart_tbody(self, node): + pass + + #Table(End) + + def visit_target(self, node): + if "refid" in node.attributes: + self.cell.append("\n\n".format(node.attributes["refid"])) + + def depart_target(self, node): + pass + + #Text(Start) + + def visit_emphasis(self, node): + if self.List: + self.List.add_item(self.syntax.visit_italic()) + else: + self.cell.append(self.syntax.visit_italic()) + + def depart_emphasis(self, node): + if self.List: + self.List.add_item(self.syntax.depart_italic()) + else: + self.cell.append(self.syntax.depart_italic()) + + def visit_strong(self, node): + if self.List: + self.List.add_item(self.syntax.visit_bold()) + else: + self.cell.append(self.syntax.visit_bold()) + + def depart_strong(self, node): + if self.List: + 
self.List.add_item(self.syntax.depart_bold()) + else: + self.cell.append(self.syntax.depart_bold()) + + def visit_Text(self, node): + text = node.astext() + + #Escape Special markdown chars except in code block + if self.literal_block['in'] == False: + text = text.replace("$", "\$") + #Inline Math + if self.math['in']: + text = self.syntax.visit_math(text.strip()) + #Math Blocks + elif self.math_block['in'] and self.math_block['math_block_label']: + text = self.syntax.visit_math_block(text.strip(), self.math_block['math_block_label']) + self.math_block['math_block_label'] = None + elif self.math_block['in']: + text = self.syntax.visit_math_block(text.strip()) + + self.text = text + + def depart_Text(self, node): + #Add text to cell + if self.List: + self.List.add_item(self.text) + elif self.Table: + self.Table.add_item(self.text) + elif self.math_block['in']: + self.cell.append(self.text) + self.add_newparagraph() + elif self.literal_block['in']: + self.cell.append(self.text) + self.add_newline() + elif self.block_quote['in'] or self.note: + if self.block_quote['block_quote_type'] == "epigraph": + self.cell.append(self.text.replace("\n", "\n> ")) #Ensure all lines are prepended (TODO: should this be in MarkdownSyntax) + else: + self.cell.append(self.text) + elif self.caption and self.toctree: #TODO: Check this condition + self.cell.append("# {}".format(self.text)) + else: + self.cell.append(self.text) + + #Text(End) + + def visit_title(self, node): + if self.visit_first_title: + self.title = node.astext() + self.visit_first_title = False + if self.topic: + # this prevents from making it a subsection from section + self.cell.append(self.syntax.visit_title(self.section_level + 1)) + self.add_space() + elif self.Table: + self.Table.add_title(node) + else: + self.cell.append(self.syntax.visit_title(self.section_level)) + self.add_space() + + def depart_title(self, node): + if not self.Table: + self.add_newparagraph() + + def visit_topic(self, node): + self.topic = True 
# -- Reference visitors (JupyterIPYNBTranslator) -------------------------

def depart_topic(self, node):
    self.topic = False

#References(Start)

#TODO: Revisit references to simplify using Sphinx internals
#TODO: move formatting into MarkdownSyntax()

def visit_reference(self, node):
    """Open a markdown link: emit '[' and remember where its text starts."""
    self.in_reference = dict()
    if self.List:
        self.List.add_item("[")
    else:
        self.cell.append("[")
    # Link text accumulates from here; consumed when the reference closes
    self.reference_text_start = len(self.cell)

def depart_reference(self, node):
    """Close a markdown link with its resolved target."""
    formatted_text = ""

    if self.topic:
        # Jupyter Notebook uses the target text itself as its anchor id
        uri_text = node.astext().replace(" ", "-")
        formatted_text = "](#{})".format(uri_text)
    else:
        if "refuri" in node.attributes:
            refuri = node["refuri"]
            # Internal links get the default notebook extension appended
            if "internal" in node.attributes and node.attributes["internal"] == True:
                refuri = self.add_extension_to_inline_link(refuri, self.default_ext)
        elif "refid" in node:
            # In-page link
            refid = node["refid"]
            self.inpage_reference = True
            # Markdown mishandles ')' in targets, so percent-encode parens;
            # pdf targets skip this since pandoc does not parse %28
            refid = refid.replace("(", "%28")
            refid = refid.replace(")", "%29")
            refuri = "#{}".format(refid)
        else:
            self.error("Invalid reference")
            refuri = ""

        # TODO: review whether both %28 replacements in this function are
        # necessary; propose deleting the in-link refuri adjustment above
        refuri = refuri.replace("(", "%28")  # special case: markdown and ')'
        refuri = refuri.replace(")", "%29")
        formatted_text = "]({})".format(refuri)

    if self.toctree:
        formatted_text += "\n"

    # Add to the active list if there is one, else straight to the cell
    if self.List:
        self.List.add_item(formatted_text)
    else:
        self.cell.append(formatted_text)

def visit_title_reference(self, node):
    pass

def depart_title_reference(self, node):
    pass

def visit_download_reference(self, node):
    self.download_reference['in'] = True
    # NOTE(review): this format string renders an empty string regardless of
    # reftarget — it appears mangled in the source (presumably an HTML anchor
    # tag was lost). Confirm against upstream history before changing.
    html = "".format(node["reftarget"])
    self.cell.append(html)

def depart_download_reference(self, node):
    self.download_reference['in'] = False
    self.cell.append("")

def visit_footnote_reference(self, node):
    """Emit the footnote marker as a link; children are skipped."""
    self.footnote_reference['in'] = True
    refid = node.attributes['refid']
    ids = node.astext()
    # TODO: can this be harmonized with the HTML output?
    self.footnote_reference['link'] = "[{}](#{})".format(ids, refid)
    self.cell.append(self.footnote_reference['link'])
    raise nodes.SkipNode

def depart_footnote_reference(self, node):
    self.footnote_reference['in'] = False

#References(End)

def unknown_visit(self, node):
    raise NotImplementedError('Unknown node: ' + node.__class__.__name__)

def unknown_departure(self, node):
    pass

# Nodes (Exercise)
# TODO: are these needed? (they are overwritten by the directive in __init__.py)

def visit_exercise_node(self, node):
    pass

def depart_exercise_node(self, node):
    pass

def visit_exerciselist_node(self, node):
    pass

def depart_exerciselist_node(self, node):
    pass

# Nodes (Review if Needed)
# Field names reuse the definition-list term rendering

def visit_field_name(self, node):
    self.visit_term(node)

def depart_field_name(self, node):
    self.depart_term(node)

") + + def depart_term(self, node): + self.cell.append("
# -- Cell & formatting utilities (JupyterIPYNBTranslator) ----------------
# NOTE(review): reconstructed from a mangled diff chunk; the string tail of
# depart_term() preceding these defs, and the head of the new file
# translate_pdf.py following them, are diff residue and not reproduced.

#Utilities(Jupyter)

def new_cell(self, cell_type="markdown"):
    """Reset the accumulator for a fresh notebook cell."""
    self.cell = []
    self.cell_type = cell_type

def cell_to_notebook(self, metadata=None):
    """Flush the accumulated cell text into the notebook output.

    Empty accumulators are ignored; trailing newlines are trimmed so a
    cell never ends with blank lines.
    """
    if not self.cell:
        return
    # Default the cell type if none was specified
    if not self.cell_type:
        self.cell_type = "markdown"
    source = "".join(self.cell).rstrip("\n")
    self.output.add_cell(source, self.cell_type, metadata)

def add_space(self, n=1):
    self.cell.append(" " * n)

def add_newline(self, n=1):
    self.cell.append("\n" * n)

def add_newparagraph(self):
    self.cell.append("\n\n")

#TODO: is this needed?
def add_markdown_cell(self, slide_type="slide", title=False):
    """Split a markdown cell here.

    * add the slideshow metadata
    * append the accumulated lines to the notebook
    * reset the accumulator
    """
    joined = "".join(self.cell)
    trimmed = self.strip_blank_lines_in_end_of_block(joined)
    slide_info = {'slide_type': self.slide}

    if len(trimmed.strip()) > 0:
        md_cell = nbformat.v4.new_markdown_cell(trimmed)
        if self.metadata_slide:
            # modify the slide metadata on each cell
            md_cell.metadata["slideshow"] = slide_info
            self.slide = slide_type
        if title:
            md_cell.metadata["hide-input"] = True
        self.cell_type = "markdown"
        self.output.add_cell(md_cell, self.cell_type)
    self.new_cell()

#Utilities(Formatting)

@classmethod
def split_uri_id(cls, uri):  # TODO: required?
    """Split *uri* into ``(path, fragment)`` at the first ``#``."""
    return re.match(r"([^\#]*)\#?(.*)", uri).groups()

@classmethod
def add_extension_to_inline_link(cls, uri, ext):
    """Replace an implied/relative target with one carrying *ext*.

    Removes an extension such as ``html`` and replaces it with ``ipynb``.

    .. todo::

        improve implementation for references (looks hardcoded)
    """
    # Extension-less targets and relative ("../") targets get rewritten;
    # anything else (already has an extension) passes through untouched.
    if "." not in uri or "../" in uri:
        if "." not in uri and uri.startswith("#"):
            # pure in-page anchor: nothing to rewrite
            return uri
        base, fragment = cls.split_uri_id(uri)
        if fragment:
            return "{}{}#{}".format(base, ext, fragment)
        return "{}{}".format(base, ext)
    return uri

# ===================
# general methods
# ===================

@staticmethod
def strip_blank_lines_in_end_of_block(line_text):
    """Drop trailing blank (whitespace-only) lines and rejoin the text."""
    lines = line_text.split("\n")
    while lines and not lines[-1].strip():
        lines.pop()
    return "\n".join(lines)
# -- PDF translator visitors (JupyterPDFTranslator) ----------------------
# NOTE(review): reconstructed from a mangled diff chunk; the body of
# visit_document() preceding these defs and the head of visit_title()
# following them are cut off and not reproduced here.

#-Nodes-#

def depart_image(self, node):
    self.add_newline()

# List items

def visit_bullet_list(self, node):
    #TODO: implement depth to skip and other pdf related things
    super().visit_bullet_list(node)

def depart_bullet_list(self, node):
    """Close a bullet level; render the list once the outer level ends."""
    if self.List is not None:
        self.List.decrement_level()
    if self.List and self.List.level == 0:
        # Book-index lists are emitted as raw LaTeX, others as markdown
        if self.in_book_index:
            rendered = self.List.to_latex()
        else:
            rendered = self.List.to_markdown()
        self.cell.append(rendered)
        self.List = None

# math

def visit_math_block(self, node):
    """
    Math from Directives

    Notes:
    ------
    visit_math_block is called only with sphinx >= 1.8
    """
    self.math_block['in'] = True
    # check for labelled math
    if node["label"]:
        # Use \tag in the embedded LaTeX environment. Not part of
        # MarkdownSyntax as it is shared by HTML (mathjax) and PDF (latex).
        self.math_block['math_block_label'] = (
            " \\tag{" + str(node["number"]) + "}"
            + "\\label{" + node["ids"][0] + "}\n")

#Text(Start)

def visit_Text(self, node):
    """Process node text and append it immediately (PDF flavour).

    Unlike the IPYNB translator, text is written here rather than in
    depart_Text; book-index files contribute no body text at all.
    """
    text = node.astext()

    # the index file of the book carries no body text
    if self.in_book_index:
        return

    # Escape special markdown characters except inside a code block
    if not self.literal_block['in']:
        text = text.replace("$", "\$")

    if self.math['in']:
        # Inline math
        text = self.syntax.visit_math(text.strip())
    elif self.math_block['in']:
        label = self.math_block['math_block_label']
        if label:
            # Labelled math block: the label is consumed exactly once
            text = self.syntax.visit_math_block(text.strip(), label)
            self.math_block['math_block_label'] = None
        else:
            text = self.syntax.visit_math_block(text.strip())

    # Route the processed text (TODO: should this move to depart_Text?)
    if self.math_block['in']:
        self.cell.append(text)
        self.add_newparagraph()
    elif self.List:
        self.List.add_item(text)
    elif self.Table:
        self.Table.add_item(text)
    elif self.literal_block['in']:
        self.cell.append(text)
    elif self.block_quote['in'] or self.note:
        if self.block_quote['block_quote_type'] == "epigraph":
            # Prepend every line so the whole passage stays quoted
            # (TODO: should this live in MarkdownSyntax?)
            self.cell.append(text.replace("\n", "\n> "))
        else:
            self.cell.append(text)
    elif self.caption and self.toctree:  # TODO: check this condition
        self.cell.append("# {}".format(text))
    else:
        self.cell.append(text)

#Text(End)

def depart_raw(self, node):
    """Raw html starts its own cell; other raw content ends a paragraph."""
    for attr, value in node.attributes.items():
        if attr == 'format' and value == 'html':
            self.new_cell()
            return
    self.add_newparagraph()

#References(Start)

#TODO: Revisit references to simplify using Sphinx internals
#TODO: move formatting into MarkdownSyntax()

def visit_reference(self, node):
    """Open a reference; topic lists and the book index need special care."""
    # zreferences links are dropped from the book index file
    if self.in_book_index and node.attributes['refuri'] == 'zreferences':
        return

    if self.topic and self.skip_topic_list_content:
        # Drop the in-topic contents listing once per topic
        self.skip_topic_list_content = False
        self.List.decrement_level()
        raise nodes.SkipNode

    self.in_reference = dict()

    if self.List:
        marker = self.List.get_marker()  # NOTE(review): marker unused — confirm side effects
        if not self.in_book_index and not self.topic:
            self.List.add_item("[")
    else:
        self.cell.append("[")

def depart_reference(self, node):
    """Close a reference, emitting LaTeX or markdown depending on target."""
    subdirectory = False
    formatted_text = ""

    # removing zreferences from the index file
    if self.in_book_index and node.attributes['refuri'] == 'zreferences':
        return

    if self.topic:
        # Jupyter Notebook uses the target text as its id: strip anything
        # LaTeX would choke on and emit a plain \ref with the text title
        uri_text = node.astext().replace(" ", "-").lower()
        SPECIALCHARS = [r"!", r"@", r"#", r"$", r"%", r"^", r"&", r"*", r"(", r")",
                        r"[", r"]", r"{", r"}", r"|", r":", r";", r",", r"?", r"'",
                        r"’", r"–", r"`"]
        for char in SPECIALCHARS:
            uri_text = uri_text.replace(char, "")
        uri_text = uri_text.replace("--", "-")
        uri_text = uri_text.replace(".-", ".")
        formatted_text = " \\ref{" + uri_text + "}"  # use \ref and plain-text titles
    else:
        # if refuri exists, then it includes an id reference
        if "refuri" in node.attributes:
            refuri = node["refuri"]
            if "internal" in node.attributes and node.attributes["internal"] == True:
                #TODO: cross check this if/else condition again with translate_all
                if 'references#' in refuri:
                    # Bibliography citation: swap the popped text for \cite
                    label = refuri.split("#")[-1]
                    bibtex = self.cell.pop()
                    if len(self.cell) > 1 and "hyperlink" in self.cell[-1]:
                        self.cell.pop()
                    refuri = "reference-\\cite{" + label
                    self.add_bib_to_latex(self.output, True)
                elif 'references' not in refuri:
                    # Internal page link: point at the built html page
                    parts = self.source_file_name.split('/')
                    if len(parts) >= 2 and parts[-2] and 'rst' not in parts[-2]:
                        subdirectory = parts[-2]
                    if subdirectory:
                        refuri = subdirectory + "/" + refuri
                    hash_index = refuri.rfind("#")
                    if hash_index > 0:
                        refuri = refuri[0:hash_index] + ".html" + refuri[hash_index:]
                    else:
                        refuri = refuri + ".html"
                    if self.urlpath:
                        formatted_text = "]({})".format(self.urlpath + refuri)
                    else:
                        formatted_text = "]({})".format(refuri)
                else:
                    refuri = self.add_extension_to_inline_link(refuri, self.default_ext)
        else:
            # in-page link
            if "refid" in node:
                refid = node["refid"]
                self.inpage_reference = True
                # markdown mishandles ')' in targets; percent-encode parens
                # (skipped for pdf output since pandoc cannot parse %28)
                refid = refid.replace("(", "%28")
                refid = refid.replace(")", "%29")
                refuri = "#{}".format(refid)
            else:
                self.error("Invalid reference")
                refuri = ""

        #TODO: review if both %28 replacements are necessary in this function?
        # Propose deleting the in-link refuri adjustment above
        #TODO: apply these checks when pushing to a list as well?
        if 'reference-' in refuri:
            formatted_text = refuri.replace("reference-", "") + "}"
        elif ("refuri" in node.attributes and "internal" in node.attributes
                and node.attributes["internal"] == True
                and "references" not in node["refuri"]):
            # Below code constructs an index file for the book
            if self.in_book_index:
                if self.List.level > 1:
                    # not the top level, so probably a chapter
                    formatted_text = "\\chapter{{{}}}\\input{{{}}}".format(
                        node.astext(), node["refuri"] + ".tex")
                else:
                    formatted_text = "\\cleardoublepage\\part{{{}}}".format(node.astext())
        elif "refuri" in node.attributes and "http" in node["refuri"]:
            # handling external links
            formatted_text = "]({})".format(refuri)
        elif self.inpage_reference:
            labeltext = self.cell.pop()
            # equations do not need label text
            if 'equation-' in refuri:
                formatted_text = refuri + "}"
            else:
                formatted_text = refuri + "}{" + labeltext + "}"

        # if self.in_toctree:
        #     #TODO: this will become an internal link when making a single unified latex file
        #     formatted_text = " \\ref{" + refuri + "}"
        #     self.cell.append(formatted_text)

    if self.toctree:
        formatted_text += "\n"

    # add to the active list if there is one, else straight to the cell
    if self.List:
        marker = self.List.get_marker()  # NOTE(review): marker unused — confirm side effects
        self.List.add_item(formatted_text)
    else:
        self.cell.append(formatted_text)

#References(End)

def visit_target(self, node):
    """Emit a raw LaTeX \\hypertarget for non-equation link targets."""
    if "refid" not in node.attributes:
        return
    refid = node.attributes["refid"]
    if 'equation' in refid:
        # labelled math already carries its own \label; no extra target
        return
    if self.cell:
        self.cell.append("\n\\hypertarget{" + refid + "}{}\n\n")
# NOTE(review): reconstructed from a mangled diff chunk; the tail of
# visit_title() preceding these defs and the utils.py diff residue
# following them are cut off and not reproduced.

def depart_title(self, node):
    """Close a title (PDF flavour).

    The top-level document title is suppressed in visit_title (it comes
    from notebook metadata), so at section level 1 we only start a new
    cell; lower-level headings end their paragraph. Table titles need no
    terminator at all.
    """
    if not self.Table:
        if self.section_level == 1 and not self.topic:
            self.new_cell()
            return
        self.add_newparagraph()

@classmethod
def add_bib_to_latex(cls, nb, boolean):
    """Record whether a bibliography should be included in the LaTeX build.

    Sets the ``bib_include`` flag inside the notebook's ``latex_metadata``
    object, creating the metadata dict if it does not exist yet.

    Parameters
    ----------
    nb : notebook output object
        Must provide ``get_metadata(name, default)`` and
        ``add_metadata_notebook(dict)``.
    boolean : bool
        True to include the bibliography.
    """
    # Fix: the first parameter of this @classmethod was previously named
    # ``self``; renamed to ``cls`` (passed implicitly, so callers are
    # unaffected).
    latex_metadata = nb.get_metadata('latex_metadata', {})
    latex_metadata['bib_include'] = boolean
    nb.add_metadata_notebook(latex_metadata)
@@ -21,7 +21,7 @@ class LanguageTranslator: The data itself is stored in an XML file within the templates directory configured in conf.py; deciding whether this is the most appropriate place to store that - information is a @todo + information is a @TODO By default, if there is no entry in the XML file for a given language, the translator will return the language it was given; this decision was predicated on the fact that @@ -86,9 +86,9 @@ def GetGeneratorFromClasses(obj, node): class_list = node.attributes['classes'] for item in class_list: - if item == "no-execute" and not obj.jupyter_ignore_no_execute: + if item == "no-execute": res["type"] = JupyterOutputCellGenerators.MARKDOWN - elif item == "skip-test" and not obj.jupyter_ignore_skip_test: + elif item == "skip-test": res["type"] = JupyterOutputCellGenerators.MARKDOWN elif item == "output": res["type"] = JupyterOutputCellGenerators.CODE_OUTPUT @@ -143,57 +143,31 @@ def _str_to_lines(x): return x -def copy_dependencies(builderSelf, outdir = None): - """ - Copies the dependencies of source files or folders specified in the config to their respective output directories - """ - if outdir is None: - outdir = builderSelf.outdir - else: - outdir = outdir - srcdir = builderSelf.srcdir - if 'jupyter_dependencies' in builderSelf.config and builderSelf.config['jupyter_dependencies'] is not None: - depenencyObj = builderSelf.config['jupyter_dependencies'] - for key, deps in depenencyObj.items(): - full_src_path = srcdir + "/" + key - if full_src_path.find('.') == -1: - ## handling the case of key being a directory - full_dest_path = outdir + "/" + key - ensuredir(full_dest_path) - for dep in deps: - copy(full_src_path + "/" + dep, full_dest_path,follow_symlinks=True) - elif os.path.isfile(full_src_path): - ## handling the case of key being a file - # removing the filename to get the directory path - index = key.rfind('/') - if index!=0 and index != -1: - key = key[0:index] - - full_src_path = srcdir + "/" + key - 
# NOTE(review): reconstructed from a mangled diff chunk; the surrounding
# removed-code lines (copy_dependencies, python27_glob) and the Makefile
# diff header on this span are patch residue and not reproduced.

def get_list_of_files(dirname):
    """Recursively collect the full paths of every file below *dirname*."""
    collected = []
    for entry in os.listdir(dirname):
        full_path = os.path.join(dirname, entry)
        if os.path.isdir(full_path):
            # descend into subdirectories
            collected.extend(get_list_of_files(full_path))
        else:
            collected.append(full_path)
    return collected

def get_subdirectory_and_filename(filename):
    """Split a full path into ``(subdirectory, filename)`` on the last '/'.

    Paths with no directory part — including a single leading '/' —
    return an empty subdirectory and the input unchanged.
    """
    directory, sep, name = filename.rpartition('/')
    if sep and directory:
        return directory, name
    return '', filename
"$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -.PHONY: help Makefile pdf rst-test +.PHONY: help Makefile pdf rst-test code -test: clean clean-pdf jupyter pdf - python check_diffs.py +test: test-base test-execute test-no-inline -no-inline: - @$(SPHINXBUILD) -M jupyter "$(SOURCENOEX)" "$(BUILDNOEX)" $(SPHINXOPTS) $(O) - -clean-no-inline: - rm -rf $(BUILDNOEX) - -no-inline-test: clean-no-inline no-inline - python check_diffs.py +test-base: clean jupyter + python check_diffs.py base -preview: -ifneq (,$(filter $(parallel),website Website)) - cd _build/jupyter_html/ && python -m http.server -else -ifdef lecture - cd _build/jupyter/ && jupyter notebook $(basename $(lecture)).ipynb -else - cd _build/jupyter/ && jupyter notebook -endif -endif +test-execute: clean-execute build-execute + python check_diffs.py execute -clean-coverage: - rm -rf $(BUILDCOVERAGE) +test-no-inline: clean-no-inline no-inline + python check_diffs.py no_inline_exercises -clean-website: - rm -rf $(BUILDWEBSITE) +build-execute: + @$(SPHINXBUILD) -M jupyter "$(SOURCEEXECUTE)" "$(BUILDEXECUTE)" $(SPHINXOPTS) $(O) -clean-pdf: - rm -rf $(BUILDPDF) +build-no-inline: + @$(SPHINXBUILD) -M jupyter "$(SOURCENOEX)" "$(BUILDNOEX)" $(SPHINXOPTS) $(O) -#### TODO - Add these back in once test cases are setup #### -# coverage: -# ifneq (,$(filter $(parallel),true True)) -# @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDCOVERAGE)" $(SPHINXOPTS) $(O) -D jupyter_make_coverage=1 -D jupyter_execute_notebooks=1 -D jupyter_ignore_skip_test=0 -D jupyter_number_workers=$(CORES) -# else -# @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDCOVERAGE)" $(SPHINXOPTS) $(O) -D jupyter_make_coverage=1 -D jupyter_execute_notebooks=1 -D jupyter_ignore_skip_test=0 -# endif +clean-execute: + rm -rf $(BUILDEXECUTE) -# website: -# ifneq (,$(filter $(parallel),true True)) -# @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDWEBSITE)" $(SPHINXOPTS) $(O) -D jupyter_make_site=1 -D jupyter_generate_html=1 -D jupyter_download_nb=1 -D 
jupyter_execute_notebooks=1 -D jupyter_coverage_dir=$(BUILDCOVERAGE) -D jupyter_number_workers=$(CORES) -# else -# @$(SPHINXBUILD) -M jupyter "$(SOURCEDIR)" "$(BUILDWEBSITE)" $(SPHINXOPTS) $(O) -D jupyter_make_site=1 -D jupyter_generate_html=1 -D jupyter_download_nb=1 -D jupyter_execute_notebooks=1 -D jupyter_coverage_dir=$(BUILDCOVERAGE) -# endif +clean-no-inline: + rm -rf $(BUILDNOEX) +clean-html: + rm -rf $(BUILDHTML) -pdf: - @$(SPHINXBUILD) -M jupyterpdf "$(SOURCEPDF)" "$(BUILDPDF)" $(SPHINXOPTS) $(O) -D jupyter_number_workers=$(CORES) +clean-pdf: + rm -rf $(BUILDPDF) # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). diff --git a/tests/base/conf.py b/tests/base/conf.py index d3423f20..76d0490d 100644 --- a/tests/base/conf.py +++ b/tests/base/conf.py @@ -176,12 +176,7 @@ # sphinxcontrib-jupyter Configuration Settings # -------------------------------------------- -# Conversion Mode Settings -# If "all", convert codes and texts into jupyter notebook -# If "code", convert code-blocks only -jupyter_conversion_mode = "all" - -jupyter_write_metadata = False +jupyter_execute = False # Location for _static folder jupyter_static_file_path = ["_static"] @@ -192,32 +187,62 @@ 'exercises' : ['footnotes'], } -# Configure Jupyter Kernels -jupyter_kernels = { - "python3": { - "kernelspec": { - "display_name": "Python", - "language": "python3", - "name": "python3" - }, - "file_extension": ".py", - }, -} - # Default language for Jupyter notebooks -jupyter_default_lang = "python3" - -# Prepend a Welcome Message to Each Notebook -jupyter_welcome_block = "welcome.rst" +jupyter_language = "python3" # Solutions Configuration -jupyter_drop_solutions = True +jupyter_solution_notebook = True # Tests configurations jupyter_drop_tests = True # Add Ipython, Pycon and python as language synonyms -jupyter_lang_synonyms = ["ipython", "python", "pycon", "ipython3"] +jupyter_language_synonyms = ["ipython", 
"python", "pycon", "ipython3"] exercise_include_exercises = True exercise_inline_exercises = True + +# Location of template folder for coverage reports +jupyter_coverage_template = False + +# html template specific to your website needs +jupyter_html_template = "" + +# latex template specific to your website needs +jupyter_template_latex = "" + +#force markdown image inclusion +jupyter_images_markdown = True + +#This is set true by default to pass html to the notebooks +jupyter_allow_html_only=True + +## Theme specific variables +jupyter_theme = 'theme' + +### pdf options +jupyter_pdf_logo = "_static/img/qe-menubar-logo.png" + +jupyter_bib_file = "_static/quant-econ" + +jupyter_pdf_author = "Thomas J. Sargent and John Stachurski" + +# Exclude Document Patterns for PDF Construction +jupyter_pdf_excludepatterns = ["404", "index", "references"] + +# Set urlpath for html links in documents +jupyter_pdf_urlpath = "https://lectures.quantecon.org/py/" + +# make book +jupyter_pdf_book = True + +# book title +jupyter_pdf_book_title = "Quantitative Economics with Python" + +# pdf book name +jupyter_pdf_book_name = "quantitative_economics_with_python" + +# pdf toc file +jupyter_pdf_book_index = "index" + +jupyter_execute_allow_errors = True diff --git a/tests/base/index.rst b/tests/base/index.rst index 00ca868c..39fd2ad4 100644 --- a/tests/base/index.rst +++ b/tests/base/index.rst @@ -32,6 +32,7 @@ Welcome to sphinxcontrib-jupyter.minimal's documentation! simple_notebook slides solutions + syntax tables tests diff --git a/tests/base/tables.rst b/tests/base/tables.rst index 15f78f06..894c8cf7 100644 --- a/tests/base/tables.rst +++ b/tests/base/tables.rst @@ -32,6 +32,17 @@ False True False True True True ===== ===== ======= +Math Tables +----------- + +============= ============================================ =========== +Treat Random Math Description +============= ============================================ =========== +Albatross :math:`n_0(s_0, b_0)` On a stick! 
+Crunchy Frog :math:`\beta \sum_{s=1}^S \Pi(s | s_0) x(s)` On a stick! +Gannet Ripple :math:`s \in [1,\ldots, S]` On a stick! +============= ============================================ =========== + Directive Table Types --------------------- diff --git a/tests/base/theme/static/css/python.css b/tests/base/theme/static/css/python.css new file mode 100644 index 00000000..8ec46e7c --- /dev/null +++ b/tests/base/theme/static/css/python.css @@ -0,0 +1,781 @@ + +/* python.css v1.0 */ + +/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */html{line-height:1.15;-webkit-text-size-adjust:100%}body{margin:0}main{display:block}h1{font-size:2em;margin:.67em 0}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent}abbr[title]{border-bottom:none;text-decoration:underline;text-decoration:underline dotted}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{border-style:none;padding:0}[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring,button:-moz-focusring{outline:1px dotted ButtonText}fieldset{padding:.35em .75em 
.625em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details{display:block}summary{display:list-item}template{display:none}[hidden]{display:none} +/*! HTML5 Boilerplate v7.2.0 | MIT License | https://html5boilerplate.com/ */html{color:#222;font-size:1em;line-height:1.4}::-moz-selection{background:#b3d4fc;text-shadow:none}::selection{background:#b3d4fc;text-shadow:none}hr{display:block;height:1px;border:0;border-top:1px solid #ccc;margin:1em 0;padding:0}audio,canvas,iframe,img,svg,video{vertical-align:middle}fieldset{border:0;margin:0;padding:0}textarea{resize:vertical}.browserupgrade{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.hidden{display:none!important}.sr-only{border:0;clip:rect(0,0,0,0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;white-space:nowrap;width:1px}.sr-only.focusable:active,.sr-only.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;white-space:inherit;width:auto}.invisible{visibility:hidden}.clearfix:after,.clearfix:before{content:" ";display:table}.clearfix:after{clear:both}@media print{*,:after,:before{background:0 0!important;color:#000!important;-webkit-box-shadow:none!important;box-shadow:none!important;text-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}a[href^="#"]:after,a[href^="javascript:"]:after{content:""}pre{white-space:pre-wrap!important}blockquote,pre{border:1px solid 
#999;page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}} + +body { + color:#444; + background: #B8BAC3; + font-family: "Source Sans Pro", sans-serif; + font-size:1rem; + line-height: 1.4; + text-rendering: optimizeLegibility; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} + +h1, h2, h3, h4, h5 { + font-weight: normal; + font-family: 'Droid Serif', serif; +} +h1 { + font-size:2em; + color: #333; +} +h2 { + font-size: 1.7em; +} +h3 { + font-size:1.4em; +} +h4 { + font-size:1.2em; + font-family: "Source Sans Pro", sans-serif; + color: #000; +} + +strong, b { + font-weight: 700; +} + +li { + margin: 0.5em 0; +} + +a { + color:#0072bc; + text-decoration: none; + transition: all .15s linear; + overflow-wrap: break-word; +} +a:link {} +a:hover { + color:#004979; + text-decoration: underline; +} +a:active {} +a:visited { + color:#004979; +} + +pre { + white-space: pre-wrap; + word-wrap: break-word; +} + +cite, code, tt { + font-family: 'Source Code Pro', monospace; + letter-spacing: 0.01em; + background-color: #efefef; + font-style: normal; + border: 1px dotted #cccccc; + border-radius: 2px; + padding: 0 2px; + font-size: 0.9em; + overflow-wrap: break-word; +} + +.content .table-container { + overflow-x: scroll; +} +.content table { + max-width: 100%; + border-collapse: collapse; + border:0; + background-color: transparent; +} +.content table tbody tr:nth-child(odd) { + background-color: #f7f7f7; +} +.content table td, .content table th { + padding: .25rem 0.75rem; + text-align: left; + vertical-align: top; + border:0; +} +.content table th { + font-weight: bold; +} +.content table thead tr th { + text-align:left !important; +} +.content table thead th, .content table thead td { + vertical-align: bottom; + border:0; + border-top:0; + border-bottom: 1px solid #e1e1e1; +} + +.wrapper { + width: 1024px; + box-shadow: 0px 0px 5px 0px rgba(50, 50, 50, 
0.4); + margin: 20px auto 23px auto; + position: relative; + background: #fff; +} + +.content { + padding: 2rem 4rem; + position: relative; + max-width: 1024px; + margin: 0 auto; + box-sizing: border-box; +} + + +/* Header */ + +.header { + background:#23262c; + color:#fff; + padding: 2.5rem 4rem; + display: flex; + justify-content: space-between; +} + +.branding { + font-family: 'Droid Serif', serif; + background:url(/_static/img/logo.png) no-repeat left 5px; + padding:0 0 0 65px; +} + +.site-title { + font-size:1.5em; + margin:0 0 15px 0; + line-height: 1; +} +.site-title a { + color:#fff; + text-decoration: none; +} +.site-title a span { + display: block; + font-size: 1rem; + line-height: 1.5; +} + +.site-authors { + list-style: none; + padding:0 0 0 5px; + margin:0; + font-size:0.9em; +} +.site-authors li { + display:inline-block; + margin:0 1rem 0 0; +} +.site-authors li a { + color:#fff;text-decoration: none; +} + +.site-search { + display:block; + margin:0; + width: 215px; +} +.search-searchbox { + margin: 2rem 1rem; +} +.site-search .gsc-input { + background: transparent !important; + color:#fff; + font-size: 0.9rem; +} +.site-search .gsc-input-box { + background: transparent !important; + border:1px solid #a1a1a1; + border-radius:2px; +} +.site-search .gsib_b { + display: none; +} +.site-search td.gsc-search-button { + background: url(/_static/img/search-icon.png) no-repeat left top; + background-size: 16px 16px; + background-position: 0 5px; + cursor: pointer; +} +.site-search button.gsc-search-button { + border: 0; + width: 16px; + height: 16px; + padding: 0px 0px; + outline: 0; + opacity: 0; + cursor: pointer; +} +.search-searchbox td.gsc-search-button { + background: url(/_static/img/search-icon.png) no-repeat left top; + background-size: 16px 16px; + background-position: center center; + border: 1px solid #898989; + background-color: #23262C; +} +.search-searchbox button.gsc-search-button { + border: 0; + width: 60px; + height: 26px; + padding: 0px 
0px; + outline: 0; + opacity: 0; + display: block; + margin: 0; +} +.gsc-input::-webkit-input-placeholder { + opacity:0; +} +.gsc-input::-moz-placeholder { + opacity:0; +} +.gsc-input:-ms-input-placeholder { + opacity:0; +} +.gsc-input:-moz-placeholder { + opacity:0; +} + +.header-badge { + margin: 10px 30px 0 0; + opacity: 0.8; + display: flex; + justify-content: flex-end; +} + + +/* Homepage */ + +.home-intro { + display: flex; + align-content: center; +} +.home-detail { + +} +.home-blurb { + font-size: 1.1rem; + line-height: 1.5; +} +.home-intro .sponsor { + list-style: none; + padding:0; + flex-shrink: 0; + margin:0 60px 0 4rem; + text-align: center; +} +.home-intro .sponsor li { + display: block; + margin:1rem 0; + padding:0; +} +.web-version { + display:inline-block; + padding: 2rem 0rem; +} +.web-version a { + display: block; + color: #23262c; + border:1px solid #23262c; + border-radius: 5px; + padding:1rem 40px 1rem 120px; + position: relative; +} +.web-version a:hover { + border-color: #23262c; + text-decoration: none; + background:#23262c; + color:#fff; +} +.web-version a:hover p { + color: #eee; +} +.web-version a .thumb { + position: absolute; + left:40px; + top:1rem; + font-size:3.5rem; + color: #23262c; +} +.web-version a .thumb img { + width:50px; +} +.web-version a h2 { + line-height: 1; + margin:0; + font-size: 1.4rem; +} +.web-version a p { + margin:10px 0 0 0; + color: #444; +} + +.home-alternatives { + padding: 1rem 0rem; +} +.home-alternatives ul { + list-style: none; + padding:0; + margin:0 0; +} +.home-alternatives li { + padding:0; + margin:1rem 1rem; +} +.home-alternatives li a { + display: block; + color: #23262c; + border:1px solid #ddd; + border-radius: 5px; + padding:1rem 25px 1rem 75px; + position: relative; +} +.home-alternatives li a:hover { + border-color: #23262c; + text-decoration: none; + background:#23262c; + color:#fff; +} +.home-alternatives li a:hover svg { + color: #fff; +} +.home-alternatives li a:hover p { + color: #eee; 
+} +.home-alternatives li a svg { + position: absolute; + left:25px; + top:1rem; + font-size:2rem; + color: #23262c; +} +.home-alternatives li a h3 { + line-height: 1; + margin:0; + font-size: 1.2rem; +} +.home-alternatives li a p { + margin:10px 0 0 0; + color: #444; +} + + +/* Footer */ + +.footer { + font-size:0.8em; + color:#898989; + background:#f0f0f0; + border-top: 1px dashed #BFBFBF; + position: relative; + padding: 2rem 4rem; + max-width: 1024px; + margin: 0 auto; + box-sizing: border-box; +} +.footer a { + color:#898989; +} +.footer .logo { + float:right; + margin:0 0 1rem 1rem; +} +.footer .logo img { + width: 140px; +} + + +/* Floating page tools */ + +.page-tools { + position: fixed; + bottom:0px; + width:100%; + z-index:99; + font-size: 0.8em; +} +.page-tools ul { + margin: 0 auto; + padding: 0; + list-style: none; + max-width: 1024px; +} +.page-tools a, .page-tools .title { + color: #fff; + text-decoration: none; + display: block; + padding: 3px 5px 0px 5px; + background:#1B95E0; + height:20px; +} +.page-tools li { + float:right; + margin:0; +} +.page-tools .icon { + font-size: 1.2em; + position: relative; + top: 2px; +} +.page-tools .top a { + background: #23262c; +} + + +/* Content area */ + +.lecture-options { + display: flex; + justify-content: space-between; + margin:1rem 0; + padding: 7px 1rem 5px 1rem; + border: 1px solid #ddd; + background: #f8f8f8; + border-radius: 2px; +} +.lecture-options ul { + list-style: none; + margin:0; + padding:0; + display: flex; +} + +.lecture-options li { + margin: 0 0.5rem; +} +.lecture-options li a { + display: block; + color: #444; + font-size: 0.8rem; +} +.lecture-options li a:hover { + color: #004979; + text-decoration: none; +} +.lecture-options li a svg { + margin:0 3px 0 0; + color: #444; +} +.lecture-options li a:hover svg { + color: #0072bc; +} + +#qe-notebook-header { + display: none; +} + +.internal em { + font-style: normal; +} + +.footnote-reference { + vertical-align: super; + font-size: 0.9em; + 
line-height: 1; +} + +.index-tabs { + margin:3rem 0; + list-style: none; + padding:0; + display: flex; + border:1px solid #DEDEE2; + border-width: 0 0 1px 1px; + border-radius: 5px 5px 0 0; +} +.index-tabs li { + position: relative; + bottom:-1px; + margin:0; +} +.index-tabs li a { + border-width:1px 1px 1px 0; + border-radius: 5px 5px 0 0; + padding:1rem 2rem; + color: #444; + font-weight: 700; + display: block; + border:1px solid #DEDEE2; + background-color: #F2F2F6; +} +.index-tabs li a:hover { + background-color: #fbfbfb; + text-decoration: none; +} +.index-tabs li.active a { + border-bottom:1px solid #fff; + background-color: #fff; + font-weight: 700; +} + +.ml-a { + margin-left: auto !important; +} + +.breadcrumbs { + background: #f8f8f8; + padding:0.5rem 2rem; + font-size: 0.9em; + border-bottom: 1px solid #ddd; +} +.breadcrumbs ul { + list-style: none; + margin:0; + padding:0; +} +.breadcrumbs ul li { + margin:0 10px 0 0; + display:inline-block; +} +.breadcrumbs ul li a { + margin:0 10px 0 0; +} + +div[class^='collapse'] .highlight { + height: 22.4em; + overflow: hidden; + margin-bottom: 0; +} +div[class^='collapse'].expanded .highlight { + height:auto; +} +div[class^='collapse'] .highlight:after { + content : ""; + position : absolute; + z-index : 1; + bottom : 0; + left : 0; + pointer-events : none; + background: url(/_static/img/code-block-fade.png) repeat-x bottom left; + width : 100%; + height : 100%; +} +div[class^='collapse'].expanded .highlight:after { + content: none; +} +div[class^='collapse'] .toggle { + display: block; + border: 1px solid #ddd; + border-width: 0px 1px 1px 1px; + padding: 0.5rem 25px; + outline: 0; + position: relative; + text-align: center; +} +div[class^='collapse'] .toggle:hover { + text-decoration: none; + background: #f7f7f7; +} +div[class^='collapse'] .toggle span { + color: #444; + position: relative; + top: 3px; + left: -5px; +} +div[class^='collapse'] .toggle em { + font-style: normal; +} +div.collapse-5 .highlight 
{height:7em;}div.collapse-6 .highlight {height:8.4em;}div.collapse-7 .highlight {height:9.8em;}div.collapse-8 .highlight {height:11.2em;}div.collapse-9 .highlight {height:12.6em;}div.collapse-10 .highlight {height:14em;}div.collapse-11 .highlight {height:15.4em;}div.collapse-12 .highlight {height:16.8em;}div.collapse-13 .highlight {height:18.2em;}div.collapse-14 .highlight {height:19.6em;}div.collapse-15 .highlight {height:21em;}div.collapse-16 .highlight {height:22.4em;}div.collapse-17 .highlight {height:23.8em;}div.collapse-18 .highlight {height:25.2em;}div.collapse-19 .highlight {height:26.6em;}div.collapse-20 .highlight {height:28em;}div.collapse-21 .highlight {height:29.4em;}div.collapse-22 .highlight {height:30.8em;}div.collapse-23 .highlight {height:32.2em;}div.collapse-24 .highlight {height:33.6em;}div.collapse-25 .highlight {height:35em;} + +.status-table-container { + /* overflow-x: scroll; */ +} +.status-table { + width:100%; + border:0; +} +.status-table tr:nth-of-type(even) { + background-color: #f9f9f9; +} +.status-table tr th { + /* display: none; */ + vertical-align: bottom; + border-bottom: 2px solid #ddd; + padding: 8px; + line-height: 1.42857143; + text-align: left; + font-weight: bold; +} +.status-table tr td { + /* display: block; */ + padding: 8px; + line-height: 1.42857143; + vertical-align: top; + border-top: 1px solid #ddd; +} + +.contents {padding: 0 !important; border: 1px solid #ddd !important; border-width: 0 0 0 1px !important;margin:0 0 0 20px !important;} +.contents .topic-title {display:none;} +.contents>ul {list-style: none;padding:0;} +.contents li {margin:0;} +.contents>ul>li>a {display:none;} +.contents>ul>li>ul {list-style: disc;} +.contents>ul>li>ul>li>ul {display:none;} + +/* Lecture TOC */ +#Contents {border: 0;clip: rect(0 0 0 0);height: 1px;margin: -1px;overflow: hidden;padding: 0;position: absolute;width: 1px;} +#Contents + ul {list-style:none;padding: 0 !important; border: 1px solid #ddd !important; border-width: 0 0 0 
1px !important;margin:0 0 0 20px !important;} +#Contents + ul>li {margin:0;} +#Contents + ul>li>a {display: none;} +#Contents + ul>li>ul {list-style: disc;} +#Contents + ul>li>ul>li {margin:0;} + +/* Lecture heading anchor links */ +.anchor-link {visibility: hidden;text-decoration: none;color:#555;margin-left: 6px;padding: 0 4px 0 4px;font-family: "Source Sans Pro", sans-serif;font-size: 0.8em;} +.anchor-link:hover {color:#555;} +*:hover>.anchor-link {visibility: visible;} + +.cell .input, .cell .output {position: relative;} +.cell .output .prompt, .cell .input .prompt {visibility:hidden;position: absolute;top:0rem;left: -55px;width:45px;} +.cell .output .prompt:before, .cell .input .prompt:before {visibility:visible;position:absolute;top:0rem;right:0;content:"Out"; font-weight: bold;font-size: 16px;text-align: right;color:#D84315;font-family:monospace, serif;font-weight: 400;} +.cell .input .prompt:before {content:"In";color:#303F9F;top:0.25rem;} + +div.content-table ~ table {border: 0;border-collapse: collapse;} +div.content-table ~ table td, div.content-table ~ table th {padding: 1px 8px 1px 5px;border-top: 0;border-left: 0;border-right: 0;border-bottom: 1px solid #aaa;} + +.headerlink {visibility: hidden;text-decoration: none;color:#555;margin-left: 6px;padding: 0 4px 0 4px;font-family: "Source Sans Pro", sans-serif;font-size: 0.8em;} +.headerlink:hover {color:#555;} +*:hover>.headerlink {visibility: visible;} + +.rendered_html img {max-width: 100%;display: block;margin: 0 auto;} +.output_png img {max-width: 100%;display: block;margin: 0 auto;} + +.math {color:#333;margin: 2em 0;} +a .math {color: #0072bc;} +span.math {font-size:0.92rem;} +.MathJax {color:#333;margin: 2em 0;} +a .MathJax {color: #0072bc;} +span.MathJax {font-size:0.92rem;} + + +/* Device media styles */ + +@media only screen and (max-width: 1024px) { + .wrapper { + width:auto; + margin:0; + } +} + +@media only screen and (max-width: 768px) { + .header, .footer, .content, .home-alternatives { + 
padding-left: 2.5rem; + padding-right: 2.5rem; + } + .header-tools { + display: none; + } + .home-intro { + display: block; + } + .home-intro .sponsor { + margin:0 auto; + } + .web-version { + display:block; + } + .home-alternatives { + padding: 2rem 0; + } + .lecture-options { + flex-direction: column; + } + .lecture-options li { + margin:0.2rem 0; + } + .cell .output .prompt:before, .cell .input .prompt:before { + font-size: 14px; + } + .index-tabs { + flex-direction: column; + border-width: 0 0 1px 0; + } + .index-tabs li a { + border-width:1px 1px 0 1px; + border-radius: 0; + padding:0.5rem 1rem; + } + .lecture-options ul { + display: block; + } + .status-table-container { + overflow-x: scroll; + } + .status-table tr th { + display: none; + } + .status-table tr td { + display: block; + } +} + +/* Syntax highlighting */ +.highlight {padding:0 10px;border:1px solid #e1e1e1;margin:0.5rem 0;background:#f7f7f7;border-radius: 2px;} +.highlight {position: relative;} +.highlight:before {position: absolute;top:0.25rem;left:-40px;font-weight: bold;width:25px;text-align: left;color:#303F9F;font-family:monospace, serif;font-weight: 400;} +.highlight-none .highlight:before {content: "Out";color: #D84315;top:0rem;} +.highlight-none .highlight {background:#ffffff;border:0;padding:0;margin:0rem 0 1.5rem 0;} +.highlight pre {overflow-x: auto;white-space: pre;word-wrap: normal;margin:0.25rem 0;} +.highlight .hll { background-color: #ffffcc } +.highlight .c { color: #60a0b0; font-style: italic } /* Comment */ +.highlight .err { border: 1px solid #FF0000 } /* Error */ +.highlight .k { color: #007020; font-weight: bold } /* Keyword */ +.highlight .o { color: #666666 } /* Operator */ +.highlight .cm { color: #60a0b0; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #007020 } /* Comment.Preproc */ +.highlight .c1 { color: #60a0b0; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #60a0b0; background-color: #fff0f0 } /* Comment.Special */ 
+.highlight .gd { color: #A00000 } /* Generic.Deleted */ +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .gr { color: #FF0000 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #00A000 } /* Generic.Inserted */ +.highlight .go { color: #888888 } /* Generic.Output */ +.highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #0044DD } /* Generic.Traceback */ +.highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #007020 } /* Keyword.Pseudo */ +.highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #902000 } /* Keyword.Type */ +.highlight .m { color: #40a070 } /* Literal.Number */ +.highlight .s { color: #4070a0 } /* Literal.String */ +.highlight .na { color: #4070a0 } /* Name.Attribute */ +.highlight .nb { color: #007020 } /* Name.Builtin */ +.highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */ +.highlight .no { color: #60add5 } /* Name.Constant */ +.highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ +.highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ +.highlight .ne { color: #007020 } /* Name.Exception */ +.highlight .nf { color: #06287e } /* Name.Function */ +.highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ +.highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ +.highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #bb60d5 } /* Name.Variable */ +.highlight .ow { color: #007020; font-weight: bold } /* Operator.Word 
*/ +.highlight .w { color: #bbbbbb } /* Text.Whitespace */ +.highlight .mf { color: #40a070 } /* Literal.Number.Float */ +.highlight .mh { color: #40a070 } /* Literal.Number.Hex */ +.highlight .mi { color: #40a070 } /* Literal.Number.Integer */ +.highlight .mo { color: #40a070 } /* Literal.Number.Oct */ +.highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ +.highlight .sc { color: #4070a0 } /* Literal.String.Char */ +.highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #4070a0 } /* Literal.String.Double */ +.highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ +.highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ +.highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ +.highlight .sx { color: #c65d09 } /* Literal.String.Other */ +.highlight .sr { color: #235388 } /* Literal.String.Regex */ +.highlight .s1 { color: #4070a0 } /* Literal.String.Single */ +.highlight .ss { color: #517918 } /* Literal.String.Symbol */ +.highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ +.highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ +.highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ +.highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ +.highlight .il { color: #40a070 } /* Literal.Number.Integer.Long */ + +/* CSS font colors for translated ANSI colors. 
*/ +.ansi-bold,.ansibold{font-weight:700}.ansi-black-fg{color:#3E424D}.ansi-black-bg{background-color:#3E424D}.ansi-black-intense-fg{color:#282C36}.ansi-black-intense-bg{background-color:#282C36}.ansi-red-fg{color:#E75C58}.ansi-red-bg{background-color:#E75C58}.ansi-red-intense-fg{color:#B22B31}.ansi-red-intense-bg{background-color:#B22B31}.ansi-green-fg{color:#00A250}.ansi-green-bg{background-color:#00A250}.ansi-green-intense-fg{color:#007427}.ansi-green-intense-bg{background-color:#007427}.ansi-yellow-fg{color:#DDB62B}.ansi-yellow-bg{background-color:#DDB62B}.ansi-yellow-intense-fg{color:#B27D12}.ansi-yellow-intense-bg{background-color:#B27D12}.ansi-blue-fg{color:#208FFB}.ansi-blue-bg{background-color:#208FFB}.ansi-blue-intense-fg{color:#0065CA}.ansi-blue-intense-bg{background-color:#0065CA}.ansi-magenta-fg{color:#D160C4}.ansi-magenta-bg{background-color:#D160C4}.ansi-magenta-intense-fg{color:#A03196}.ansi-magenta-intense-bg{background-color:#A03196}.ansi-cyan-fg{color:#60C6C8}.ansi-cyan-bg{background-color:#60C6C8}.ansi-cyan-intense-fg{color:#258F8F}.ansi-cyan-intense-bg{background-color:#258F8F}.ansi-white-fg{color:#C5C1B4}.ansi-white-bg{background-color:#C5C1B4}.ansi-white-intense-fg{color:#A1A6B2}.ansi-white-intense-bg{background-color:#A1A6B2}.ansi-default-inverse-bg,.ansibgblack{background-color:#000}.ansi-default-inverse-fg{color:#FFF}.ansi-underline{text-decoration:underline}.ansi-inverse{outline:dotted .5px}.ansiblack{color:#000}.ansired{color:#8b0000}.ansigreen{color:#006400}.ansiyellow{color:#c4a000}.ansiblue{color:#00008b}.ansipurple{color:#9400d3}.ansicyan{color:#4682b4}.ansigray{color:gray}.ansibgred{background-color:red}.ansibggreen{background-color:green}.ansibgyellow{background-color:#ff0}.ansibgblue{background-color:#00f}.ansibgpurple{background-color:#ff00ff}.ansibgcyan{background-color:#0ff}.ansibggray{background-color:gray} diff --git a/tests/base/theme/static/img/code-block-fade.png b/tests/base/theme/static/img/code-block-fade.png new file 
mode 100644 index 00000000..c6d3fe16 Binary files /dev/null and b/tests/base/theme/static/img/code-block-fade.png differ diff --git a/tests/base/theme/static/img/coverage-not available-lightgrey.svg b/tests/base/theme/static/img/coverage-not available-lightgrey.svg new file mode 100644 index 00000000..6688c13d --- /dev/null +++ b/tests/base/theme/static/img/coverage-not available-lightgrey.svg @@ -0,0 +1 @@ +coveragecoveragenot availablenot available \ No newline at end of file diff --git a/tests/base/theme/static/img/execution-test-failing-red.svg b/tests/base/theme/static/img/execution-test-failing-red.svg new file mode 100644 index 00000000..02777bba --- /dev/null +++ b/tests/base/theme/static/img/execution-test-failing-red.svg @@ -0,0 +1 @@ +execution testexecution testfailingfailing \ No newline at end of file diff --git a/tests/base/theme/static/img/execution-test-not available-lightgrey.svg b/tests/base/theme/static/img/execution-test-not available-lightgrey.svg new file mode 100644 index 00000000..190e58f2 --- /dev/null +++ b/tests/base/theme/static/img/execution-test-not available-lightgrey.svg @@ -0,0 +1 @@ +execution testexecution testnot availablenot available \ No newline at end of file diff --git a/tests/base/theme/static/img/execution-test-passing-brightgreen.svg b/tests/base/theme/static/img/execution-test-passing-brightgreen.svg new file mode 100644 index 00000000..6c022f80 --- /dev/null +++ b/tests/base/theme/static/img/execution-test-passing-brightgreen.svg @@ -0,0 +1 @@ +execution testexecution testpassingpassing \ No newline at end of file diff --git a/tests/base/theme/static/img/favicon.ico b/tests/base/theme/static/img/favicon.ico new file mode 100644 index 00000000..2b8b7a48 Binary files /dev/null and b/tests/base/theme/static/img/favicon.ico differ diff --git a/tests/base/theme/static/img/github-icon.svg b/tests/base/theme/static/img/github-icon.svg new file mode 100644 index 00000000..4903d26b --- /dev/null +++ 
b/tests/base/theme/static/img/github-icon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/tests/base/theme/static/img/jl-logo.png b/tests/base/theme/static/img/jl-logo.png new file mode 100644 index 00000000..277b9641 Binary files /dev/null and b/tests/base/theme/static/img/jl-logo.png differ diff --git a/tests/base/theme/static/img/jupyter-notebook-download-blue.svg b/tests/base/theme/static/img/jupyter-notebook-download-blue.svg new file mode 100644 index 00000000..f93e604d --- /dev/null +++ b/tests/base/theme/static/img/jupyter-notebook-download-blue.svg @@ -0,0 +1 @@ +jupyter notebookjupyter notebookdownloaddownload \ No newline at end of file diff --git a/tests/base/theme/static/img/jupyter-notebook-run-006400.svg b/tests/base/theme/static/img/jupyter-notebook-run-006400.svg new file mode 100644 index 00000000..2836d70f --- /dev/null +++ b/tests/base/theme/static/img/jupyter-notebook-run-006400.svg @@ -0,0 +1 @@ + jupyter notebookjupyter notebookrunrun \ No newline at end of file diff --git a/tests/base/theme/static/img/logo.png b/tests/base/theme/static/img/logo.png new file mode 100644 index 00000000..9a1de73a Binary files /dev/null and b/tests/base/theme/static/img/logo.png differ diff --git a/tests/base/theme/static/img/pdf-download-blue.svg b/tests/base/theme/static/img/pdf-download-blue.svg new file mode 100644 index 00000000..6aa2830b --- /dev/null +++ b/tests/base/theme/static/img/pdf-download-blue.svg @@ -0,0 +1 @@ + pdfpdfdownloaddownload \ No newline at end of file diff --git a/tests/base/theme/static/img/powered-by-NumFOCUS-orange.svg b/tests/base/theme/static/img/powered-by-NumFOCUS-orange.svg new file mode 100644 index 00000000..f8a53a36 --- /dev/null +++ b/tests/base/theme/static/img/powered-by-NumFOCUS-orange.svg @@ -0,0 +1 @@ +powered bypowered byNumFOCUSNumFOCUS \ No newline at end of file diff --git a/tests/base/theme/static/img/py-logo.png b/tests/base/theme/static/img/py-logo.png new file mode 100644 index 00000000..e1923604 
Binary files /dev/null and b/tests/base/theme/static/img/py-logo.png differ diff --git a/tests/base/theme/static/img/qe-logo.png b/tests/base/theme/static/img/qe-logo.png new file mode 100644 index 00000000..4e231ed6 Binary files /dev/null and b/tests/base/theme/static/img/qe-logo.png differ diff --git a/tests/base/theme/static/img/qe-menubar-icons.png b/tests/base/theme/static/img/qe-menubar-icons.png new file mode 100644 index 00000000..e314a28a Binary files /dev/null and b/tests/base/theme/static/img/qe-menubar-icons.png differ diff --git a/tests/base/theme/static/img/qe-menubar-logo.png b/tests/base/theme/static/img/qe-menubar-logo.png new file mode 100644 index 00000000..2a10a73f Binary files /dev/null and b/tests/base/theme/static/img/qe-menubar-logo.png differ diff --git a/tests/base/theme/static/img/qe-menubar-logo.svg b/tests/base/theme/static/img/qe-menubar-logo.svg new file mode 100644 index 00000000..b1c80eb2 --- /dev/null +++ b/tests/base/theme/static/img/qe-menubar-logo.svg @@ -0,0 +1 @@ +qe-menubar-logo \ No newline at end of file diff --git a/tests/base/theme/static/img/qeco-logo.png b/tests/base/theme/static/img/qeco-logo.png new file mode 100644 index 00000000..cfdf7d67 Binary files /dev/null and b/tests/base/theme/static/img/qeco-logo.png differ diff --git a/tests/base/theme/static/img/search-icon.png b/tests/base/theme/static/img/search-icon.png new file mode 100644 index 00000000..6c8a97ff Binary files /dev/null and b/tests/base/theme/static/img/search-icon.png differ diff --git a/tests/base/theme/static/img/v-div.png b/tests/base/theme/static/img/v-div.png new file mode 100644 index 00000000..386a0b1c Binary files /dev/null and b/tests/base/theme/static/img/v-div.png differ diff --git a/tests/base/theme/static/js/python.js b/tests/base/theme/static/js/python.js new file mode 100644 index 00000000..0ce84d1b --- /dev/null +++ b/tests/base/theme/static/js/python.js @@ -0,0 +1,371 @@ +// python.js v1.0 + + +// Declare MathJax Macros for the 
Appropriate Macros +MathJax.Hub.Config({ + TeX: { + Macros: { + Var: "\\mathop{\\mathrm{Var}}", + trace: "\\mathop{\\mathrm{trace}}", + argmax: "\\mathop{\\mathrm{arg\\,max}}", + argmin: "\\mathop{\\mathrm{arg\\,min}}", + proj: "\\mathop{\\mathrm{proj}}", + col: "\\mathop{\\mathrm{col}}", + Span: "\\mathop{\\mathrm{span}}", + epsilon: "\\varepsilon", + EE: "\\mathbb{E}", + PP: "\\mathbb{P}", + RR: "\\mathbb{R}", + NN: "\\mathbb{N}", + ZZ: "\\mathbb{Z}", + aA: "\\mathcal{A}", + bB: "\\mathcal{B}", + cC: "\\mathcal{C}", + dD: "\\mathcal{D}", + eE: "\\mathcal{E}", + fF: "\\mathcal{F}", + gG: "\\mathcal{G}", + hH: "\\mathcal{H}", + } + } +}); +MathJax.Hub.Config({ + tex2jax: { + inlineMath: [ ['$','$'], ['\\(','\\)'] ], + processEscapes: true + } +}); + + +/* Collapsed code block */ + +const collapsableCodeBlocks = document.querySelectorAll("div[class^='collapse'] .highlight"); +for (var i = 0; i < collapsableCodeBlocks.length; i++) { + const toggleContainer = document.createElement('div'); + toggleContainer.innerHTML = 'Show more...'; + collapsableCodeBlocks[i].parentNode.insertBefore(toggleContainer, collapsableCodeBlocks[i].nextSibling); +} + +const collapsableCodeToggles = document.querySelectorAll("div[class^='collapse'] .toggle"); +for (var i = 0; i < collapsableCodeToggles.length; i++) { + collapsableCodeToggles[i].addEventListener('click', function(e) { + e.preventDefault(); + var codeBlock = this.closest('div[class^="collapse"]'); + if ( codeBlock.classList.contains('expanded') ) { + codeBlock.classList.remove('expanded'); + this.style.display = 'none'; + this.nextSibling.style.display = 'block'; + } else { + codeBlock.classList.add('expanded'); + this.style.display = 'none'; + this.previousSibling.style.display = 'block'; + } + }); +} + + +/* Wrap container around all tables allowing hirizontal scroll */ + +const contentTables = document.querySelectorAll(".content table"); +for (var i = 0; i < contentTables.length; i++) { + var wrapper = 
// NOTE(review): This region is a git-diff hunk adding status-badge JavaScript to a
// Sphinx theme: table wrapping (begun before this hunk's visible start), homepage
// compile-date display, a code-execution status table, a per-page executability
// badge, and per-extension coverage percentages. All data comes from
// _static/code-execution-results.json fetched via XMLHttpRequest.
// NOTE(review): HTML tag content inside several string literals appears to have been
// stripped during extraction (e.g. the shields.io <img> markup assigned to `badge`
// is now an empty string, and the table-header cell markup in `rawHTML` lost its
// tags) -- confirm against the original file before relying on these literals.
// NOTE(review): `num_errors` doubles as a status code throughout: 0 = passing,
// 1 = failing, -1 = not available (see the branches below and the LECTURE_*
// constants further down).
document.createElement('div'); + wrapper.classList.add('table-container'); + contentTables[i].parentNode.insertBefore(wrapper, contentTables[i]); + wrapper.appendChild(contentTables[i]); +} + + +/* Show compilation date on the homepage */ + +function timeConverter(UNIX_timestamp){ + var a = new Date(UNIX_timestamp * 1000); + var months = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']; + var year = a.getFullYear(); + var month = months[a.getMonth()]; + var date = a.getDate(); + var time = date + ' ' + month + ' ' + year ; + return time; +} + +if ( document.getElementById('compiled_date') ) { + var timestamp = document.getElementById('nb_date').innerHTML; + var compiled_date = timeConverter( timestamp ); + document.getElementById('compiled_date').innerHTML = compiled_date; +} + + +// Populate status page from code execution results JSON + +function loadCodeExecutionJSON(callback) { + var xobj = new XMLHttpRequest(); + xobj.overrideMimeType("application/json"); + xobj.open('GET', '_static/code-execution-results.json', true); // Replace 'appDataServices' with the path to your file + xobj.onreadystatechange = function () { + if (xobj.readyState == 4 && xobj.status == "200") { + // Required use of an anonymous callback as .open will NOT return a value but simply returns undefined in asynchronous mode + callback(xobj.responseText); + } + }; + xobj.send(null); +} + +if ( document.getElementById('status_table') ) { + + loadCodeExecutionJSON(function(response) { + // Parsing JSON string into object + var data = JSON.parse(response); + var status_data = []; + var last_test_time = data.run_time; + document.getElementById('last_test_time').textContent = last_test_time; + for (var key in data.results) + { + var new_record = {}; + new_record['name'] = data.results[key].filename; + new_record['runtime'] = data.results[key].runtime; + new_record['extension'] = data.results[key].extension; + new_record['result'] = data.results[key].num_errors; + 
// NOTE(review): one record per lecture is assembled from data.results; below, the
// status table is emptied and rebuilt row-by-row, mapping result 0/1/-1 to
// Passing/Failing/Not-available badge colors. If `result` is any other value,
// `status` and `color` stay undefined -- presumably never happens; verify upstream.
new_record['language'] = data.results[key].language; + + status_data.push(new_record); + } + + // empty the table + var table = document.getElementById("status_table"); + while (table.firstChild) + table.removeChild(table.firstChild); + var rawHTML = "Lecture FileLanguageRunning Time"; + table.innerHTML = rawHTML; + // add the data + for (var i = 0; i < status_data.length; i ++) + { + var table = document.getElementById("status_table"); + var row = table.insertRow(-1); + row.setAttribute("id", status_data[i]['name'], 0); + + // Insert new cells ( elements) at the 1st and 2nd position of the "new" element: + var lectureCell = row.insertCell(0); + var langCell = row.insertCell(1); + var runtimeCell = row.insertCell(2); + var statusCell = row.insertCell(3); + var badge, status, color, lang, link; + + if (status_data[i]['result'] === 0) + { + status = "Passing"; + color = "brightgreen"; + } + else if (status_data[i]['result'] === 1) + { + status = "Failing"; + color = "red"; + } + else if (status_data[i]['result'] === -1) { + status = "Not available"; + color = "lightgrey"; + } + + link = '/' + status_data[i]['name'] + '.html'; + + badge = ''; + + // Add some text to the new cells: + lectureCell.innerHTML = status_data[i]['name']; + langCell.innerHTML = status_data[i]['language']; + runtimeCell.innerHTML = status_data[i]['runtime']; + statusCell.innerHTML = badge; + + + } + }) +} + + +// Show executability status badge in header + +const LECTURE_OK = 0; +const LECTURE_FAILED = 1; +const LECTURE_ERROR = -1; + +function update_page_badge(page_status) +{ + var badge = document.getElementById("executability_status_badge"); + var status, color; + + if (page_status === LECTURE_OK) + { + status = "Passing"; + color = "brightgreen"; + } + else if (page_status == LECTURE_FAILED) + { + status = "Failing"; + color = "red"; + } + else if (page_status == LECTURE_ERROR) + { + status = "Not available"; + color = "lightgrey"; + } + else + { + console.log("Panic! 
Invalid parameter passed to update_page_badge()."); + } + + badge.innerHTML = ''; + + //badge.style.display="block"; + + return; +} + +function determine_page_status(status_data) +{ + var path = window.location.pathname; + var filename_parts = path.split("/"); + var filename = filename_parts.pop(); + + var lecture_name = filename.split(".")[0].toLowerCase(); + + var res = LECTURE_ERROR; + + for (var i = 0; i < status_data.length; i ++) + { + if (status_data[i]['name'].split('/').pop() === lecture_name) + { + if (status_data[i]['result'] === 0) + { + res = LECTURE_OK; + } + else + { + res = LECTURE_FAILED; + } + } + } + return res; +} + +function load_this_page_badge() +{ + loadCodeExecutionJSON(function(response) { + // Parsing JSON string into object + var data = JSON.parse(response); + status_data = []; + for (var key in data.results) + { + var new_record = {}; + new_record['name'] = data.results[key].filename; + new_record['runtime'] = data.results[key].runtime; + new_record['extension'] = data.results[key].extension; + new_record['result'] = data.results[key].num_errors; + new_record['language'] = data.results[key].language; + status_data.push(new_record); + } + var page_status = determine_page_status(status_data); + update_page_badge(page_status); + }); +} + + + + + + +function get_badge(percentage) +{ + var color, badge; + + if (percentage > -1) + { + if ( percentage < 50 ) { + color = 'red'; + } else { + color = 'brightgreen'; + } + badge = 'https://img.shields.io/badge/Total%20coverage-' + percentage + '%25-' + color + '.svg'; + } else { + badge = 'https://img.shields.io/badge/Total%20coverage-not%20available-lightgrey.svg>'; + } + return badge; +} + +function load_percentages() +{ + var number_of_lectures = {}; + var number_which_passed = {}; + var keys_list = []; + var combined_percentage; + + loadCodeExecutionJSON(function(response) { + // Parsing JSON string into object + var data = JSON.parse(response); + for (var key in data.results) + { + if 
(data.results[key].num_errors === 0) + { + if (!(data.results[key].extension in number_which_passed)) + { + number_which_passed[data.results[key].extension] = 0; + keys_list.push(data.results[key].extension); + } + number_which_passed[data.results[key].extension] += 1; + } + + if (!(data.results[key].extension in number_of_lectures)) + { + number_of_lectures[data.results[key].extension] = 0; + } + number_of_lectures[data.results[key].extension] += 1; + } + + var percentages = {}; + var total_lectures = 0; + var total_passing = 0; + for (var k in keys_list) + { + key = keys_list[k]; + + percentages[key] = 0; + if (number_of_lectures[key] === 0) + { + // An appropriate value for this is yet to be determined. + percentages[key] = 100; + } + else + { + percentages[key] = Math.floor(100 * number_which_passed[key] / number_of_lectures[key]); + } + + // Sensible boundary checking. + if (percentages[key] < 0 || percentages[key] > 100) + { + percentages[key] = -1; + } + + total_lectures += number_of_lectures[key]; + total_passing += number_which_passed[key]; + } + + if (total_lectures === 0) + { + combined_percentage = 0; + } + else + { + combined_percentage = Math.floor(100 * total_passing / total_lectures); + } + + var badge = document.getElementById("coverage_badge"); + badge.innerHTML = ''; + + }); + +} + +if ( document.getElementById('executability_status_badge') ) { + load_this_page_badge(); +} + +if ( document.getElementById('coverage_badge') ) { + load_percentages(); +} \ No newline at end of file diff --git a/tests/base/theme/static/js/vendor/jquery-1.11.0.min.js b/tests/base/theme/static/js/vendor/jquery-1.11.0.min.js new file mode 100644 index 00000000..1f7e7211 --- /dev/null +++ b/tests/base/theme/static/js/vendor/jquery-1.11.0.min.js @@ -0,0 +1,4 @@ +/*! jQuery v1.11.0 | (c) 2005, 2014 jQuery Foundation, Inc. 
| jquery.org/license */ +!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=c.slice,e=c.concat,f=c.push,g=c.indexOf,h={},i=h.toString,j=h.hasOwnProperty,k="".trim,l={},m="1.11.0",n=function(a,b){return new n.fn.init(a,b)},o=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,p=/^-ms-/,q=/-([\da-z])/gi,r=function(a,b){return b.toUpperCase()};n.fn=n.prototype={jquery:m,constructor:n,selector:"",length:0,toArray:function(){return d.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:d.call(this)},pushStack:function(a){var b=n.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return n.each(this,a,b)},map:function(a){return this.pushStack(n.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(d.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:f,sort:c.sort,splice:c.splice},n.extend=n.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||n.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(e=arguments[h]))for(d in e)a=g[d],c=e[d],g!==c&&(j&&c&&(n.isPlainObject(c)||(b=n.isArray(c)))?(b?(b=!1,f=a&&n.isArray(a)?a:[]):f=a&&n.isPlainObject(a)?a:{},g[d]=n.extend(j,f,c)):void 0!==c&&(g[d]=c));return g},n.extend({expando:"jQuery"+(m+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new 
Error(a)},noop:function(){},isFunction:function(a){return"function"===n.type(a)},isArray:Array.isArray||function(a){return"array"===n.type(a)},isWindow:function(a){return null!=a&&a==a.window},isNumeric:function(a){return a-parseFloat(a)>=0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},isPlainObject:function(a){var b;if(!a||"object"!==n.type(a)||a.nodeType||n.isWindow(a))return!1;try{if(a.constructor&&!j.call(a,"constructor")&&!j.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}if(l.ownLast)for(b in a)return j.call(a,b);for(b in a);return void 0===b||j.call(a,b)},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?h[i.call(a)]||"object":typeof a},globalEval:function(b){b&&n.trim(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(p,"ms-").replace(q,r)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,c){var d,e=0,f=a.length,g=s(a);if(c){if(g){for(;f>e;e++)if(d=b.apply(a[e],c),d===!1)break}else for(e in a)if(d=b.apply(a[e],c),d===!1)break}else if(g){for(;f>e;e++)if(d=b.call(a[e],e,a[e]),d===!1)break}else for(e in a)if(d=b.call(a[e],e,a[e]),d===!1)break;return a},trim:k&&!k.call("\ufeff\xa0")?function(a){return null==a?"":k.call(a)}:function(a){return null==a?"":(a+"").replace(o,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(s(Object(a))?n.merge(c,"string"==typeof a?[a]:a):f.call(c,a)),c},inArray:function(a,b,c){var d;if(b){if(g)return g.call(b,a,c);for(d=b.length,c=c?0>c?Math.max(0,d+c):c:0;d>c;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,b){var c=+b.length,d=0,e=a.length;while(c>d)a[e++]=b[d++];if(c!==c)while(void 0!==b[d])a[e++]=b[d++];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,f=0,g=a.length,h=s(a),i=[];if(h)for(;g>f;f++)d=b(a[f],f,c),null!=d&&i.push(d);else for(f in 
a)d=b(a[f],f,c),null!=d&&i.push(d);return e.apply([],i)},guid:1,proxy:function(a,b){var c,e,f;return"string"==typeof b&&(f=a[b],b=a,a=f),n.isFunction(a)?(c=d.call(arguments,2),e=function(){return a.apply(b||this,c.concat(d.call(arguments)))},e.guid=a.guid=a.guid||n.guid++,e):void 0},now:function(){return+new Date},support:l}),n.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(a,b){h["[object "+b+"]"]=b.toLowerCase()});function s(a){var b=a.length,c=n.type(a);return"function"===c||n.isWindow(a)?!1:1===a.nodeType&&b?!0:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var t=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s="sizzle"+-new Date,t=a.document,u=0,v=0,w=eb(),x=eb(),y=eb(),z=function(a,b){return a===b&&(j=!0),0},A="undefined",B=1<<31,C={}.hasOwnProperty,D=[],E=D.pop,F=D.push,G=D.push,H=D.slice,I=D.indexOf||function(a){for(var b=0,c=this.length;c>b;b++)if(this[b]===a)return b;return-1},J="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",K="[\\x20\\t\\r\\n\\f]",L="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",M=L.replace("w","w#"),N="\\["+K+"*("+L+")"+K+"*(?:([*^$|!~]?=)"+K+"*(?:(['\"])((?:\\\\.|[^\\\\])*?)\\3|("+M+")|)|)"+K+"*\\]",O=":("+L+")(?:\\(((['\"])((?:\\\\.|[^\\\\])*?)\\3|((?:\\\\.|[^\\\\()[\\]]|"+N.replace(3,8)+")*)|.*)\\)|)",P=new RegExp("^"+K+"+|((?:^|[^\\\\])(?:\\\\.)*)"+K+"+$","g"),Q=new RegExp("^"+K+"*,"+K+"*"),R=new RegExp("^"+K+"*([>+~]|"+K+")"+K+"*"),S=new RegExp("="+K+"*([^\\]'\"]*?)"+K+"*\\]","g"),T=new RegExp(O),U=new RegExp("^"+M+"$"),V={ID:new RegExp("^#("+L+")"),CLASS:new RegExp("^\\.("+L+")"),TAG:new RegExp("^("+L.replace("w","w*")+")"),ATTR:new RegExp("^"+N),PSEUDO:new RegExp("^"+O),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+K+"*(even|odd|(([+-]|)(\\d*)n|)"+K+"*(?:([+-]|)"+K+"*(\\d+)|))"+K+"*\\)|)","i"),bool:new RegExp("^(?:"+J+")$","i"),needsContext:new 
RegExp("^"+K+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+K+"*((?:-\\d)?\\d*)"+K+"*\\)|)(?=[^-]|$)","i")},W=/^(?:input|select|textarea|button)$/i,X=/^h\d$/i,Y=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,$=/[+~]/,_=/'|\\/g,ab=new RegExp("\\\\([\\da-f]{1,6}"+K+"?|("+K+")|.)","ig"),bb=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)};try{G.apply(D=H.call(t.childNodes),t.childNodes),D[t.childNodes.length].nodeType}catch(cb){G={apply:D.length?function(a,b){F.apply(a,H.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function db(a,b,d,e){var f,g,h,i,j,m,p,q,u,v;if((b?b.ownerDocument||b:t)!==l&&k(b),b=b||l,d=d||[],!a||"string"!=typeof a)return d;if(1!==(i=b.nodeType)&&9!==i)return[];if(n&&!e){if(f=Z.exec(a))if(h=f[1]){if(9===i){if(g=b.getElementById(h),!g||!g.parentNode)return d;if(g.id===h)return d.push(g),d}else if(b.ownerDocument&&(g=b.ownerDocument.getElementById(h))&&r(b,g)&&g.id===h)return d.push(g),d}else{if(f[2])return G.apply(d,b.getElementsByTagName(a)),d;if((h=f[3])&&c.getElementsByClassName&&b.getElementsByClassName)return G.apply(d,b.getElementsByClassName(h)),d}if(c.qsa&&(!o||!o.test(a))){if(q=p=s,u=b,v=9===i&&a,1===i&&"object"!==b.nodeName.toLowerCase()){m=ob(a),(p=b.getAttribute("id"))?q=p.replace(_,"\\$&"):b.setAttribute("id",q),q="[id='"+q+"'] ",j=m.length;while(j--)m[j]=q+pb(m[j]);u=$.test(a)&&mb(b.parentNode)||b,v=m.join(",")}if(v)try{return G.apply(d,u.querySelectorAll(v)),d}catch(w){}finally{p||b.removeAttribute("id")}}}return xb(a.replace(P,"$1"),b,d,e)}function eb(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function fb(a){return a[s]=!0,a}function gb(a){var b=l.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function hb(a,b){var 
c=a.split("|"),e=a.length;while(e--)d.attrHandle[c[e]]=b}function ib(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||B)-(~a.sourceIndex||B);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function jb(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function kb(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function lb(a){return fb(function(b){return b=+b,fb(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function mb(a){return a&&typeof a.getElementsByTagName!==A&&a}c=db.support={},f=db.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},k=db.setDocument=function(a){var b,e=a?a.ownerDocument||a:t,g=e.defaultView;return e!==l&&9===e.nodeType&&e.documentElement?(l=e,m=e.documentElement,n=!f(e),g&&g!==g.top&&(g.addEventListener?g.addEventListener("unload",function(){k()},!1):g.attachEvent&&g.attachEvent("onunload",function(){k()})),c.attributes=gb(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=gb(function(a){return a.appendChild(e.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=Y.test(e.getElementsByClassName)&&gb(function(a){return a.innerHTML="
",a.firstChild.className="i",2===a.getElementsByClassName("i").length}),c.getById=gb(function(a){return m.appendChild(a).id=s,!e.getElementsByName||!e.getElementsByName(s).length}),c.getById?(d.find.ID=function(a,b){if(typeof b.getElementById!==A&&n){var c=b.getElementById(a);return c&&c.parentNode?[c]:[]}},d.filter.ID=function(a){var b=a.replace(ab,bb);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(ab,bb);return function(a){var c=typeof a.getAttributeNode!==A&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return typeof b.getElementsByTagName!==A?b.getElementsByTagName(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return typeof b.getElementsByClassName!==A&&n?b.getElementsByClassName(a):void 0},p=[],o=[],(c.qsa=Y.test(e.querySelectorAll))&&(gb(function(a){a.innerHTML="",a.querySelectorAll("[t^='']").length&&o.push("[*^$]="+K+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||o.push("\\["+K+"*(?:value|"+J+")"),a.querySelectorAll(":checked").length||o.push(":checked")}),gb(function(a){var b=e.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&o.push("name"+K+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||o.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),o.push(",.*:")})),(c.matchesSelector=Y.test(q=m.webkitMatchesSelector||m.mozMatchesSelector||m.oMatchesSelector||m.msMatchesSelector))&&gb(function(a){c.disconnectedMatch=q.call(a,"div"),q.call(a,"[s!='']:x"),p.push("!=",O)}),o=o.length&&new RegExp(o.join("|")),p=p.length&&new RegExp(p.join("|")),b=Y.test(m.compareDocumentPosition),r=b||Y.test(m.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return 
a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},z=b?function(a,b){if(a===b)return j=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===e||a.ownerDocument===t&&r(t,a)?-1:b===e||b.ownerDocument===t&&r(t,b)?1:i?I.call(i,a)-I.call(i,b):0:4&d?-1:1)}:function(a,b){if(a===b)return j=!0,0;var c,d=0,f=a.parentNode,g=b.parentNode,h=[a],k=[b];if(!f||!g)return a===e?-1:b===e?1:f?-1:g?1:i?I.call(i,a)-I.call(i,b):0;if(f===g)return ib(a,b);c=a;while(c=c.parentNode)h.unshift(c);c=b;while(c=c.parentNode)k.unshift(c);while(h[d]===k[d])d++;return d?ib(h[d],k[d]):h[d]===t?-1:k[d]===t?1:0},e):l},db.matches=function(a,b){return db(a,null,null,b)},db.matchesSelector=function(a,b){if((a.ownerDocument||a)!==l&&k(a),b=b.replace(S,"='$1']"),!(!c.matchesSelector||!n||p&&p.test(b)||o&&o.test(b)))try{var d=q.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return db(b,l,null,[a]).length>0},db.contains=function(a,b){return(a.ownerDocument||a)!==l&&k(a),r(a,b)},db.attr=function(a,b){(a.ownerDocument||a)!==l&&k(a);var e=d.attrHandle[b.toLowerCase()],f=e&&C.call(d.attrHandle,b.toLowerCase())?e(a,b,!n):void 0;return void 0!==f?f:c.attributes||!n?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},db.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},db.uniqueSort=function(a){var b,d=[],e=0,f=0;if(j=!c.detectDuplicates,i=!c.sortStable&&a.slice(0),a.sort(z),j){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return i=null,a},e=db.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return 
a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=db.selectors={cacheLength:50,createPseudo:fb,match:V,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(ab,bb),a[3]=(a[4]||a[5]||"").replace(ab,bb),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||db.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&db.error(a[0]),a},PSEUDO:function(a){var b,c=!a[5]&&a[2];return V.CHILD.test(a[0])?null:(a[3]&&void 0!==a[4]?a[2]=a[4]:c&&T.test(c)&&(b=ob(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(ab,bb).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=w[a+" "];return b||(b=new RegExp("(^|"+K+")"+a+"("+K+"|$)"))&&w(a,function(a){return b.test("string"==typeof a.className&&a.className||typeof a.getAttribute!==A&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=db.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var 
j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),t=!i&&!h;if(q){if(f){while(p){l=b;while(l=l[p])if(h?l.nodeName.toLowerCase()===r:1===l.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&t){k=q[s]||(q[s]={}),j=k[a]||[],n=j[0]===u&&j[1],m=j[0]===u&&j[2],l=n&&q.childNodes[n];while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if(1===l.nodeType&&++m&&l===b){k[a]=[u,n,m];break}}else if(t&&(j=(b[s]||(b[s]={}))[a])&&j[0]===u)m=j[1];else while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if((h?l.nodeName.toLowerCase()===r:1===l.nodeType)&&++m&&(t&&((l[s]||(l[s]={}))[a]=[u,m]),l===b))break;return m-=e,m===d||m%d===0&&m/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||db.error("unsupported pseudo: "+a);return e[s]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?fb(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=I.call(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:fb(function(a){var b=[],c=[],d=g(a.replace(P,"$1"));return d[s]?fb(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),!c.pop()}}),has:fb(function(a){return function(b){return db(a,b).length>0}}),contains:fb(function(a){return function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:fb(function(a){return U.test(a||"")||db.error("unsupported lang: "+a),a=a.replace(ab,bb).toLowerCase(),function(b){var c;do if(c=n?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===m},focus:function(a){return a===l.activeElement&&(!l.hasFocus||l.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return 
a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return X.test(a.nodeName)},input:function(a){return W.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:lb(function(){return[0]}),last:lb(function(a,b){return[b-1]}),eq:lb(function(a,b,c){return[0>c?c+b:c]}),even:lb(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:lb(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:lb(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:lb(function(a,b,c){for(var d=0>c?c+b:c;++db;b++)d+=a[b].value;return d}function qb(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=v++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j=[u,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(i=b[s]||(b[s]={}),(h=i[d])&&h[0]===u&&h[1]===f)return j[2]=h[2];if(i[d]=j,j[2]=a(b,c,g))return!0}}}function rb(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function sb(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(!c||c(f,d,e))&&(g.push(f),j&&b.push(h));return g}function tb(a,b,c,d,e,f){return d&&!d[s]&&(d=tb(d)),e&&!e[s]&&(e=tb(e,f)),fb(function(f,g,h,i){var 
j,k,l,m=[],n=[],o=g.length,p=f||wb(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:sb(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=sb(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?I.call(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=sb(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):G.apply(g,r)})}function ub(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],i=g||d.relative[" "],j=g?1:0,k=qb(function(a){return a===b},i,!0),l=qb(function(a){return I.call(b,a)>-1},i,!0),m=[function(a,c,d){return!g&&(d||c!==h)||((b=c).nodeType?k(a,c,d):l(a,c,d))}];f>j;j++)if(c=d.relative[a[j].type])m=[qb(rb(m),c)];else{if(c=d.filter[a[j].type].apply(null,a[j].matches),c[s]){for(e=++j;f>e;e++)if(d.relative[a[e].type])break;return tb(j>1&&rb(m),j>1&&pb(a.slice(0,j-1).concat({value:" "===a[j-2].type?"*":""})).replace(P,"$1"),c,e>j&&ub(a.slice(j,e)),f>e&&ub(a=a.slice(e)),f>e&&pb(a))}m.push(c)}return rb(m)}function vb(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,i,j,k){var m,n,o,p=0,q="0",r=f&&[],s=[],t=h,v=f||e&&d.find.TAG("*",k),w=u+=null==t?1:Math.random()||.1,x=v.length;for(k&&(h=g!==l&&g);q!==x&&null!=(m=v[q]);q++){if(e&&m){n=0;while(o=a[n++])if(o(m,g,i)){j.push(m);break}k&&(u=w)}c&&((m=!o&&m)&&p--,f&&r.push(m))}if(p+=q,c&&q!==p){n=0;while(o=b[n++])o(r,s,g,i);if(f){if(p>0)while(q--)r[q]||s[q]||(s[q]=E.call(j));s=sb(s)}G.apply(j,s),k&&!f&&s.length>0&&p+b.length>1&&db.uniqueSort(j)}return k&&(u=w,h=t),r};return c?fb(f):f}g=db.compile=function(a,b){var c,d=[],e=[],f=y[a+" "];if(!f){b||(b=ob(a)),c=b.length;while(c--)f=ub(b[c]),f[s]?d.push(f):e.push(f);f=y(a,vb(e,d))}return f};function wb(a,b,c){for(var d=0,e=b.length;e>d;d++)db(a,b[d],c);return c}function xb(a,b,e,f){var 
h,i,j,k,l,m=ob(a);if(!f&&1===m.length){if(i=m[0]=m[0].slice(0),i.length>2&&"ID"===(j=i[0]).type&&c.getById&&9===b.nodeType&&n&&d.relative[i[1].type]){if(b=(d.find.ID(j.matches[0].replace(ab,bb),b)||[])[0],!b)return e;a=a.slice(i.shift().value.length)}h=V.needsContext.test(a)?0:i.length;while(h--){if(j=i[h],d.relative[k=j.type])break;if((l=d.find[k])&&(f=l(j.matches[0].replace(ab,bb),$.test(i[0].type)&&mb(b.parentNode)||b))){if(i.splice(h,1),a=f.length&&pb(i),!a)return G.apply(e,f),e;break}}}return g(a,m)(f,b,!n,e,$.test(a)&&mb(b.parentNode)||b),e}return c.sortStable=s.split("").sort(z).join("")===s,c.detectDuplicates=!!j,k(),c.sortDetached=gb(function(a){return 1&a.compareDocumentPosition(l.createElement("div"))}),gb(function(a){return a.innerHTML="","#"===a.firstChild.getAttribute("href")})||hb("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&gb(function(a){return a.innerHTML="",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||hb("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),gb(function(a){return null==a.getAttribute("disabled")})||hb(J,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),db}(a);n.find=t,n.expr=t.selectors,n.expr[":"]=n.expr.pseudos,n.unique=t.uniqueSort,n.text=t.getText,n.isXMLDoc=t.isXML,n.contains=t.contains;var u=n.expr.match.needsContext,v=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,w=/^.[^:#\[\.,]*$/;function x(a,b,c){if(n.isFunction(b))return n.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return n.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(w.test(b))return n.filter(b,a,c);b=n.filter(b,a)}return n.grep(a,function(a){return n.inArray(a,b)>=0!==c})}n.filter=function(a,b,c){var d=b[0];return 
c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?n.find.matchesSelector(d,a)?[d]:[]:n.find.matches(a,n.grep(b,function(a){return 1===a.nodeType}))},n.fn.extend({find:function(a){var b,c=[],d=this,e=d.length;if("string"!=typeof a)return this.pushStack(n(a).filter(function(){for(b=0;e>b;b++)if(n.contains(d[b],this))return!0}));for(b=0;e>b;b++)n.find(a,d[b],c);return c=this.pushStack(e>1?n.unique(c):c),c.selector=this.selector?this.selector+" "+a:a,c},filter:function(a){return this.pushStack(x(this,a||[],!1))},not:function(a){return this.pushStack(x(this,a||[],!0))},is:function(a){return!!x(this,"string"==typeof a&&u.test(a)?n(a):a||[],!1).length}});var y,z=a.document,A=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,B=n.fn.init=function(a,b){var c,d;if(!a)return this;if("string"==typeof a){if(c="<"===a.charAt(0)&&">"===a.charAt(a.length-1)&&a.length>=3?[null,a,null]:A.exec(a),!c||!c[1]&&b)return!b||b.jquery?(b||y).find(a):this.constructor(b).find(a);if(c[1]){if(b=b instanceof n?b[0]:b,n.merge(this,n.parseHTML(c[1],b&&b.nodeType?b.ownerDocument||b:z,!0)),v.test(c[1])&&n.isPlainObject(b))for(c in b)n.isFunction(this[c])?this[c](b[c]):this.attr(c,b[c]);return this}if(d=z.getElementById(c[2]),d&&d.parentNode){if(d.id!==c[2])return y.find(a);this.length=1,this[0]=d}return this.context=z,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):n.isFunction(a)?"undefined"!=typeof y.ready?y.ready(a):a(n):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),n.makeArray(a,this))};B.prototype=n.fn,y=n(z);var C=/^(?:parents|prev(?:Until|All))/,D={children:!0,contents:!0,next:!0,prev:!0};n.extend({dir:function(a,b,c){var d=[],e=a[b];while(e&&9!==e.nodeType&&(void 0===c||1!==e.nodeType||!n(e).is(c)))1===e.nodeType&&d.push(e),e=e[b];return d},sibling:function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c}}),n.fn.extend({has:function(a){var b,c=n(a,this),d=c.length;return 
this.filter(function(){for(b=0;d>b;b++)if(n.contains(this,c[b]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=u.test(a)||"string"!=typeof a?n(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&n.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?n.unique(f):f)},index:function(a){return a?"string"==typeof a?n.inArray(this[0],n(a)):n.inArray(a.jquery?a[0]:a,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(n.unique(n.merge(this.get(),n(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function E(a,b){do a=a[b];while(a&&1!==a.nodeType);return a}n.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return n.dir(a,"parentNode")},parentsUntil:function(a,b,c){return n.dir(a,"parentNode",c)},next:function(a){return E(a,"nextSibling")},prev:function(a){return E(a,"previousSibling")},nextAll:function(a){return n.dir(a,"nextSibling")},prevAll:function(a){return n.dir(a,"previousSibling")},nextUntil:function(a,b,c){return n.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return n.dir(a,"previousSibling",c)},siblings:function(a){return n.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return n.sibling(a.firstChild)},contents:function(a){return n.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:n.merge([],a.childNodes)}},function(a,b){n.fn[a]=function(c,d){var e=n.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=n.filter(d,e)),this.length>1&&(D[a]||(e=n.unique(e)),C.test(a)&&(e=e.reverse())),this.pushStack(e)}});var F=/\S+/g,G={};function H(a){var b=G[a]={};return n.each(a.match(F)||[],function(a,c){b[c]=!0}),b}n.Callbacks=function(a){a="string"==typeof a?G[a]||H(a):n.extend({},a);var 
b,c,d,e,f,g,h=[],i=!a.once&&[],j=function(l){for(c=a.memory&&l,d=!0,f=g||0,g=0,e=h.length,b=!0;h&&e>f;f++)if(h[f].apply(l[0],l[1])===!1&&a.stopOnFalse){c=!1;break}b=!1,h&&(i?i.length&&j(i.shift()):c?h=[]:k.disable())},k={add:function(){if(h){var d=h.length;!function f(b){n.each(b,function(b,c){var d=n.type(c);"function"===d?a.unique&&k.has(c)||h.push(c):c&&c.length&&"string"!==d&&f(c)})}(arguments),b?e=h.length:c&&(g=d,j(c))}return this},remove:function(){return h&&n.each(arguments,function(a,c){var d;while((d=n.inArray(c,h,d))>-1)h.splice(d,1),b&&(e>=d&&e--,f>=d&&f--)}),this},has:function(a){return a?n.inArray(a,h)>-1:!(!h||!h.length)},empty:function(){return h=[],e=0,this},disable:function(){return h=i=c=void 0,this},disabled:function(){return!h},lock:function(){return i=void 0,c||k.disable(),this},locked:function(){return!i},fireWith:function(a,c){return!h||d&&!i||(c=c||[],c=[a,c.slice?c.slice():c],b?i.push(c):j(c)),this},fire:function(){return k.fireWith(this,arguments),this},fired:function(){return!!d}};return k},n.extend({Deferred:function(a){var b=[["resolve","done",n.Callbacks("once memory"),"resolved"],["reject","fail",n.Callbacks("once memory"),"rejected"],["notify","progress",n.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return n.Deferred(function(c){n.each(b,function(b,f){var g=n.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&n.isFunction(a.promise)?a.promise().done(c.resolve).fail(c.reject).progress(c.notify):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?n.extend(a,d):d}},e={};return d.pipe=d.then,n.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return 
e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=d.call(arguments),e=c.length,f=1!==e||a&&n.isFunction(a.promise)?e:0,g=1===f?a:n.Deferred(),h=function(a,b,c){return function(e){b[a]=this,c[a]=arguments.length>1?d.call(arguments):e,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(e>1)for(i=new Array(e),j=new Array(e),k=new Array(e);e>b;b++)c[b]&&n.isFunction(c[b].promise)?c[b].promise().done(h(b,k,c)).fail(g.reject).progress(h(b,j,i)):--f;return f||g.resolveWith(k,c),g.promise()}});var I;n.fn.ready=function(a){return n.ready.promise().done(a),this},n.extend({isReady:!1,readyWait:1,holdReady:function(a){a?n.readyWait++:n.ready(!0)},ready:function(a){if(a===!0?!--n.readyWait:!n.isReady){if(!z.body)return setTimeout(n.ready);n.isReady=!0,a!==!0&&--n.readyWait>0||(I.resolveWith(z,[n]),n.fn.trigger&&n(z).trigger("ready").off("ready"))}}});function J(){z.addEventListener?(z.removeEventListener("DOMContentLoaded",K,!1),a.removeEventListener("load",K,!1)):(z.detachEvent("onreadystatechange",K),a.detachEvent("onload",K))}function K(){(z.addEventListener||"load"===event.type||"complete"===z.readyState)&&(J(),n.ready())}n.ready.promise=function(b){if(!I)if(I=n.Deferred(),"complete"===z.readyState)setTimeout(n.ready);else if(z.addEventListener)z.addEventListener("DOMContentLoaded",K,!1),a.addEventListener("load",K,!1);else{z.attachEvent("onreadystatechange",K),a.attachEvent("onload",K);var c=!1;try{c=null==a.frameElement&&z.documentElement}catch(d){}c&&c.doScroll&&!function e(){if(!n.isReady){try{c.doScroll("left")}catch(a){return setTimeout(e,50)}J(),n.ready()}}()}return I.promise(b)};var L="undefined",M;for(M in n(l))break;l.ownLast="0"!==M,l.inlineBlockNeedsLayout=!1,n(function(){var 
a,b,c=z.getElementsByTagName("body")[0];c&&(a=z.createElement("div"),a.style.cssText="border:0;width:0;height:0;position:absolute;top:0;left:-9999px;margin-top:1px",b=z.createElement("div"),c.appendChild(a).appendChild(b),typeof b.style.zoom!==L&&(b.style.cssText="border:0;margin:0;width:1px;padding:1px;display:inline;zoom:1",(l.inlineBlockNeedsLayout=3===b.offsetWidth)&&(c.style.zoom=1)),c.removeChild(a),a=b=null)}),function(){var a=z.createElement("div");if(null==l.deleteExpando){l.deleteExpando=!0;try{delete a.test}catch(b){l.deleteExpando=!1}}a=null}(),n.acceptData=function(a){var b=n.noData[(a.nodeName+" ").toLowerCase()],c=+a.nodeType||1;return 1!==c&&9!==c?!1:!b||b!==!0&&a.getAttribute("classid")===b};var N=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,O=/([A-Z])/g;function P(a,b,c){if(void 0===c&&1===a.nodeType){var d="data-"+b.replace(O,"-$1").toLowerCase();if(c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:N.test(c)?n.parseJSON(c):c}catch(e){}n.data(a,b,c)}else c=void 0}return c}function Q(a){var b;for(b in a)if(("data"!==b||!n.isEmptyObject(a[b]))&&"toJSON"!==b)return!1;return!0}function R(a,b,d,e){if(n.acceptData(a)){var f,g,h=n.expando,i=a.nodeType,j=i?n.cache:a,k=i?a[h]:a[h]&&h;if(k&&j[k]&&(e||j[k].data)||void 0!==d||"string"!=typeof b)return k||(k=i?a[h]=c.pop()||n.guid++:h),j[k]||(j[k]=i?{}:{toJSON:n.noop}),("object"==typeof b||"function"==typeof b)&&(e?j[k]=n.extend(j[k],b):j[k].data=n.extend(j[k].data,b)),g=j[k],e||(g.data||(g.data={}),g=g.data),void 0!==d&&(g[n.camelCase(b)]=d),"string"==typeof b?(f=g[b],null==f&&(f=g[n.camelCase(b)])):f=g,f +}}function S(a,b,c){if(n.acceptData(a)){var d,e,f=a.nodeType,g=f?n.cache:a,h=f?a[n.expando]:n.expando;if(g[h]){if(b&&(d=c?g[h]:g[h].data)){n.isArray(b)?b=b.concat(n.map(b,n.camelCase)):b in d?b=[b]:(b=n.camelCase(b),b=b in d?[b]:b.split(" ")),e=b.length;while(e--)delete d[b[e]];if(c?!Q(d):!n.isEmptyObject(d))return}(c||(delete 
g[h].data,Q(g[h])))&&(f?n.cleanData([a],!0):l.deleteExpando||g!=g.window?delete g[h]:g[h]=null)}}}n.extend({cache:{},noData:{"applet ":!0,"embed ":!0,"object ":"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000"},hasData:function(a){return a=a.nodeType?n.cache[a[n.expando]]:a[n.expando],!!a&&!Q(a)},data:function(a,b,c){return R(a,b,c)},removeData:function(a,b){return S(a,b)},_data:function(a,b,c){return R(a,b,c,!0)},_removeData:function(a,b){return S(a,b,!0)}}),n.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=n.data(f),1===f.nodeType&&!n._data(f,"parsedAttrs"))){c=g.length;while(c--)d=g[c].name,0===d.indexOf("data-")&&(d=n.camelCase(d.slice(5)),P(f,d,e[d]));n._data(f,"parsedAttrs",!0)}return e}return"object"==typeof a?this.each(function(){n.data(this,a)}):arguments.length>1?this.each(function(){n.data(this,a,b)}):f?P(f,a,n.data(f,a)):void 0},removeData:function(a){return this.each(function(){n.removeData(this,a)})}}),n.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=n._data(a,b),c&&(!d||n.isArray(c)?d=n._data(a,b,n.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=n.queue(a,b),d=c.length,e=c.shift(),f=n._queueHooks(a,b),g=function(){n.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return n._data(a,c)||n._data(a,c,{empty:n.Callbacks("once memory").add(function(){n._removeData(a,b+"queue"),n._removeData(a,c)})})}}),n.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.lengthh;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f},X=/^(?:checkbox|radio)$/i;!function(){var a=z.createDocumentFragment(),b=z.createElement("div"),c=z.createElement("input");if(b.setAttribute("className","t"),b.innerHTML="
a",l.leadingWhitespace=3===b.firstChild.nodeType,l.tbody=!b.getElementsByTagName("tbody").length,l.htmlSerialize=!!b.getElementsByTagName("link").length,l.html5Clone="<:nav>"!==z.createElement("nav").cloneNode(!0).outerHTML,c.type="checkbox",c.checked=!0,a.appendChild(c),l.appendChecked=c.checked,b.innerHTML="",l.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue,a.appendChild(b),b.innerHTML="",l.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,l.noCloneEvent=!0,b.attachEvent&&(b.attachEvent("onclick",function(){l.noCloneEvent=!1}),b.cloneNode(!0).click()),null==l.deleteExpando){l.deleteExpando=!0;try{delete b.test}catch(d){l.deleteExpando=!1}}a=b=c=null}(),function(){var b,c,d=z.createElement("div");for(b in{submit:!0,change:!0,focusin:!0})c="on"+b,(l[b+"Bubbles"]=c in a)||(d.setAttribute(c,"t"),l[b+"Bubbles"]=d.attributes[c].expando===!1);d=null}();var Y=/^(?:input|select|textarea)$/i,Z=/^key/,$=/^(?:mouse|contextmenu)|click/,_=/^(?:focusinfocus|focusoutblur)$/,ab=/^([^.]*)(?:\.(.+)|)$/;function bb(){return!0}function cb(){return!1}function db(){try{return z.activeElement}catch(a){}}n.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=n._data(a);if(r){c.handler&&(i=c,c=i.handler,e=i.selector),c.guid||(c.guid=n.guid++),(g=r.events)||(g=r.events={}),(k=r.handle)||(k=r.handle=function(a){return typeof n===L||a&&n.event.triggered===a.type?void 
0:n.event.dispatch.apply(k.elem,arguments)},k.elem=a),b=(b||"").match(F)||[""],h=b.length;while(h--)f=ab.exec(b[h])||[],o=q=f[1],p=(f[2]||"").split(".").sort(),o&&(j=n.event.special[o]||{},o=(e?j.delegateType:j.bindType)||o,j=n.event.special[o]||{},l=n.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&n.expr.match.needsContext.test(e),namespace:p.join(".")},i),(m=g[o])||(m=g[o]=[],m.delegateCount=0,j.setup&&j.setup.call(a,d,p,k)!==!1||(a.addEventListener?a.addEventListener(o,k,!1):a.attachEvent&&a.attachEvent("on"+o,k))),j.add&&(j.add.call(a,l),l.handler.guid||(l.handler.guid=c.guid)),e?m.splice(m.delegateCount++,0,l):m.push(l),n.event.global[o]=!0);a=null}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=n.hasData(a)&&n._data(a);if(r&&(k=r.events)){b=(b||"").match(F)||[""],j=b.length;while(j--)if(h=ab.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=n.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,m=k[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),i=f=m.length;while(f--)g=m[f],!e&&q!==g.origType||c&&c.guid!==g.guid||h&&!h.test(g.namespace)||d&&d!==g.selector&&("**"!==d||!g.selector)||(m.splice(f,1),g.selector&&m.delegateCount--,l.remove&&l.remove.call(a,g));i&&!m.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||n.removeEvent(a,o,r.handle),delete k[o])}else for(o in k)n.event.remove(a,o+b[j],c,d,!0);n.isEmptyObject(k)&&(delete r.handle,n._removeData(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,k,l,m,o=[d||z],p=j.call(b,"type")?b.type:b,q=j.call(b,"namespace")?b.namespace.split("."):[];if(h=l=d=d||z,3!==d.nodeType&&8!==d.nodeType&&!_.test(p+n.event.triggered)&&(p.indexOf(".")>=0&&(q=p.split("."),p=q.shift(),q.sort()),g=p.indexOf(":")<0&&"on"+p,b=b[n.expando]?b:new n.Event(p,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=q.join("."),b.namespace_re=b.namespace?new RegExp("(^|\\.)"+q.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 
0,b.target||(b.target=d),c=null==c?[b]:n.makeArray(c,[b]),k=n.event.special[p]||{},e||!k.trigger||k.trigger.apply(d,c)!==!1)){if(!e&&!k.noBubble&&!n.isWindow(d)){for(i=k.delegateType||p,_.test(i+p)||(h=h.parentNode);h;h=h.parentNode)o.push(h),l=h;l===(d.ownerDocument||z)&&o.push(l.defaultView||l.parentWindow||a)}m=0;while((h=o[m++])&&!b.isPropagationStopped())b.type=m>1?i:k.bindType||p,f=(n._data(h,"events")||{})[b.type]&&n._data(h,"handle"),f&&f.apply(h,c),f=g&&h[g],f&&f.apply&&n.acceptData(h)&&(b.result=f.apply(h,c),b.result===!1&&b.preventDefault());if(b.type=p,!e&&!b.isDefaultPrevented()&&(!k._default||k._default.apply(o.pop(),c)===!1)&&n.acceptData(d)&&g&&d[p]&&!n.isWindow(d)){l=d[g],l&&(d[g]=null),n.event.triggered=p;try{d[p]()}catch(r){}n.event.triggered=void 0,l&&(d[g]=l)}return b.result}},dispatch:function(a){a=n.event.fix(a);var b,c,e,f,g,h=[],i=d.call(arguments),j=(n._data(this,"events")||{})[a.type]||[],k=n.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=n.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,g=0;while((e=f.handlers[g++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(e.namespace))&&(a.handleObj=e,a.data=e.data,c=((n.event.special[e.origType]||{}).handle||e.handler).apply(f.elem,i),void 0!==c&&(a.result=c)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!=this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(e=[],f=0;h>f;f++)d=b[f],c=d.selector+" ",void 0===e[c]&&(e[c]=d.needsContext?n(c,this).index(i)>=0:n.find(c,this,null,[i]).length),e[c]&&e.push(d);e.length&&g.push({elem:i,handlers:e})}return 
h]","i"),ib=/^\s+/,jb=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,kb=/<([\w:]+)/,lb=/\s*$/g,sb={option:[1,""],legend:[1,"
","
"],area:[1,"",""],param:[1,"",""],thead:[1,"","
"],tr:[2,"","
"],col:[2,"","
"],td:[3,"","
"],_default:l.htmlSerialize?[0,"",""]:[1,"X
","
"]},tb=eb(z),ub=tb.appendChild(z.createElement("div"));sb.optgroup=sb.option,sb.tbody=sb.tfoot=sb.colgroup=sb.caption=sb.thead,sb.th=sb.td;function vb(a,b){var c,d,e=0,f=typeof a.getElementsByTagName!==L?a.getElementsByTagName(b||"*"):typeof a.querySelectorAll!==L?a.querySelectorAll(b||"*"):void 0;if(!f)for(f=[],c=a.childNodes||a;null!=(d=c[e]);e++)!b||n.nodeName(d,b)?f.push(d):n.merge(f,vb(d,b));return void 0===b||b&&n.nodeName(a,b)?n.merge([a],f):f}function wb(a){X.test(a.type)&&(a.defaultChecked=a.checked)}function xb(a,b){return n.nodeName(a,"table")&&n.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function yb(a){return a.type=(null!==n.find.attr(a,"type"))+"/"+a.type,a}function zb(a){var b=qb.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function Ab(a,b){for(var c,d=0;null!=(c=a[d]);d++)n._data(c,"globalEval",!b||n._data(b[d],"globalEval"))}function Bb(a,b){if(1===b.nodeType&&n.hasData(a)){var c,d,e,f=n._data(a),g=n._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].length;e>d;d++)n.event.add(b,c,h[c][d])}g.data&&(g.data=n.extend({},g.data))}}function Cb(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeName.toLowerCase(),!l.noCloneEvent&&b[n.expando]){e=n._data(b);for(d in e.events)n.removeEvent(b,d,e.handle);b.removeAttribute(n.expando)}"script"===c&&b.text!==a.text?(yb(b).text=a.text,zb(b)):"object"===c?(b.parentNode&&(b.outerHTML=a.outerHTML),l.html5Clone&&a.innerHTML&&!n.trim(b.innerHTML)&&(b.innerHTML=a.innerHTML)):"input"===c&&X.test(a.type)?(b.defaultChecked=b.checked=a.checked,b.value!==a.value&&(b.value=a.value)):"option"===c?b.defaultSelected=b.selected=a.defaultSelected:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}}n.extend({clone:function(a,b,c){var 
d,e,f,g,h,i=n.contains(a.ownerDocument,a);if(l.html5Clone||n.isXMLDoc(a)||!hb.test("<"+a.nodeName+">")?f=a.cloneNode(!0):(ub.innerHTML=a.outerHTML,ub.removeChild(f=ub.firstChild)),!(l.noCloneEvent&&l.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||n.isXMLDoc(a)))for(d=vb(f),h=vb(a),g=0;null!=(e=h[g]);++g)d[g]&&Cb(e,d[g]);if(b)if(c)for(h=h||vb(a),d=d||vb(f),g=0;null!=(e=h[g]);g++)Bb(e,d[g]);else Bb(a,f);return d=vb(f,"script"),d.length>0&&Ab(d,!i&&vb(a,"script")),d=h=e=null,f},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,k,m=a.length,o=eb(b),p=[],q=0;m>q;q++)if(f=a[q],f||0===f)if("object"===n.type(f))n.merge(p,f.nodeType?[f]:f);else if(mb.test(f)){h=h||o.appendChild(b.createElement("div")),i=(kb.exec(f)||["",""])[1].toLowerCase(),k=sb[i]||sb._default,h.innerHTML=k[1]+f.replace(jb,"<$1>")+k[2],e=k[0];while(e--)h=h.lastChild;if(!l.leadingWhitespace&&ib.test(f)&&p.push(b.createTextNode(ib.exec(f)[0])),!l.tbody){f="table"!==i||lb.test(f)?""!==k[1]||lb.test(f)?0:h:h.firstChild,e=f&&f.childNodes.length;while(e--)n.nodeName(j=f.childNodes[e],"tbody")&&!j.childNodes.length&&f.removeChild(j)}n.merge(p,h.childNodes),h.textContent="";while(h.firstChild)h.removeChild(h.firstChild);h=o.lastChild}else p.push(b.createTextNode(f));h&&o.removeChild(h),l.appendChecked||n.grep(vb(p,"input"),wb),q=0;while(f=p[q++])if((!d||-1===n.inArray(f,d))&&(g=n.contains(f.ownerDocument,f),h=vb(o.appendChild(f),"script"),g&&Ab(h),c)){e=0;while(f=h[e++])pb.test(f.type||"")&&c.push(f)}return h=null,o},cleanData:function(a,b){for(var d,e,f,g,h=0,i=n.expando,j=n.cache,k=l.deleteExpando,m=n.event.special;null!=(d=a[h]);h++)if((b||n.acceptData(d))&&(f=d[i],g=f&&j[f])){if(g.events)for(e in g.events)m[e]?n.event.remove(d,e):n.removeEvent(d,e,g.handle);j[f]&&(delete j[f],k?delete d[i]:typeof d.removeAttribute!==L?d.removeAttribute(i):d[i]=null,c.push(f))}}}),n.fn.extend({text:function(a){return W(this,function(a){return void 
0===a?n.text(this):this.empty().append((this[0]&&this[0].ownerDocument||z).createTextNode(a))},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=xb(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=xb(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var c,d=a?n.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||n.cleanData(vb(c)),c.parentNode&&(b&&n.contains(c.ownerDocument,c)&&Ab(vb(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++){1===a.nodeType&&n.cleanData(vb(a,!1));while(a.firstChild)a.removeChild(a.firstChild);a.options&&n.nodeName(a,"select")&&(a.options.length=0)}return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return n.clone(this,a,b)})},html:function(a){return W(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a)return 1===b.nodeType?b.innerHTML.replace(gb,""):void 0;if(!("string"!=typeof a||nb.test(a)||!l.htmlSerialize&&hb.test(a)||!l.leadingWhitespace&&ib.test(a)||sb[(kb.exec(a)||["",""])[1].toLowerCase()])){a=a.replace(jb,"<$1>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(n.cleanData(vb(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,n.cleanData(vb(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return 
this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,k=this.length,m=this,o=k-1,p=a[0],q=n.isFunction(p);if(q||k>1&&"string"==typeof p&&!l.checkClone&&ob.test(p))return this.each(function(c){var d=m.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(k&&(i=n.buildFragment(a,this[0].ownerDocument,!1,this),c=i.firstChild,1===i.childNodes.length&&(i=c),c)){for(g=n.map(vb(i,"script"),yb),f=g.length;k>j;j++)d=i,j!==o&&(d=n.clone(d,!0,!0),f&&n.merge(g,vb(d,"script"))),b.call(this[j],d,j);if(f)for(h=g[g.length-1].ownerDocument,n.map(g,zb),j=0;f>j;j++)d=g[j],pb.test(d.type||"")&&!n._data(d,"globalEval")&&n.contains(h,d)&&(d.src?n._evalUrl&&n._evalUrl(d.src):n.globalEval((d.text||d.textContent||d.innerHTML||"").replace(rb,"")));i=c=null}return this}}),n.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){n.fn[a]=function(a){for(var c,d=0,e=[],g=n(a),h=g.length-1;h>=d;d++)c=d===h?this:this.clone(!0),n(g[d])[b](c),f.apply(e,c.get());return this.pushStack(e)}});var Db,Eb={};function Fb(b,c){var d=n(c.createElement(b)).appendTo(c.body),e=a.getDefaultComputedStyle?a.getDefaultComputedStyle(d[0]).display:n.css(d[0],"display");return d.detach(),e}function Gb(a){var b=z,c=Eb[a];return c||(c=Fb(a,b),"none"!==c&&c||(Db=(Db||n("